ngram
listlengths
0
67.8k
[ "self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.address_2 = address_2 self.phone_2", "self.email_2 = email_2 self.email_3 = email_3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month", "return ( self.id is None or other.id is None or self.id == other.id)", "self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self, other): return ( self.id is None", "or other.id is None or self.id == other.id) and self.first_name == other.first_name and", "= phone_2 self.notes = notes self.id = id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page =", "self.email_3 = email_3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year", "self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year", "= notes self.id = id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self):", "None or self.id == other.id) and self.first_name == other.first_name and ( self.last_name is", "= company self.address = address self.phone_home = phone_home self.phone_mobile = phone_mobile self.phone_work =", "birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name", "birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name", "is None or other.last_name is None or self.last_name == other.last_name) def 
id_or_max(self): if", "self.address_2 = address_2 self.phone_2 = phone_2 self.notes = notes self.id = id self.all_phones_from_home_page", "sys import maxsize class Contact: def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None,", "anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.address_2 = address_2 self.phone_2 = phone_2", "company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None,", "\"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work,", "other.id) and self.first_name == other.first_name and ( self.last_name is None or other.last_name is", "homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day", "self.phone_2 = phone_2 self.notes = notes self.id = id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page", "= email_3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year =", "self.middle_name = middle_name self.last_name = last_name self.nickname = nickname self.title = title self.company", "self.first_name == other.first_name and ( self.last_name is None or other.last_name is None or", "birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month", "other.first_name and ( self.last_name is None or other.last_name is None or self.last_name ==", "def __eq__(self, other): return ( self.id is None or other.id is None or", "return 
\"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile,", "email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None,", "self.homepage = homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day", "( self.id is None or other.id is None or self.id == other.id) and", "__eq__(self, other): return ( self.id is None or other.id is None or self.id", "= id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" %", "= anniversary_year self.address_2 = address_2 self.phone_2 = phone_2 self.notes = notes self.id =", "nickname self.title = title self.company = company self.address = address self.phone_home = phone_home", "self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.address_2", "id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % (", "first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None,", "address self.phone_home = phone_home self.phone_mobile = phone_mobile self.phone_work = phone_work self.fax = fax", "self.address_2, self.phone_2, self.notes) def __eq__(self, other): return ( self.id is None or other.id", 
"self.first_name = first_name self.middle_name = middle_name self.last_name = last_name self.nickname = nickname self.title", "other.last_name is None or self.last_name == other.last_name) def id_or_max(self): if self.id: return int(self.id)", "is None or self.id == other.id) and self.first_name == other.first_name and ( self.last_name", "= phone_work self.fax = fax self.email_1 = email_1 self.email_2 = email_2 self.email_3 =", "all_emails_from_home_page=None): self.first_name = first_name self.middle_name = middle_name self.last_name = last_name self.nickname = nickname", "self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2,", "self.phone_2, self.notes) def __eq__(self, other): return ( self.id is None or other.id is", "all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company,", "self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self, other): return", "self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage,", "maxsize class Contact: def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None,", "first_name self.middle_name = middle_name self.last_name = last_name self.nickname = nickname self.title = title", "homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None):", 
"phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None,", "anniversary_month self.anniversary_year = anniversary_year self.address_2 = address_2 self.phone_2 = phone_2 self.notes = notes", "= address self.phone_home = phone_home self.phone_mobile = phone_mobile self.phone_work = phone_work self.fax =", "= all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name,", "all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name, self.middle_name,", "address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None,", "middle_name self.last_name = last_name self.nickname = nickname self.title = title self.company = company", "= nickname self.title = title self.company = company self.address = address self.phone_home =", "self.last_name is None or other.last_name is None or self.last_name == other.last_name) def id_or_max(self):", "anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name", "= email_1 self.email_2 = email_2 self.email_3 = email_3 self.homepage = homepage self.birthday_day =", "fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, 
address_2=None, phone_2=None,", "middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None,", "= all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name, self.middle_name, self.nickname, self.title,", "self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2,", "( self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1,", "= first_name self.middle_name = middle_name self.last_name = last_name self.nickname = nickname self.title =", "notes self.id = id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return", "self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self, other): return ( self.id", "self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2,", "self.title = title self.company = company self.address = address self.phone_home = phone_home self.phone_mobile", "title self.company = company self.address = address self.phone_home = phone_home self.phone_mobile = phone_mobile", "( self.last_name is None or other.last_name is None or self.last_name == other.last_name) def", "= last_name self.nickname = nickname self.title = title self.company = company self.address =", "= birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month 
self.anniversary_year =", "def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None,", "self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name, self.middle_name, self.nickname,", "import maxsize class Contact: def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None,", "% ( self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax,", "__repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home,", "phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None,", "= address_2 self.phone_2 = phone_2 self.notes = notes self.id = id self.all_phones_from_home_page =", "self.phone_mobile = phone_mobile self.phone_work = phone_work self.fax = fax self.email_1 = email_1 self.email_2", "= title self.company = company self.address = address self.phone_home = phone_home self.phone_mobile =", "= phone_mobile self.phone_work = phone_work self.fax = fax self.email_1 = email_1 self.email_2 =", "email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None,", "self.notes = notes self.id = id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = 
all_emails_from_home_page def", "self.address = address self.phone_home = phone_home self.phone_mobile = phone_mobile self.phone_work = phone_work self.fax", "== other.first_name and ( self.last_name is None or other.last_name is None or self.last_name", "self.last_name = last_name self.nickname = nickname self.title = title self.company = company self.address", "other): return ( self.id is None or other.id is None or self.id ==", "phone_mobile self.phone_work = phone_work self.fax = fax self.email_1 = email_1 self.email_2 = email_2", "last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None,", "= email_2 self.email_3 = email_3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month =", "birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.address_2 = address_2", "None or other.last_name is None or self.last_name == other.last_name) def id_or_max(self): if self.id:", "is None or other.id is None or self.id == other.id) and self.first_name ==", "notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name = middle_name self.last_name = last_name", "self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.address_2 = address_2 self.phone_2 = phone_2 self.notes", "= phone_home self.phone_mobile = phone_mobile self.phone_work = phone_work self.fax = fax self.email_1 =", "or other.last_name is None or self.last_name == other.last_name) def id_or_max(self): if self.id: return", "self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( self.first_name,", "Contact: def __init__(self, 
first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None,", "self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self, other):", "self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self, other): return ( self.id is None or", "= anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.address_2 = address_2 self.phone_2 =", "self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def", "self.nickname = nickname self.title = title self.company = company self.address = address self.phone_home", "email_2 self.email_3 = email_3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month = birthday_month", "= birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month =", "self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3,", "and self.first_name == other.first_name and ( self.last_name is None or other.last_name is None", "company self.address = address self.phone_home = phone_home self.phone_mobile = phone_mobile self.phone_work = phone_work", "self.company = company self.address = address self.phone_home = phone_home self.phone_mobile = phone_mobile self.phone_work", "email_3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year", "self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self, other): return ( self.id is", 
"last_name self.nickname = nickname self.title = title self.company = company self.address = address", "self.id == other.id) and self.first_name == other.first_name and ( self.last_name is None or", "anniversary_year self.address_2 = address_2 self.phone_2 = phone_2 self.notes = notes self.id = id", "self.company, self.address, self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes)", "phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name = middle_name self.last_name =", "address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name = middle_name self.last_name", "__init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None,", "anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name =", "email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None,", "self.phone_work = phone_work self.fax = fax self.email_1 = email_1 self.email_2 = email_2 self.email_3", "self.phone_home, self.phone_mobile, self.phone_work, self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self,", "= middle_name self.last_name = last_name self.nickname = nickname self.title = title self.company =", "def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\" % ( 
self.first_name, self.last_name, self.middle_name, self.nickname, self.title, self.company, self.address,", "nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None,", "address_2 self.phone_2 = phone_2 self.notes = notes self.id = id self.all_phones_from_home_page = all_phones_from_home_page", "anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name = middle_name", "= homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day =", "phone_2 self.notes = notes self.id = id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page", "self.notes) def __eq__(self, other): return ( self.id is None or other.id is None", "other.id is None or self.id == other.id) and self.first_name == other.first_name and (", "class Contact: def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, phone_home=None, phone_mobile=None,", "fax self.email_1 = email_1 self.email_2 = email_2 self.email_3 = email_3 self.homepage = homepage", "title=None, company=None, address=None, phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None,", "self.email_1 = email_1 self.email_2 = email_2 self.email_3 = email_3 self.homepage = homepage self.birthday_day", "id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name = middle_name self.last_name = last_name self.nickname", "None or other.id is None or self.id == other.id) and self.first_name == other.first_name", 
"self.anniversary_year = anniversary_year self.address_2 = address_2 self.phone_2 = phone_2 self.notes = notes self.id", "= birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.address_2 =", "self.fax, self.email_1, self.email_2, self.email_3, self.homepage, self.address_2, self.phone_2, self.notes) def __eq__(self, other): return (", "self.id = id self.all_phones_from_home_page = all_phones_from_home_page self.all_emails_from_home_page = all_emails_from_home_page def __repr__(self): return \"%s:%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\"", "None or self.last_name == other.last_name) def id_or_max(self): if self.id: return int(self.id) else: return", "or self.last_name == other.last_name) def id_or_max(self): if self.id: return int(self.id) else: return maxsize", "is None or self.last_name == other.last_name) def id_or_max(self): if self.id: return int(self.id) else:", "self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month", "and ( self.last_name is None or other.last_name is None or self.last_name == other.last_name)", "== other.id) and self.first_name == other.first_name and ( self.last_name is None or other.last_name", "email_1 self.email_2 = email_2 self.email_3 = email_3 self.homepage = homepage self.birthday_day = birthday_day", "birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year", "phone_home=None, phone_mobile=None, phone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None,", "all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name = first_name self.middle_name = middle_name 
self.last_name = last_name self.nickname =", "= fax self.email_1 = email_1 self.email_2 = email_2 self.email_3 = email_3 self.homepage =", "birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, address_2=None, phone_2=None, notes=None, id=None, all_phones_from_home_page=None, all_emails_from_home_page=None): self.first_name =", "self.fax = fax self.email_1 = email_1 self.email_2 = email_2 self.email_3 = email_3 self.homepage", "phone_work self.fax = fax self.email_1 = email_1 self.email_2 = email_2 self.email_3 = email_3", "self.id is None or other.id is None or self.id == other.id) and self.first_name", "from sys import maxsize class Contact: def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None,", "self.phone_home = phone_home self.phone_mobile = phone_mobile self.phone_work = phone_work self.fax = fax self.email_1", "= anniversary_month self.anniversary_year = anniversary_year self.address_2 = address_2 self.phone_2 = phone_2 self.notes =", "phone_home self.phone_mobile = phone_mobile self.phone_work = phone_work self.fax = fax self.email_1 = email_1", "or self.id == other.id) and self.first_name == other.first_name and ( self.last_name is None" ]
[ "Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now read a mesh with double coords #", "now read a mesh with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\"))", "on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test float", "---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force single precision for this test #", "DeleteAllPlots() # # Ok, now read a mesh with double coords # AddPlot(\"Mesh\",\"meshD\")", "# # <NAME>, Wed Jan 20 07:37:11 PST 2010 # Added ability to", "Jan 20 07:37:11 PST 2010 # Added ability to swtich between Silo's HDF5", "with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test", "DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data on a double", "24, 2006 # # Modifications: # # <NAME>, Wed Jan 20 07:37:11 PST", "DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now read a mesh with double coords", "float data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # #", "test double data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\"))", "# test double data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots()", "conversion to float # # Programmer: <NAME> # Date: September 24, 2006 #", "data. 
# ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force single precision for this", "# # Test ordinary float data (no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\")", "Test ordinary float data (no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() #", "# AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # # test double data on a float", "September 24, 2006 # # Modifications: # # <NAME>, Wed Jan 20 07:37:11", "<NAME>, Wed Jan 20 07:37:11 PST 2010 # Added ability to swtich between", "AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test float data on a double mesh", "test float data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() #", "on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # #", "data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test", "# ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force single precision for this test", "# Added ability to swtich between Silo's HDF5 and PDB data. 
# ----------------------------------------------------------------------------", "OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots()", "# Modifications: # # <NAME>, Wed Jan 20 07:37:11 PST 2010 # Added", "off force single precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0", "nightly # # Test Case: xform_precision.py # # Tests: Transform manager's conversion to", "readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary float data", "test double data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_meshD\") DrawPlots() Test(\"float_xform_06\") DeleteAllPlots() Exit()", "xform_precision.py # # Tests: Transform manager's conversion to float # # Programmer: <NAME>", "Test(\"float_xform_04\") DeleteAllPlots() # # test double data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\")", "# # Tests: Transform manager's conversion to float # # Programmer: <NAME> #", "# test float data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots()", "20 07:37:11 PST 2010 # Added ability to swtich between Silo's HDF5 and", "2010 # Added ability to swtich between Silo's HDF5 and PDB data. 
#", "# # Programmer: <NAME> # Date: September 24, 2006 # # Modifications: #", "CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_meshD\")", "Ok, now read a mesh with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots()", "readOptions) # # Test ordinary float data (no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots()", "readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary float data (no", "# AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data on", "first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now read a mesh", "swtich between Silo's HDF5 and PDB data. 
# ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn", "a mesh with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) #", "mesh with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # #", "a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test", "test float data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() #", "double data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\"))", "DeleteAllPlots() # # test float data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots()", "# # Modifications: # # <NAME>, Wed Jan 20 07:37:11 PST 2010 #", "double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # # test double data on", "2006 # # Modifications: # # <NAME>, Wed Jan 20 07:37:11 PST 2010", "PDB data. 
# ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force single precision for", "read a mesh with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\"))", "coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data", "DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data on a double mesh #", "a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test float data", "# <NAME>, Wed Jan 20 07:37:11 PST 2010 # Added ability to swtich", "mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data", "CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\")", "# Test ordinary float data (no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots()", "# test float data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots()", "AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now read a mesh with double", "Transform manager's conversion to float # # Programmer: <NAME> # Date: September 24,", "# # test float data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\")", "PST 2010 # 
Added ability to swtich between Silo's HDF5 and PDB data.", "float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test float data on", "(no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now read", "Date: September 24, 2006 # # Modifications: # # <NAME>, Wed Jan 20", "HDF5 and PDB data. # ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force single", "= 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary float data (no conversion) first", "Added ability to swtich between Silo's HDF5 and PDB data. # ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\"))", "Wed Jan 20 07:37:11 PST 2010 # Added ability to swtich between Silo's", "# ---------------------------------------------------------------------------- # CLASSES: nightly # # Test Case: xform_precision.py # # Tests:", "on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # # test double", "float data (no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # # Ok,", "a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # # test double data", "float # # Programmer: <NAME> # Date: September 24, 2006 # # Modifications:", "test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary", "DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data on a float", "data (no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() 
Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now", "CLASSES: nightly # # Test Case: xform_precision.py # # Tests: Transform manager's conversion", "precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) #", "float data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # #", "DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # # test double data on a float mesh #", "AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data on a", "ordinary float data (no conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # #", "OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_meshD\") DrawPlots()", "data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) #", "0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary float data (no conversion) first #", "# Turn off force single precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"]", "Programmer: <NAME> # Date: September 24, 2006 # # Modifications: # # <NAME>,", "Modifications: # # <NAME>, Wed Jan 20 07:37:11 PST 2010 # Added ability", "to swtich between Silo's HDF5 and PDB data. 
# ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # #", "# test double data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_meshD\") DrawPlots() Test(\"float_xform_06\") DeleteAllPlots()", "Tests: Transform manager's conversion to float # # Programmer: <NAME> # Date: September", "OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force single precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\")", "<NAME> # Date: September 24, 2006 # # Modifications: # # <NAME>, Wed", "this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test", "Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data on a float mesh", "for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # #", "# Ok, now read a mesh with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\")", "DeleteAllPlots() # # test double data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots()", "and PDB data. # ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force single precision", "ability to swtich between Silo's HDF5 and PDB data. 
# ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) #", "Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data on a double mesh", "to float # # Programmer: <NAME> # Date: September 24, 2006 # #", "AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # # test double data on a float mesh", "Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary float data (no conversion)", "# # test double data on a float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\")", "# # Ok, now read a mesh with double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots()", "# Tests: Transform manager's conversion to float # # Programmer: <NAME> # Date:", "# AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now read a mesh with", "---------------------------------------------------------------------------- # CLASSES: nightly # # Test Case: xform_precision.py # # Tests: Transform", "double coords # AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float", "data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") DeleteAllPlots() # # test", "AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double data on a", "conversion) first # AddPlot(\"Mesh\",\"mesh\") DrawPlots() Test(\"float_xform_01\") DeleteAllPlots() # # Ok, now read a", "mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\") 
DeleteAllPlots() # # test double data on a", "# AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test float data on a double", "DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data on a float mesh #", "Test(\"float_xform_03\") DeleteAllPlots() # # test float data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\")", "DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test float data on a double mesh #", "# CLASSES: nightly # # Test Case: xform_precision.py # # Tests: Transform manager's", "# # Turn off force single precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force", "# Programmer: <NAME> # Date: September 24, 2006 # # Modifications: # #", "float mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_mesh\") DrawPlots() Test(\"float_xform_05\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test double", "# readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary float", "# # Test Case: xform_precision.py # # Tests: Transform manager's conversion to float", "# Test Case: xform_precision.py # # Tests: Transform manager's conversion to float #", "# # test double data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElevD_on_meshD\") DrawPlots() Test(\"float_xform_06\")", "single precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\", readOptions)", "force single precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] = 0 SetDefaultFileOpenOptions(\"Silo\",", "# Date: September 24, 2006 # # Modifications: # # <NAME>, Wed Jan", "07:37:11 PST 2010 # Added ability to swtich between Silo's 
HDF5 and PDB", "Silo's HDF5 and PDB data. # ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off force", "Case: xform_precision.py # # Tests: Transform manager's conversion to float # # Programmer:", "Test Case: xform_precision.py # # Tests: Transform manager's conversion to float # #", "SetDefaultFileOpenOptions(\"Silo\", readOptions) # # Test ordinary float data (no conversion) first # AddPlot(\"Mesh\",\"mesh\")", "manager's conversion to float # # Programmer: <NAME> # Date: September 24, 2006", "between Silo's HDF5 and PDB data. # ---------------------------------------------------------------------------- OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # Turn off", "# AddPlot(\"Mesh\",\"meshD\") DrawPlots() Test(\"float_xform_02\") DeleteAllPlots() CloseDatabase(silo_data_path(\"quad_disk.silo\")) OpenDatabase(silo_data_path(\"quad_disk.silo\")) # # test float data on", "mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_mesh\") DrawPlots() Test(\"float_xform_03\") DeleteAllPlots() # # test float data on a", "Turn off force single precision for this test # readOptions=GetDefaultFileOpenOptions(\"Silo\") readOptions[\"Force Single\"] =", "# # test float data on a double mesh # AddPlot(\"Pseudocolor\",\"sphElev_on_meshD\") DrawPlots() Test(\"float_xform_04\")" ]
[ "self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) # Fill collapsable", "# - button edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree:", "= self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path, result, datatype, description) def on_map_node(self, tree,", "self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # - entry title entry_title = tk.Entry(frame, name=\"treeTitle\",", "node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree,", "tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) #", "on_change_database(self, path): self._parent_view.open_database(path) def on_click_truncate(self, table): sql = \"DELETE FROM {}\".format(table) formatter =", "self._on_click_sql(tree, node_id)) # - install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0,", "self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self, tree, node_id): sql =", "command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # - entry title entry_title = tk.Entry(frame,", "def on_click_truncate(self, table): sql = \"DELETE FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter,", "button expander command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander,", "name=\"treeTitle\", state=\"readonly\", 
textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # -", "node, frame): node_id = node[\"node_id\"] if node_id == 0: return # some vars", "path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\"", "collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\",", "0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self, tree, node_id):", "TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder = nodebar_builder self._host", "= tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame = None self._nodebar = None self._formatter =", "\"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table): sql = \"DROP TABLE {}\".format(table) formatter", "on_click_explore(self, table): sql = \"SELECT * FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter,", "execute=True) def on_click_explore(self, table): sql = \"SELECT * FROM {}\".format(table) formatter = \"inline\"", "sticky=\"w\", padx=(0, 20)) # Fill collapsable frame self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file,", "self._formatter = None def on_change_database(self, path): self._parent_view.open_database(path) def on_click_truncate(self, table): sql = \"DELETE", "\"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree, node, frame): node_id = node[\"node_id\"] if", "path, real_path, result, datatype, 
description) def on_map_node(self, tree, node): pass def on_destroy_node(self, tree,", "description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter", "column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install", "= nodebar_builder self._host = host self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame =", "textvariable=self._stringvar_expander, command=command) # - button edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self,", "self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree, node, frame): node_id = node[\"node_id\"] if node_id", "tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button edit button_edit =", "self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path, result, datatype, description) def on_map_node(self,", "weight=1) # - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) # Fill collapsable frame", "megawidget.tree import Hook class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host): self._parent_view = parent_view", "tree, node): pass def on_feed_node(self, tree, node, *args, **kwargs): pass def on_expand_node(self, tree,", "result, datatype, description) def on_map_node(self, tree, node): pass def on_destroy_node(self, tree, node): pass", "titlebar # - button expander command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander =", "entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # - install 
button_expander.grid(row=0, column=0,", "# - button expander command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame,", "node[\"node_id\"] if node_id == 0: return # some vars title = node[\"title\"] result", "Fill collapsable frame self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path, result, datatype,", "\"DELETE FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table): sql", "button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # -", "install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2,", "= node[\"node_id\"] if node_id == 0: return # some vars title = node[\"title\"]", "as tk from megawidget.tree import Hook class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host):", "= None self._formatter = None def on_change_database(self, path): self._parent_view.open_database(path) def on_click_truncate(self, table): sql", "node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # - entry title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\",", "weight=1) # Fill titlebar # - button expander command = (lambda tree=tree, node_id=node_id:", "tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) #", "import Hook class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder", "column=2, sticky=\"w\", padx=(0, 20)) # Fill collapsable frame self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame,", "collapsable frame 
self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path, result, datatype, description)", "self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self, tree, node_id): sql = self._stringvar_title.get()", "- install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0,", "node): pass def on_destroy_node(self, tree, node): pass def on_feed_node(self, tree, node, *args, **kwargs):", "node_id == 0: return # some vars title = node[\"title\"] result = node[\"data\"][\"result\"]", "node_id = node[\"node_id\"] if node_id == 0: return # some vars title =", "== 0: return # some vars title = node[\"title\"] result = node[\"data\"][\"result\"] datatype", "self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table): sql = \"DROP TABLE {}\".format(table) formatter =", "= (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # -", "- button edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree,", "def __init__(self, parent_view, nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder = nodebar_builder self._host =", "= tk.StringVar() self._collapsable_frame = None self._nodebar = None self._formatter = None def on_change_database(self,", "FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree, node, frame):", "build_node(self, tree, node, frame): node_id = node[\"node_id\"] if node_id == 0: return #", "title = node[\"title\"] result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description = 
node[\"data\"][\"description\"] file", "lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # - install button_expander.grid(row=0, column=0, padx=(0,", "tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame = None self._nodebar = None self._formatter = None", "node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"]", "node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2,", "tree, node): node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def", "self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # - install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\")", "node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self,", "def on_collapse_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"+\")", "self._parent_view.open_database(path) def on_click_truncate(self, table): sql = \"DELETE FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql,", "frame): node_id = node[\"node_id\"] if node_id == 0: return # some vars title", "\"+\") self._stringvar_title.set(title) # config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) #", "datatype, description) def on_map_node(self, tree, node): pass def on_destroy_node(self, tree, node): pass def", "tree, node): pass def on_destroy_node(self, tree, node): pass def on_feed_node(self, tree, node, *args,", "0: return self._stringvar_expander.set(\"-\") 
self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id = node[\"node_id\"] if node_id", "on_collapse_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove()", "self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0,", "= \"DROP TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table):", "= tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # - entry", "node, *args, **kwargs): pass def on_expand_node(self, tree, node): node_id = node[\"node_id\"] if node_id", "tree=tree: self._on_click_sql(tree, node_id)) # - install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1,", "formatter, execute=True) def on_click_drop(self, table): sql = \"DROP TABLE {}\".format(table) formatter = \"inline\"", "button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button edit button_edit = tk.Button(frame,", "- entry title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self,", "sql = \"SELECT * FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def", "on_click_truncate(self, table): sql = \"DELETE FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True)", "some vars title = node[\"title\"] result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description =", "self._formatter = node[\"data\"][\"formatter\"] # Populate 
stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) #", "= node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\" if", "# some vars title = node[\"title\"] result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description", "node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button edit button_edit", "__init__(self, parent_view, nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder = nodebar_builder self._host = host", "entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree,", "{}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table): sql = \"DROP", "execute=True) def build_node(self, tree, node, frame): node_id = node[\"node_id\"] if node_id == 0:", "on_map_node(self, tree, node): pass def on_destroy_node(self, tree, node): pass def on_feed_node(self, tree, node,", "self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0: return", "tkinter as tk from megawidget.tree import Hook class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder,", "= node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\")", "# Fill collapsable frame self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path, result,", "def on_destroy_node(self, tree, node): pass def on_feed_node(self, tree, node, *args, **kwargs): 
pass def", "# collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1, column=2,", "state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # - install", "= \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table): sql = \"SELECT * FROM", "textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # - install button_expander.grid(row=0,", "# - install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\")", "command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) #", "node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id = node[\"node_id\"]", "table): sql = \"SELECT * FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True)", "button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\")", "edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) #", "if node_id == 0: return # some vars title = node[\"title\"] result =", "== 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self, tree,", "node): node_id = node[\"node_id\"] if 
node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self,", "sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame =", "parent_view, nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder = nodebar_builder self._host = host self._stringvar_expander", "on_feed_node(self, tree, node, *args, **kwargs): pass def on_expand_node(self, tree, node): node_id = node[\"node_id\"]", "entry title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id,", "formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table): sql = \"SELECT *", "formatter, execute=True) def on_click_explore(self, table): sql = \"SELECT * FROM {}\".format(table) formatter =", "# Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config header frame", "= \"SELECT * FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self,", "header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill titlebar # -", "command=command) # - button edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id,", "real_path, result, datatype, description) def on_map_node(self, tree, node): pass def on_destroy_node(self, tree, node):", "tree, node): node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def", "self._stringvar_title = tk.StringVar() self._collapsable_frame = None self._nodebar = None self._formatter = None def", "None def 
on_change_database(self, path): self._parent_view.open_database(path) def on_click_truncate(self, table): sql = \"DELETE FROM {}\".format(table)", "real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else", "20)) # Fill collapsable frame self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path,", "def on_feed_node(self, tree, node, *args, **kwargs): pass def on_expand_node(self, tree, node): node_id =", "sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) #", "if node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id =", "else \"+\") self._stringvar_title.set(title) # config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1)", "column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\")", "parent_view self._nodebar_builder = nodebar_builder self._host = host self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar()", "on_click_drop(self, table): sql = \"DROP TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True)", "*args, **kwargs): pass def on_expand_node(self, tree, node): node_id = node[\"node_id\"] if node_id ==", "tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\",", "tk.StringVar() self._collapsable_frame = None self._nodebar = None self._formatter = None def 
on_change_database(self, path):", "sql = \"DROP TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self,", "{}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table): sql = \"SELECT", "button edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id))", "host): self._parent_view = parent_view self._nodebar_builder = nodebar_builder self._host = host self._stringvar_expander = tk.StringVar()", "pass def on_destroy_node(self, tree, node): pass def on_feed_node(self, tree, node, *args, **kwargs): pass", "= \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table): sql = \"DROP TABLE {}\".format(table)", "nodebar_builder self._host = host self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame = None", "node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id", "formatter, execute=True) def build_node(self, tree, node, frame): node_id = node[\"node_id\"] if node_id ==", "self._parent_view = parent_view self._nodebar_builder = nodebar_builder self._host = host self._stringvar_expander = tk.StringVar() self._stringvar_title", "None self._nodebar = None self._formatter = None def on_change_database(self, path): self._parent_view.open_database(path) def on_click_truncate(self,", "== 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id = node[\"node_id\"] if", "node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"]", "\"inline\" 
self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table): sql = \"SELECT * FROM {}\".format(table)", "self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1,", "from megawidget.tree import Hook class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host): self._parent_view =", "node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"]", "node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id)", "# config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill titlebar", "**kwargs): pass def on_expand_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0:", "return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id = node[\"node_id\"] if node_id ==", "sql = \"DELETE FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self,", "def on_map_node(self, tree, node): pass def on_destroy_node(self, tree, node): pass def on_feed_node(self, tree,", "= tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id))", "table): sql = \"DROP TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def", "frame self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path, result, datatype, description) def", "node): pass def 
on_feed_node(self, tree, node, *args, **kwargs): pass def on_expand_node(self, tree, node):", "config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill titlebar #", "Fill titlebar # - button expander command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander", "expander command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command)", "= tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button edit button_edit = tk.Button(frame, text=\"edit\",", "self._collapsable_frame, file, path, real_path, result, datatype, description) def on_map_node(self, tree, node): pass def", "= node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config", "node_id)) # - install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5),", "pass def on_expand_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0: return", "= node[\"title\"] result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file =", "def on_click_drop(self, table): sql = \"DROP TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter,", "frame.columnconfigure(2, weight=1) # Fill titlebar # - button expander command = (lambda tree=tree,", "stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config header frame frame.columnconfigure(0, weight=0)", "nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder = nodebar_builder self._host = host self._stringvar_expander =", 
"self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) # Fill collapsable frame self._nodebar = self._nodebar_builder.build(self, node_id,", "tree, node, *args, **kwargs): pass def on_expand_node(self, tree, node): node_id = node[\"node_id\"] if", "node_id, self._collapsable_frame, file, path, real_path, result, datatype, description) def on_map_node(self, tree, node): pass", "= node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree, node):", "frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill titlebar # - button expander", "host self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame = None self._nodebar = None", "self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0:", "sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1,", "def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self, tree, node_id): sql = self._stringvar_title.get() self._parent_view.push_sql(sql,", "text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # - entry title entry_title", "formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table): sql = \"DROP TABLE", "return # some vars title = node[\"title\"] result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"]", "self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table): sql = \"SELECT * FROM {}\".format(table) formatter", "padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, 
sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0,", "class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) # Fill", "\"DROP TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table): sql", "if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0)", "datatype = node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path", "entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # -", "{}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree, node, frame): node_id", "node[\"title\"] result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"]", "file, path, real_path, result, datatype, description) def on_map_node(self, tree, node): pass def on_destroy_node(self,", "FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table): sql =", "self._stringvar_title.set(title) # config header frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill", "self._collapsable_frame = None self._nodebar = None self._formatter = None def on_change_database(self, path): self._parent_view.open_database(path)", "frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill 
titlebar # - button expander command =", "padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame", "# - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) # Fill collapsable frame self._nodebar", "def on_expand_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"-\")", "= node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path =", "node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"]", "= None self._nodebar = None self._formatter = None def on_change_database(self, path): self._parent_view.open_database(path) def", "node): node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self,", "5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame", "button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame,", "column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0, column=1, padx=(0, 5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") #", "= node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path =", "node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # - install button_expander.grid(row=0, column=0, padx=(0, 5), sticky=\"w\") button_edit.grid(row=0,", "0: return # some vars title = node[\"title\"] result = 
node[\"data\"][\"result\"] datatype =", "import tkinter as tk from megawidget.tree import Hook class TreeHook(Hook): def __init__(self, parent_view,", "result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description = node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path", "on_destroy_node(self, tree, node): pass def on_feed_node(self, tree, node, *args, **kwargs): pass def on_expand_node(self,", "_on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self, tree, node_id): sql = self._stringvar_title.get() self._parent_view.push_sql(sql, self._formatter)", "path): self._parent_view.open_database(path) def on_click_truncate(self, table): sql = \"DELETE FROM {}\".format(table) formatter = \"inline\"", "name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button edit button_edit = tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda", "= tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1) # - install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20))", "table): sql = \"DELETE FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def", "= node[\"data\"][\"description\"] file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter =", "# - entry title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e,", "TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_explore(self, table): sql =", "self._on_click_edit(tree, node_id)) # - entry title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\",", "= None def on_change_database(self, path): 
self._parent_view.open_database(path) def on_click_truncate(self, table): sql = \"DELETE FROM", "def on_click_explore(self, table): sql = \"SELECT * FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql,", "Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config header frame frame.columnconfigure(0,", "e, self=self, node_id=node_id, tree=tree: self._on_click_sql(tree, node_id)) # - install button_expander.grid(row=0, column=0, padx=(0, 5),", "install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) # Fill collapsable frame self._nodebar = self._nodebar_builder.build(self,", "description) def on_map_node(self, tree, node): pass def on_destroy_node(self, tree, node): pass def on_feed_node(self,", "= \"DELETE FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def on_click_drop(self, table):", "Hook class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder =", "\"SELECT * FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree,", "on_expand_node(self, tree, node): node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid()", "title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda e, self=self, node_id=node_id, tree=tree:", "node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") self._stringvar_title.set(title) # config header", "formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree, node, frame): node_id =", "None self._formatter = None def on_change_database(self, path): 
self._parent_view.open_database(path) def on_click_truncate(self, table): sql =", "self._host = host self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame = None self._nodebar", "tk.Button(frame, text=\"edit\", name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # - entry title", "return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def _on_click_edit(self, tree, node_id): sql", "self._nodebar = None self._formatter = None def on_change_database(self, path): self._parent_view.open_database(path) def on_click_truncate(self, table):", "- install self._collapsable_frame.grid(row=1, column=2, sticky=\"w\", padx=(0, 20)) # Fill collapsable frame self._nodebar =", "pass def on_feed_node(self, tree, node, *args, **kwargs): pass def on_expand_node(self, tree, node): node_id", "self._nodebar_builder = nodebar_builder self._host = host self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame", "weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill titlebar # - button expander command", "node_id = node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"-\") self._collapsable_frame.grid() def on_collapse_node(self, tree,", "weight=0) frame.columnconfigure(2, weight=1) # Fill titlebar # - button expander command = (lambda", "= \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree, node, frame): node_id = node[\"node_id\"]", "# Fill titlebar # - button expander command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id))", "5), sticky=\"w\") entry_title.grid(row=0, column=2, sticky=\"nswe\") # collapsable_frame self._collapsable_frame = tk.Frame(frame, class_=\"CollapsableFrame\") self._collapsable_frame.columnconfigure(0, weight=1)", "= 
node[\"node_id\"] if node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id):", "def build_node(self, tree, node, frame): node_id = node[\"node_id\"] if node_id == 0: return", "tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button edit", "class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host): self._parent_view = parent_view self._nodebar_builder = nodebar_builder", "tk from megawidget.tree import Hook class TreeHook(Hook): def __init__(self, parent_view, nodebar_builder, host): self._parent_view", "vars title = node[\"title\"] result = node[\"data\"][\"result\"] datatype = node[\"data\"][\"type\"] description = node[\"data\"][\"description\"]", "frame frame.columnconfigure(0, weight=0) frame.columnconfigure(1, weight=0) frame.columnconfigure(2, weight=1) # Fill titlebar # - button", "node_id)) # - entry title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title) entry_title.bind(\"<Button-1>\", lambda", "* FROM {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql, formatter, execute=True) def build_node(self, tree, node,", "= host self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame = None self._nodebar =", "file = node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] #", "= node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate", "node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate stringvars self._stringvar_expander.set(\"-\" if node[\"expanded\"] else \"+\") 
self._stringvar_title.set(title)", "= parent_view self._nodebar_builder = nodebar_builder self._host = host self._stringvar_expander = tk.StringVar() self._stringvar_title =", "(lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\", textvariable=self._stringvar_expander, command=command) # - button", "name=\"buttonEdit\", command=lambda self=self, node_id=node_id, tree=tree: self._on_click_edit(tree, node_id)) # - entry title entry_title =", "if node_id == 0: return self._stringvar_expander.set(\"+\") self._collapsable_frame.grid_remove() def _on_click_sql(self, tree, node_id): tree.collexp(node_id) def", "self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path, real_path, result, datatype, description) def on_map_node(self, tree, node):", "- button expander command = (lambda tree=tree, node_id=node_id: tree.collexp(node_id)) button_expander = tk.Button(frame, name=\"treeExpanderButton\",", "padx=(0, 20)) # Fill collapsable frame self._nodebar = self._nodebar_builder.build(self, node_id, self._collapsable_frame, file, path,", "def on_change_database(self, path): self._parent_view.open_database(path) def on_click_truncate(self, table): sql = \"DELETE FROM {}\".format(table) formatter", "execute=True) def on_click_drop(self, table): sql = \"DROP TABLE {}\".format(table) formatter = \"inline\" self._parent_view.push_sql(sql,", "node[\"data\"][\"file\"] path = node[\"data\"][\"path\"] real_path = node[\"data\"][\"realpath\"] self._formatter = node[\"data\"][\"formatter\"] # Populate stringvars", "tree, node, frame): node_id = node[\"node_id\"] if node_id == 0: return # some", "tree=tree: self._on_click_edit(tree, node_id)) # - entry title entry_title = tk.Entry(frame, name=\"treeTitle\", state=\"readonly\", textvariable=self._stringvar_title)", "self._stringvar_expander = tk.StringVar() self._stringvar_title = tk.StringVar() self._collapsable_frame = None self._nodebar = None 
self._formatter" ]
[ "-> None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins,", "pytest from hypothesis import given from monty import monty from tests import strategies", "user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token:", "pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str) ->", "@given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str) -> None: user", "== dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured,", "github_access_token: Secured, invalid_github_login: str) -> None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert user['login'] ==", "None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens,", "dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, 
strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login:", "<gh_stars>0 import pytest from hypothesis import given from monty import monty from tests", "str) -> None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert user['login'] == github_login with pytest.raises(ValueError):", "from monty import monty from tests import strategies from tests.utils import Secured @given(strategies.dockerhub_logins,", "test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login", "from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None:", "strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username']", "hypothesis import given from monty import monty from tests import strategies from tests.utils", "str, invalid_dockerhub_login: str) -> None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with", "monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str) -> None:", "tests import strategies from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login:", "import pytest from hypothesis import given from monty import monty from tests import", "Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def 
test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None: user = monty.load_dockerhub_user(dockerhub_login)", "import strategies from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str)", "invalid_dockerhub_login: str) -> None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with pytest.raises(ValueError):", "Secured, invalid_github_login: str) -> None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert user['login'] == github_login", "def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] ==", "from hypothesis import given from monty import monty from tests import strategies from", "with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str)", "test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str) -> None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert", "strategies from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) ->", "from tests import strategies from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str,", "def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str) -> None: user = monty.load_github_user(github_login, 
access_token=github_access_token.value)", "monty import monty from tests import strategies from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins)", "= monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def", "strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str) -> None: user =", "-> None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert user['login'] == github_login with pytest.raises(ValueError): monty.load_github_user(invalid_github_login,", "import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None: user =", "assert user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def test_load_github_user(github_login: str,", "strategies.invalid_github_logins) def test_load_github_user(github_login: str, github_access_token: Secured, invalid_github_login: str) -> None: user = monty.load_github_user(github_login,", "tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None: user", "monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins) def 
test_load_github_user(github_login:", "str, github_access_token: Secured, invalid_github_login: str) -> None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert user['login']", "given from monty import monty from tests import strategies from tests.utils import Secured", "user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login) @given(strategies.github_logins, strategies.github_access_tokens, strategies.invalid_github_logins)", "monty from tests import strategies from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login:", "invalid_github_login: str) -> None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert user['login'] == github_login with", "None: user = monty.load_github_user(github_login, access_token=github_access_token.value) assert user['login'] == github_login with pytest.raises(ValueError): monty.load_github_user(invalid_github_login, access_token=github_access_token.value)", "import monty from tests import strategies from tests.utils import Secured @given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def", "import given from monty import monty from tests import strategies from tests.utils import", "@given(strategies.dockerhub_logins, strategies.invalid_dockerhub_logins) def test_load_dockerhub_user(dockerhub_login: str, invalid_dockerhub_login: str) -> None: user = monty.load_dockerhub_user(dockerhub_login) assert", "str) -> None: user = monty.load_dockerhub_user(dockerhub_login) assert user['username'] == dockerhub_login with pytest.raises(ValueError): monty.load_dockerhub_user(invalid_dockerhub_login)" ]
[ "__init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb = lamb self.length = nw*(nw+1)/2 #vector", "print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal =", "self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z", "np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in the paper self.hatT = np.zeros((nw,nw)) # \\hat", "= scale*self.U self.Y = scale*self.Y self.hatY = scale*self.hatY self.Z = scale*self.Z self.hatZ =", "def proj2Symmetric(self,A): n = A.shape[0] for i in xrange(n): for j in xrange(i+1,n):", "np.ones(T.shape) nw = T.shape[0] for i in range(nw): for j in range(i+1,nw): P[i,j]", "and (res_dual <= e_dual) return (stop, res_pri, e_pri, res_dual, e_dual) # solve def", "1 assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z", "print 'Incomplete: max iterations reached', i if i != 0: stop, res_pri, e_pri,", "def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ =", "= copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre =", "copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT()", "self.hatY = np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta & T self.hatZ = np.zeros((nw,nw))", "update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0]", "self.theta = theta assert self.check_symmetric(self.theta) 
self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw)", "theta[i,j] self.theta = theta assert self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta =", "numpy as np import copy import time class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None):", "np.cov(np.transpose(hat_x)) self.nw = nw; self.m = x.shape[0] self.hatm = hat_x.shape[0] self.theta = np.zeros((nw,nw))", "= (A[i,j]+A[j,i])/2 # if mean<0: # mean = 0 A[i,j] = mean A[j,i]", "range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY", "max iterations reached', i if i != 0: stop, res_pri, e_pri, res_dual, e_dual", "e_pri = self.nw * e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2))", "= np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in the paper self.hatT = np.zeros((nw,nw)) #", "= [] self.eprilist = [] self.edualist = [] self.objlist = [] # In[]", "P[j,i] = P[i,j] return P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V =", "self.hatm = hat_x.shape[0] self.theta = np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ =", "np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta", "self.T[i,i]+self.Z[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j]", "10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale =", "hat_theta[i,j] self.hat_theta = hat_theta assert 
self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V =", "self.hat_theta = hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho)", "np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma", "ADMM model 7: lambdaADMM = ',self.lamb self.status = 'Incomplete: max iterations reached' t1", "1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta = theta assert self.check_symmetric(self.theta)", "= (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D", "res_pri print ' e_pri:', e_pri print ' s:', res_dual print ' e_dual:', e_dual", "res_dual print ' e_dual:', e_dual break # new_rho = self.rho threshold = 10", "np.zeros((nw,nw)) # theta & T self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P", "= copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre =", "self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY =", "= np.cov(np.transpose(hat_x)) self.nw = nw; self.m = x.shape[0] self.hatm = hat_x.shape[0] self.theta =", "# self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw) for i in range(self.nw): theta[i,i] =", "for CPM-C model with parital correlation based translation function import numpy as np", "nw; self.m = x.shape[0] self.hatm = hat_x.shape[0] self.theta = np.zeros((nw,nw)) 
self.hat_theta = np.zeros((nw,nw))", "theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta =", "return (stop, res_pri, e_pri, res_dual, e_dual) # solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000):", "for j in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] =", "= self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 =", "self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 = self.T -self.theta", "= mean A[j,i] = mean return A def update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for", "P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta()", "(LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw) for", "time.time() for i in range(admmMaxIters): self.iter = i Q_pre = copy.deepcopy(self.Q) hatQ_pre =", "stop: self.status = 'Optimal' if verbose: print \"Admm stop early at Iteration \",i", "= np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) #", "for i in xrange(n): for j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if", "check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) # In[] update variables def update_T(self): LAMBDA,D", "for j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if mean<0: # mean =", 
"np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri = self.nw * e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta),", "iterations reached' t1 = time.time() for i in range(admmMaxIters): self.iter = i Q_pre", "#For ICDM review only, please do not distribute #ALL RIGHTS RESERVED #ADMM solver", "self.rho threshold = 10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho =", "result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0] = np.reshape(result,(self.length,)) retVal[:,1] = np.reshape(hatresult,(self.length,)) return", "theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS)", "(res_dual <= e_dual) return (stop, res_pri, e_pri, res_dual, e_dual) # solve def __call__(self,", "nw*(nw+1)/2 #vector length of trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw", "do not distribute #ALL RIGHTS RESERVED #ADMM solver for CPM-C model with parital", "with parital correlation based translation function import numpy as np import copy import", "theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre", "stop early at Iteration \",i print ' r:', res_pri print ' e_pri:', e_pri", "norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop =", "= np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw))", "theta[j,i] = theta[i,j] self.theta = theta assert 
self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self):", "if 'Singular matrix' in str(err): print 'Encounter LinAlgError: Singular matrix, exit ADMM' break", "as np import copy import time class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho", "np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT())", "max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2)))", "copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP)", "e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri =", "np.zeros((nw,nw)) # P & V self.plist = [] self.dlist = [] self.eprilist =", "= np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r", "(self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP", "= np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) #", "np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P & V self.plist = [] self.dlist =", "in range(nw): for j in range(i+1,nw): P[i,j] = 
-T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return", "P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def", "theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 = self.T -self.theta r2 = self.hatT-self.hat_theta r3 =", "s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre s4 = self.P-P_pre s5 = self.hatP -", "solver ADMM model 7: lambdaADMM = ',self.lamb self.status = 'Incomplete: max iterations reached'", "e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop: self.status =", "<= e_dual) return (stop, res_pri, e_pri, res_dual, e_dual) # solve def __call__(self, eps_abs,", "hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ =", "in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for j in range(i+1,self.nw):", "myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb = lamb self.length =", "(res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale = self.rho /", "for j in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return P def", "Singular matrix, exit ADMM' break else: raise # if i>=admmMaxIters-1: print 'Incomplete: max", "'Encounter LinAlgError: Singular matrix, exit ADMM' break else: raise # if i>=admmMaxIters-1: print", "r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 = self.P-self.Q r5 = self.hatP-self.hatQ allR", "self.nw * e_abs + e_rel * 
max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) * e_abs", "= P[i,j] return P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT)", "s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre s4 = self.P-P_pre s5", "np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D =", "rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb = lamb self.length = nw*(nw+1)/2 #vector length of", "self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <= e_pri) and (res_dual <= e_dual) return", "self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V =", "assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z =", "T.shape[0] for i in range(nw): for j in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i]", "self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre,", "j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta", "for j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j]", "in range(self.nw): for j in range(i+1,self.nw): c = 
1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1)", "# print '\\n solver ADMM model 7: lambdaADMM = ',self.lamb self.status = 'Incomplete:", "ADMM' break else: raise # if i>=admmMaxIters-1: print 'Incomplete: max iterations reached', i", "= self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A):", "allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri = self.nw * e_abs + e_rel", "self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def", "self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if 'Singular matrix' in str(err): print 'Encounter", "length of trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw;", "# theta & T self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P &", "range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta)))", "= (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta = theta assert self.check_symmetric(self.theta) self.Q =", "e_dual) # solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n solver ADMM", "self.objlist = [] # In[] help/debug methods def computePartialCorrelation(self, T): P = np.ones(T.shape)", "= 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta assert 
self.check_symmetric(self.hat_theta)", "eps_rel, verbose) if stop: self.status = 'Optimal' if verbose: print \"Admm stop early", "mean = 0 A[i,j] = mean A[j,i] = mean return A def update_P(self):", "correlation based translation function import numpy as np import copy import time class", "self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P) def", "if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2]) result =", "in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i in range(self.nw): for j in range(i+1,self.nw):", "def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT", "e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual = s", "print '\\n solver ADMM model 7: lambdaADMM = ',self.lamb self.status = 'Incomplete: max", "update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P)", "paper self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V", "8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw) for i", "allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r = norm(allR) s1 = self.Q-Q_pre s2", "RESERVED #ADMM solver for CPM-C model with parital correlation based translation function import", "= nw*(nw+1)/2 #vector length of trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x))", "-T[i,j]/np.sqrt(T[i,i]*T[j,j]) 
P[j,i] = P[i,j] return P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V", "copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V()", "return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return", "CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 = self.T -self.theta r2 = self.hatT-self.hat_theta r3", "return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) # In[] update variables", "= self.V-self.P+self.hatP r4 = self.P-self.Q r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm =", "as err: if 'Singular matrix' in str(err): print 'Encounter LinAlgError: Singular matrix, exit", "range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c", "(self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta = theta assert self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta)", "self.iter = i Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre", "self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in", "range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c", "return P def 
h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2)", "print ' e_pri:', e_pri print ' s:', res_dual print ' e_dual:', e_dual break", "eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n solver ADMM model 7: lambdaADMM = ',self.lamb", "# solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n solver ADMM model", "0: stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if", "= np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r = norm(allR) s1 = self.Q-Q_pre s2 =", "r = norm(allR) s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre s4", "self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw) for i in range(self.nw):", "e_dual break # new_rho = self.rho threshold = 10 if (res_pri>threshold*res_dual): new_rho =", "hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre", "np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y", "solver for CPM-C model with parital correlation based translation function import numpy as", "[] self.dlist = [] self.eprilist = [] self.edualist = [] self.objlist = []", "(np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual)", "'Optimal' if verbose: print \"Admm stop early at Iteration \",i print ' r:',", "r4 = 
self.P-self.Q r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r", "\",i print ' r:', res_pri print ' e_pri:', e_pri print ' s:', res_dual", "i in range(nw): for j in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j]", "np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii =", "except np.linalg.LinAlgError as err: if 'Singular matrix' in str(err): print 'Encounter LinAlgError: Singular", "= theta[i,j] self.theta = theta assert self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta", "= i Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre =", "1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ", "res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <= e_pri) and", "[] # In[] help/debug methods def computePartialCorrelation(self, T): P = np.ones(T.shape) nw =", "= (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P) def update_hatP(self):", "if verbose: print \"Admm stop early at Iteration \",i print ' r:', res_pri", "= r res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <=", "= 'Optimal' if verbose: print \"Admm stop early at Iteration \",i print '", "if i != 0: stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs,", "s:', res_dual print ' 
e_dual:', e_dual break # new_rho = self.rho threshold =", "break # new_rho = self.rho threshold = 10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho", "self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw) for i in", "scale*self.U self.Y = scale*self.Y self.hatY = scale*self.hatY self.Z = scale*self.Z self.hatZ = scale*self.hatZ", "self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA()", "theta assert self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw) for i", "= ',self.lamb self.status = 'Incomplete: max iterations reached' t1 = time.time() for i", "in range(admmMaxIters): self.iter = i Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre =", "i in xrange(n): for j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if mean<0:", "= np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in the", "= np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta & T self.hatZ = np.zeros((nw,nw)) self.U", "def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb = lamb self.length = nw*(nw+1)/2", "= T.shape[0] for i in range(nw): for j in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j])", "range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i]", "= 1 assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY", "theta & T self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P & V", "norm = np.linalg.norm r = 
norm(allR) s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3", "matrix, exit ADMM' break else: raise # if i>=admmMaxIters-1: print 'Incomplete: max iterations", "avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2]) result", "*( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop", "err: if 'Singular matrix' in str(err): print 'Encounter LinAlgError: Singular matrix, exit ADMM'", "i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2)", "the paper self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw))", "lamb self.length = nw*(nw+1)/2 #vector length of trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS", "update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T)", "scale*self.Y self.hatY = scale*self.hatY self.Z = scale*self.Z self.hatZ = scale*self.hatZ t2 = time.time()", "scale*self.hatZ t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50:", "range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta", "np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0] for i in xrange(n): for j in", "= new_rho self.U = scale*self.U self.Y = scale*self.Y self.hatY = 
scale*self.hatY self.Z =", "self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err:", "' r:', res_pri print ' e_pri:', e_pri print ' s:', res_dual print '", "(self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) # def update_duals(self):", "self.V-self.P+self.hatP r4 = self.P-self.Q r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm", "/ new_rho self.rho = new_rho self.U = scale*self.U self.Y = scale*self.Y self.hatY =", "e_abs, e_rel, verbose): r1 = self.T -self.theta r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP", "i != 0: stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel,", "A.shape[0] for i in xrange(n): for j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 #", "= -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self):", "self.U = np.zeros((nw,nw)) # P & V self.plist = [] self.dlist = []", "P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def", "verbose): r1 = self.T -self.theta r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 =", "V self.plist = [] self.dlist = [] self.eprilist = [] self.edualist = []", "def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) # In[] update variables def update_T(self):", "if stop: self.status = 'Optimal' if verbose: print \"Admm stop early at Iteration", "* e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) 
e_dual = np.sqrt((self.nw**2)) * e_abs +", "e_pri) and (res_dual <= e_dual) return (stop, res_pri, e_pri, res_dual, e_dual) # solve", "trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw; self.m =", "self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw) for i in range(self.nw): hat_theta[i,i]", "= (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw)", "= 10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale", "self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if 'Singular matrix' in str(err):", "self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in the paper self.hatT", "def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n =", "new_rho = self.rho/2.0 scale = self.rho / new_rho self.rho = new_rho self.U =", "e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop: self.status = 'Optimal' if", "in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta", "copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V)", "= [] self.objlist = [] # In[] help/debug methods def computePartialCorrelation(self, T): P", "A[j,i] = mean return A def update_P(self): self.P = 
(self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in", "self.dlist = [] self.eprilist = [] self.edualist = [] self.objlist = [] #", "def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 = self.T -self.theta r2 = self.hatT-self.hat_theta", "distribute #ALL RIGHTS RESERVED #ADMM solver for CPM-C model with parital correlation based", "\\Gamma in the paper self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP", "exit ADMM' break else: raise # if i>=admmMaxIters-1: print 'Incomplete: max iterations reached',", "\"Admm stop early at Iteration \",i print ' r:', res_pri print ' e_pri:',", "not distribute #ALL RIGHTS RESERVED #ADMM solver for CPM-C model with parital correlation", "In[] help/debug methods def computePartialCorrelation(self, T): P = np.ones(T.shape) nw = T.shape[0] for", "assert self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw) for i in", "s = norm(allS) e_pri = self.nw * e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V))", "= 'Incomplete: max iterations reached' t1 = time.time() for i in range(admmMaxIters): self.iter", "max iterations reached' t1 = time.time() for i in range(admmMaxIters): self.iter = i", "np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0] = np.reshape(result,(self.length,)) retVal[:,1] = np.reshape(hatresult,(self.length,))", "function import numpy as np import copy import time class myADMMSolver: def __init__(self,lamb,", "self.Z = np.zeros((nw,nw)) # theta & T self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw))", "(A[i,j]+A[j,i])/2 # if mean<0: # mean = 0 A[i,j] = mean A[j,i] =", "self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z 
= self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U", "np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r = norm(allR) s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre", "'\\n solver ADMM model 7: lambdaADMM = ',self.lamb self.status = 'Incomplete: max iterations", "= copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre =", "self.m = x.shape[0] self.hatm = hat_x.shape[0] self.theta = np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q=", "j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if mean<0: # mean = 0", "copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError", "\" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2])", "early at Iteration \",i print ' r:', res_pri print ' e_pri:', e_pri print", "LinAlgError: Singular matrix, exit ADMM' break else: raise # if i>=admmMaxIters-1: print 'Incomplete:", "self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0] for i in xrange(n): for", "parital correlation based translation function import numpy as np import copy import time", "r:', res_pri print ' e_pri:', e_pri print ' s:', res_dual print ' e_dual:',", "= copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except", "self.status = 'Optimal' if verbose: print \"Admm stop early at Iteration \",i print", "= np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY = 
np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta", "= copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T()", "= theta assert self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw) for", "(LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D =", "e_rel, verbose): r1 = self.T -self.theta r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4", "V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall())", "new_rho = self.rho threshold = 10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual):", "def computePartialCorrelation(self, T): P = np.ones(T.shape) nw = T.shape[0] for i in range(nw):", "= A.shape[0] for i in xrange(n): for j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2", "V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) #", "assert self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i] =", "np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw; self.m = x.shape[0] self.hatm = hat_x.shape[0]", "t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime", "= 
self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) # In[]", "= np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0] = np.reshape(result,(self.length,)) retVal[:,1] =", "review only, please do not distribute #ALL RIGHTS RESERVED #ADMM solver for CPM-C", "= np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri = self.nw * e_abs + e_rel *", "= self.rho/2.0 scale = self.rho / new_rho self.rho = new_rho self.U = scale*self.U", "= self.nw * e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) *", "self.rho = new_rho self.U = scale*self.U self.Y = scale*self.Y self.hatY = scale*self.hatY self.Z", "i Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta)", "verbose: print \"Admm stop early at Iteration \",i print ' r:', res_pri print", "self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0] for", "self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1", "= lamb self.length = nw*(nw+1)/2 #vector length of trianglular matrix self.S = np.cov(np.transpose(x))", "e_pri, res_dual, e_dual) # solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n", "hat_x.shape[0] self.theta = np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) 
self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T", "proj2Symmetric(self,A): n = A.shape[0] for i in xrange(n): for j in xrange(i+1,n): mean", "self.hatQ-hatQ_pre s3 = self.V-V_pre s4 = self.P-P_pre s5 = self.hatP - hatP_pre allS", "= self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0] for i", "assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0] for i in xrange(n):", "self.check_symmetric(self.hatP) # def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z", "s4 = self.P-P_pre s5 = self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s =", "def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a,", "self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) # In[] update variables def", "= self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 = self.T -self.theta r2", "* max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho *(", "mean return A def update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i]", "(res_pri <= e_pri) and (res_dual <= e_dual) return (stop, res_pri, e_pri, res_dual, e_dual)", "= 
1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta = theta assert", "= [] self.dlist = [] self.eprilist = [] self.edualist = [] self.objlist =", "self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) #", "for i in range(nw): for j in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] =", "* (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri)", "= time.time() for i in range(admmMaxIters): self.iter = i Q_pre = copy.deepcopy(self.Q) hatQ_pre", "self.Z = scale*self.Z self.hatZ = scale*self.hatZ t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1) #", "= np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T())", "self.P-self.Q r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r = norm(allR)", "(threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale = self.rho / new_rho self.rho = new_rho self.U", "print ' s:', res_dual print ' e_dual:', e_dual break # new_rho = self.rho", "if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale = self.rho", "tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) # In[] update variables def update_T(self): LAMBDA,D =", "# self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+", "def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n solver ADMM model 7: lambdaADMM", "# new_rho = self.rho 
threshold = 10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif", "new_rho self.rho = new_rho self.U = scale*self.U self.Y = scale*self.Y self.hatY = scale*self.hatY", "for i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for j", "self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw; self.m = x.shape[0] self.hatm = hat_x.shape[0] self.theta", "hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try:", "= self.T[i,i]+self.Z[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j])", "print \"Admm stop early at Iteration \",i print ' r:', res_pri print '", "h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3):", "in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i]", "self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if 'Singular matrix' in", "= (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta)", "i in range(admmMaxIters): self.iter = i Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre", "self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 = self.T -self.theta r2 
=", "range(nw): for j in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return P", "for i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] =", "hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V)", "Iteration \",i print ' r:', res_pri print ' e_pri:', e_pri print ' s:',", "n = A.shape[0] for i in xrange(n): for j in xrange(i+1,n): mean =", "= copy.deepcopy(self.P) hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta()", "self.Z = self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs,", "self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw; self.m = x.shape[0] self.hatm", "= np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw) for i in range(self.nw):", "theta[i,i] = self.T[i,i]+self.Z[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c =", "self.plist = [] self.dlist = [] self.eprilist = [] self.edualist = [] self.objlist", "= np.linalg.norm r = norm(allR) s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 =", "if i>=admmMaxIters-1: print 'Incomplete: max iterations reached', i if i != 0: stop,", "in range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y = self.P-self.Q+self.Y", "in str(err): print 'Encounter LinAlgError: Singular matrix, exit ADMM' break else: raise #", "np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY = 
np.zeros((nw,nw))", "# def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ", "in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return P def h(self,theta,S,m): return", "help/debug methods def computePartialCorrelation(self, T): P = np.ones(T.shape) nw = T.shape[0] for i", "e_pri print ' s:', res_dual print ' e_dual:', e_dual break # new_rho =", "np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta & T self.hatZ = np.zeros((nw,nw)) self.U =", "e_dual:', e_dual break # new_rho = self.rho threshold = 10 if (res_pri>threshold*res_dual): new_rho", "self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 = self.P-self.Q r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5))", "try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as", "',self.lamb self.status = 'Incomplete: max iterations reached' t1 = time.time() for i in", "c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta = theta assert self.check_symmetric(self.theta) self.Q = self.computePartialCorrelation(self.theta) def", "np.transpose(a), atol=tol) # In[] update variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D =", "i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for j in", "np.eye(self.nw) for i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for", "np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def", "0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def 
obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a,", "saveTime.close() retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0] = np.reshape(result,(self.length,))", "else: raise # if i>=admmMaxIters-1: print 'Incomplete: max iterations reached', i if i", "of trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw; self.m", "in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta =", "scale*self.Z self.hatZ = scale*self.hatZ t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1) # print \"", "Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre", "# if i>=admmMaxIters-1: print 'Incomplete: max iterations reached', i if i != 0:", "In[] update variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii =", "range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] =", "self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol) # In[] update", "c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- 
c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta = theta", "r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r = norm(allR) s1", "= self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r = norm(allR) s1 =", "r3 = self.V-self.P+self.hatP r4 = self.P-self.Q r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm", "[] self.objlist = [] # In[] help/debug methods def computePartialCorrelation(self, T): P =", "mean = (A[i,j]+A[j,i])/2 # if mean<0: # mean = 0 A[i,j] = mean", "stop = (res_pri <= e_pri) and (res_dual <= e_dual) return (stop, res_pri, e_pri,", "!= 0: stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose)", "self.lamb = lamb self.length = nw*(nw+1)/2 #vector length of trianglular matrix self.S =", "self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r = norm(allR) s1 = self.Q-Q_pre", "= np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta & T self.hatZ", "= [] # In[] help/debug methods def computePartialCorrelation(self, T): P = np.ones(T.shape) nw", "= self.P-P_pre s5 = self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS)", "s5 = self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri =", "7: lambdaADMM = ',self.lamb self.status = 'Incomplete: max iterations reached' t1 = time.time()", "# \\Gamma in the paper self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw))", "self.hatY = scale*self.hatY self.Z = scale*self.Z self.hatZ = scale*self.hatZ t2 = time.time() avgIterTime", "import time class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb =", "self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if 'Singular matrix' in str(err): print 
'Encounter LinAlgError:", "= (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ')", "' e_pri:', e_pri print ' s:', res_dual print ' e_dual:', e_dual break #", "np.linalg.LinAlgError as err: if 'Singular matrix' in str(err): print 'Encounter LinAlgError: Singular matrix,", "self.P[i,i] = 1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in", "update_theta(self): theta = np.eye(self.nw) for i in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i", "= self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop: self.status = 'Optimal' if verbose:", "based translation function import numpy as np import copy import time class myADMMSolver:", "= self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 = self.P-self.Q r5 = self.hatP-self.hatQ allR =", "computePartialCorrelation(self, T): P = np.ones(T.shape) nw = T.shape[0] for i in range(nw): for", "self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U", "def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T =", "self.P-P_pre s5 = self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri", "np import copy import time class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho =", "[] self.eprilist = [] self.edualist = [] self.objlist = [] # In[] help/debug", "np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z = np.zeros((nw,nw))", "s3 = self.V-V_pre s4 = 
self.P-P_pre s5 = self.hatP - hatP_pre allS =", "# P & V self.plist = [] self.dlist = [] self.eprilist = []", "self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D)", "self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0", "str(err): print 'Encounter LinAlgError: Singular matrix, exit ADMM' break else: raise # if", "= np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D", "= mean return A def update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw):", "= open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult =", "- hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri = self.nw * e_abs", "= nw; self.m = x.shape[0] self.hatm = hat_x.shape[0] self.theta = np.zeros((nw,nw)) self.hat_theta =", "i if i != 0: stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre,", "atol=tol) # In[] update variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D)", "copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta) hat_theta_pre = copy.deepcopy(self.hat_theta) P_pre = copy.deepcopy(self.P)", "= 2*self.rho elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale = self.rho / 
new_rho self.rho", "scale = self.rho / new_rho self.rho = new_rho self.U = scale*self.U self.Y =", "in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if mean<0: # mean = 0 A[i,j]", "self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j]", "self.rho / new_rho self.rho = new_rho self.U = scale*self.U self.Y = scale*self.Y self.hatY", "P & V self.plist = [] self.dlist = [] self.eprilist = [] self.edualist", "self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY =", "self.T -self.theta r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 = self.P-self.Q r5 =", "self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in the paper self.hatT = np.zeros((nw,nw))", "np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw) for i in range(self.nw): theta[i,i]", "self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop: self.status = 'Optimal' if verbose: print", "= [] self.edualist = [] self.objlist = [] # In[] help/debug methods def", "self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose):", "LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) #", "& V self.plist = [] self.dlist = [] self.eprilist = [] self.edualist =", "self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if 'Singular matrix' in str(err): print", "-self.theta r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 = 
self.P-self.Q r5 = self.hatP-self.hatQ", "t1 = time.time() for i in range(admmMaxIters): self.iter = i Q_pre = copy.deepcopy(self.Q)", "= np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0] = np.reshape(result,(self.length,)) retVal[:,1] = np.reshape(hatresult,(self.length,)) return retVal", "CPM-C model with parital correlation based translation function import numpy as np import", "range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta =", "' s:', res_dual print ' e_dual:', e_dual break # new_rho = self.rho threshold", "matrix' in str(err): print 'Encounter LinAlgError: Singular matrix, exit ADMM' break else: raise", "= np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def update_theta(self):", "= self.rho / new_rho self.rho = new_rho self.U = scale*self.U self.Y = scale*self.Y", "copy import time class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb", "range(admmMaxIters): self.iter = i Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ) theta_pre = copy.deepcopy(self.theta)", "<reponame>qingzheli/partial-correlation-based-contrast-pattern-mining<gh_stars>1-10 #For ICDM review only, please do not distribute #ALL RIGHTS RESERVED #ADMM", "D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self):", "class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb = lamb self.length", "= self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw) for i in 
range(self.nw): hat_theta[i,i] =", "model with parital correlation based translation function import numpy as np import copy", "#vector length of trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw =", "only, please do not distribute #ALL RIGHTS RESERVED #ADMM solver for CPM-C model", "= hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert", "i in range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2", "norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri", "# In[] help/debug methods def computePartialCorrelation(self, T): P = np.ones(T.shape) nw = T.shape[0]", "* e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual =", "break else: raise # if i>=admmMaxIters-1: print 'Incomplete: max iterations reached', i if", "# In[] update variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii", "self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho)", "= self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U =", "for i in range(admmMaxIters): self.iter = i Q_pre = copy.deepcopy(self.Q) hatQ_pre = copy.deepcopy(self.hatQ)", "= x.shape[0] self.hatm = hat_x.shape[0] self.theta 
= np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw))", "'Singular matrix' in str(err): print 'Encounter LinAlgError: Singular matrix, exit ADMM' break else:", "translation function import numpy as np import copy import time class myADMMSolver: def", "# if mean<0: # mean = 0 A[i,j] = mean A[j,i] = mean", "self.theta = np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw))", "' e_dual:', e_dual break # new_rho = self.rho threshold = 10 if (res_pri>threshold*res_dual):", "= self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel,", "= (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) # def", "self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n", "self.Y = scale*self.Y self.hatY = scale*self.hatY self.Z = scale*self.Z self.hatZ = scale*self.hatZ t2", "#ALL RIGHTS RESERVED #ADMM solver for CPM-C model with parital correlation based translation", "xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if mean<0: # mean = 0 A[i,j] =", "matrix self.S = np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw; self.m = x.shape[0]", "stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop:", "= self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre s4 = self.P-P_pre s5 =", "self.hatY = self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def", "= 
np.cov(np.transpose(x)) self.hatS = np.cov(np.transpose(hat_x)) self.nw = nw; self.m = x.shape[0] self.hatm =", "P = np.ones(T.shape) nw = T.shape[0] for i in range(nw): for j in", "self.V-V_pre s4 = self.P-P_pre s5 = self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s", "lambdaADMM = ',self.lamb self.status = 'Incomplete: max iterations reached' t1 = time.time() for", "e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho", "i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]-", "new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale = self.rho / new_rho", "__call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n solver ADMM model 7: lambdaADMM =", "variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho) self.T", "A def update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i] = 1", "def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i] = 1 assert", "model 7: lambdaADMM = ',self.lamb self.status = 'Incomplete: max iterations reached' t1 =", "verbose,admmMaxIters=1000): # print '\\n solver ADMM model 7: lambdaADMM = ',self.lamb self.status =", "at Iteration \",i print ' r:', res_pri print ' e_pri:', e_pri print '", "for i in range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y", "mean A[j,i] = mean return A def update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i", "= 0 A[i,j] = mean A[j,i] = mean return A def update_P(self): self.P", "import numpy as np import copy import time class 
myADMMSolver: def __init__(self,lamb, nw,", "np.allclose(a, np.transpose(a), atol=tol) # In[] update variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D", "= np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z =", "'Incomplete: max iterations reached', i if i != 0: stop, res_pri, e_pri, res_dual,", "assert self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V = self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert", "[] self.edualist = [] self.objlist = [] # In[] help/debug methods def computePartialCorrelation(self,", "retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0] = np.reshape(result,(self.length,)) retVal[:,1]", "= self.V-V_pre s4 = self.P-P_pre s5 = self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho", "for i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] =", "elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0 scale = self.rho / new_rho self.rho = new_rho", "\\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw))", "please do not distribute #ALL RIGHTS RESERVED #ADMM solver for CPM-C model with", "res_pri, e_pri, res_dual, e_dual) # solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print", "np.sqrt((self.nw**2)) * e_abs + e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual", "A[i,j] = mean A[j,i] = mean return A def update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2", "= norm(allR) s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre s4 
=", "= self.rho threshold = 10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho", "= norm(allS) e_pri = self.nw * e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual", "print 'Encounter LinAlgError: Singular matrix, exit ADMM' break else: raise # if i>=admmMaxIters-1:", "nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb = lamb self.length = nw*(nw+1)/2 #vector length", "def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a),", "= time.time() avgIterTime = (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime =", "= 1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw):", "self.length = nw*(nw+1)/2 #vector length of trianglular matrix self.S = np.cov(np.transpose(x)) self.hatS =", "hatP_pre = copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P()", "eps_abs, eps_rel, verbose) if stop: self.status = 'Optimal' if verbose: print \"Admm stop", "self.nw = nw; self.m = x.shape[0] self.hatm = hat_x.shape[0] self.theta = np.zeros((nw,nw)) self.hat_theta", "def update_theta(self): theta = np.eye(self.nw) for i in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for", "update_hat_theta(self): hat_theta = np.eye(self.nw) for i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i", "self.edualist.append(e_dual) stop = (res_pri <= e_pri) and (res_dual <= e_dual) return (stop, res_pri,", "print ' r:', res_pri print ' e_pri:', e_pri print ' 
s:', res_dual print", "norm(allR) s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre s4 = self.P-P_pre", "= hat_x.shape[0] self.theta = np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw))", "self.rho/2.0 scale = self.rho / new_rho self.rho = new_rho self.U = scale*self.U self.Y", "rho self.lamb = lamb self.length = nw*(nw+1)/2 #vector length of trianglular matrix self.S", "np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in the paper self.hatT =", "np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta &", "update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP)", "range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i", "i>=admmMaxIters-1: print 'Incomplete: max iterations reached', i if i != 0: stop, res_pri,", "LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T)", "= self.T -self.theta r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 = self.P-self.Q r5", "(self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def", "= self.P-self.Q r5 = self.hatP-self.hatQ allR = np.concatenate((r1,r2,r3,r4,r5)) norm = np.linalg.norm r =", "= np.zeros((nw,nw)) # P & V self.plist = [] self.dlist = [] self.eprilist", "self.U = scale*self.U self.Y = scale*self.Y self.hatY = scale*self.hatY self.Z = scale*self.Z self.hatZ", "return A def update_P(self): self.P = 
(self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i] =", "self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <= e_pri) and (res_dual <= e_dual)", "verbose) if stop: self.status = 'Optimal' if verbose: print \"Admm stop early at", "(t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close()", "# print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal", "<= e_pri) and (res_dual <= e_dual) return (stop, res_pri, e_pri, res_dual, e_dual) #", "self.Y = np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta & T", "res_pri = r res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri", "= scale*self.Y self.hatY = scale*self.hatY self.Z = scale*self.Z self.hatZ = scale*self.hatZ t2 =", "+ e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual = s self.plist.append(r)", "res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop: self.status", "if mean<0: # mean = 0 A[i,j] = mean A[j,i] = mean return", "1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i]", "= s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <= e_pri) and (res_dual", "self.rho*(self.P-self.hatP-self.U)/(2*self.lamb+self.rho) assert self.check_symmetric(self.V) assert 
np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0] for i in", "solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n solver ADMM model 7:", "RIGHTS RESERVED #ADMM solver for CPM-C model with parital correlation based translation function", "self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P & V self.plist = []", "= rho self.lamb = lamb self.length = nw*(nw+1)/2 #vector length of trianglular matrix", "= scale*self.hatZ t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if", "'Incomplete: max iterations reached' t1 = time.time() for i in range(admmMaxIters): self.iter =", "np.zeros((nw,nw)) self.hatY = np.zeros((nw,nw)) self.Z = np.zeros((nw,nw)) # theta & T self.hatZ =", "reached' t1 = time.time() for i in range(admmMaxIters): self.iter = i Q_pre =", "self.eprilist = [] self.edualist = [] self.objlist = [] # In[] help/debug methods", "copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals()", "avgIterTime = (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+'", "\\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y = np.zeros((nw,nw)) self.hatY", "for i in range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP =", "self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i] = 1", "= self.hatQ-hatQ_pre s3 = self.V-V_pre s4 = self.P-P_pre s5 = self.hatP - hatP_pre", "hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri = self.nw * e_abs +", "np.eye(self.nw) for i in 
range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i in range(self.nw): for", "# \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP = np.zeros((nw,nw)) self.V = np.zeros((nw,nw)) self.Y =", "open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA()", "+ e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel *", "P[i,j] return P def h(self,theta,S,m): return 0.5*m*(np.trace(np.dot(S,theta))-np.log(np.linalg.det(theta))) def obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return", "in xrange(n): for j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if mean<0: #", "self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if 'Singular", "#ADMM solver for CPM-C model with parital correlation based translation function import numpy", "def update_hat_theta(self): hat_theta = np.eye(self.nw) for i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for", "= np.dot(np.dot(D,np.diag(theii)),D.T) # self.objT.append(self.objective_T()) def update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii", "e_pri:', e_pri print ' s:', res_dual print ' e_dual:', e_dual break # new_rho", "hat_theta = np.eye(self.nw) for i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in", "methods def computePartialCorrelation(self, T): P = np.ones(T.shape) nw = T.shape[0] for i in", "i in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i in 
range(self.nw): for j in", "hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c =", "print ' e_dual:', e_dual break # new_rho = self.rho threshold = 10 if", "scale*self.hatY self.Z = scale*self.Z self.hatZ = scale*self.hatZ t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1)", "saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0]", "D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT = np.dot(np.dot(D,np.diag(theii)),D.T) # self.objhatT.append(self.objective_hatT()) def", "update_hatT(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.hat_theta-self.hatZ)-self.hatm*self.hatS) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+ 8*self.rho*self.hatm))/(4*self.rho) self.hatT =", "') saveTime.close() retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult = np.asmatrix(self.hatT[np.triu_indices(self.nw)]).T.getA() retVal[:,0] =", "ICDM review only, please do not distribute #ALL RIGHTS RESERVED #ADMM solver for", "xrange(n): for j in xrange(i+1,n): mean = (A[i,j]+A[j,i])/2 # if mean<0: # mean", "c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j]) hat_theta[i,j] = (self.hatT[i,j]+self.hatZ[i,j]-c*self.hatP[i,j]-c*self.hatY[i,j])/(1+c**2) hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta assert", "T self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P & V self.plist =", "= np.eye(self.nw) for i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw):", "time.time() avgIterTime = (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime if self.nw>=50: saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a')", "2*self.rho elif (threshold*res_pri<res_dual): new_rho = 
self.rho/2.0 scale = self.rho / new_rho self.rho =", "i in range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) # def update_duals(self): self.Y =", "# mean = 0 A[i,j] = mean A[j,i] = mean return A def", "0 A[i,j] = mean A[j,i] = mean return A def update_P(self): self.P =", "& T self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P & V self.plist", "= self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri = self.nw", "= np.eye(self.nw) for i in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i in range(self.nw):", "nw = T.shape[0] for i in range(nw): for j in range(i+1,nw): P[i,j] =", "self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <= e_pri) and (res_dual <= e_dual) return (stop,", "self.computePartialCorrelation(self.theta) def update_hat_theta(self): hat_theta = np.eye(self.nw) for i in range(self.nw): hat_theta[i,i] = self.hatT[i,i]+self.hatZ[i,i]", "self.edualist = [] self.objlist = [] # In[] help/debug methods def computePartialCorrelation(self, T):", "return np.allclose(a, np.transpose(a), atol=tol) # In[] update variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S)", "obj_overall(self): V = self.computePartialCorrelation(self.T)-self.computePartialCorrelation(self.hatT) return self.h(self.T,self.S,self.m)+self.h(self.hatT,self.hatS,self.hatm)+self.lamb*(np.linalg.norm(V)**2) def check_symmetric(self,a, tol=1e-3): return np.allclose(a, np.transpose(a), atol=tol)", "res_dual, e_dual) # solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): # print '\\n solver", "reached', i if i != 0: stop, res_pri, e_pri, res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre,", "time class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho self.lamb = lamb", "new_rho self.U = scale*self.U self.Y = scale*self.Y self.hatY = scale*self.hatY self.Z = scale*self.Z", 
"self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if", "theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop: self.status = 'Optimal' if verbose: print \"Admm", "=np.zeros((nw,nw)) # \\Gamma in the paper self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma self.P=", "T): P = np.ones(T.shape) nw = T.shape[0] for i in range(nw): for j", "res_dual, e_dual = self.CheckConvergence(Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, eps_abs, eps_rel, verbose) if stop: self.status = 'Optimal'", "e_rel * (np.sqrt(self.rho *( norm(self.Z)**2+norm(self.hatZ)**2+norm(self.Y)**2+norm(self.hatY)**2+norm(self.U)**2))) res_pri = r res_dual = s self.plist.append(r) self.dlist.append(s)", "theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j] self.theta = theta assert self.check_symmetric(self.theta) self.Q", "self.objhatT.append(self.objective_hatT()) def update_theta(self): theta = np.eye(self.nw) for i in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i]", "= (res_pri <= e_pri) and (res_dual <= e_dual) return (stop, res_pri, e_pri, res_dual,", "r res_dual = s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <= e_pri)", "assert np.linalg.norm(np.diag(self.V))==0 def proj2Symmetric(self,A): n = A.shape[0] for i in xrange(n): for j", "update variables def update_T(self): LAMBDA,D = np.linalg.eigh(2*self.rho*(self.theta-self.Z)-self.m*self.S) D = np.matrix(D) theii = (LAMBDA+np.sqrt(LAMBDA**2+8*self.rho*self.m))/(4*self.rho)", "e_dual) return (stop, res_pri, e_pri, res_dual, e_dual) # solve def __call__(self, eps_abs, eps_rel,", "np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ = np.zeros((nw,nw)) self.T =np.zeros((nw,nw)) # \\Gamma in the paper", "self.status = 'Incomplete: 
max iterations reached' t1 = time.time() for i in range(admmMaxIters):", "update_duals(self): self.Y = self.P-self.Q+self.Y self.hatY = self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ", "= copy.deepcopy(self.hatP) V_pre = copy.deepcopy(self.V) try: self.update_T() self.update_hatT() self.update_theta() self.update_hat_theta() self.update_V() self.update_P() self.update_hatP()", "(stop, res_pri, e_pri, res_dual, e_dual) # solve def __call__(self, eps_abs, eps_rel, verbose,admmMaxIters=1000): #", "self.rho = rho self.lamb = lamb self.length = nw*(nw+1)/2 #vector length of trianglular", "theta = np.eye(self.nw) for i in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i in", "threshold = 10 if (res_pri>threshold*res_dual): new_rho = 2*self.rho elif (threshold*res_pri<res_dual): new_rho = self.rho/2.0", "self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for i in range(self.nw): self.hatP[i,i] = 1 assert self.check_symmetric(self.hatP) #", "self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre, theta_pre,hat_theta_pre,V_pre,P_pre,hatP_pre, e_abs, e_rel, verbose): r1 = self.T", "self.hatP - hatP_pre allS = np.concatenate((s1,s2,s3,s4,s5))*self.rho s = norm(allS) e_pri = self.nw *", "x.shape[0] self.hatm = hat_x.shape[0] self.theta = np.zeros((nw,nw)) self.hat_theta = np.zeros((nw,nw)) self.Q= np.zeros((nw,nw)) self.hatQ", "self.hatZ = scale*self.hatZ t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1) # print \" avgPerADMMIterTime\",avgIterTime", "= np.ones(T.shape) nw = T.shape[0] for i in range(nw): for j in range(i+1,nw):", "saveTime = open('data/KDD/efficiency_nw/model6Time_nw'+str(self.nw)+'.txt','a') saveTime.write(str(avgIterTime)+' ') saveTime.close() retVal = np.zeros([self.length,2]) result = np.asmatrix(self.T[np.triu_indices(self.nw)]).T.getA() hatresult", "= scale*self.hatY self.Z = scale*self.Z self.hatZ = 
scale*self.hatZ t2 = time.time() avgIterTime =", "= np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) # P & V self.plist = [] self.dlist", "def update_P(self): self.P = (self.V+self.hatP+self.U+self.Q-self.Y)/2 for i in range(self.nw): self.P[i,i] = 1 assert", "import copy import time class myADMMSolver: def __init__(self,lamb, nw, rho,x,hat_x,rho_update_func=None): self.rho = rho", "r1 = self.T -self.theta r2 = self.hatT-self.hat_theta r3 = self.V-self.P+self.hatP r4 = self.P-self.Q", "mean<0: # mean = 0 A[i,j] = mean A[j,i] = mean return A", "raise # if i>=admmMaxIters-1: print 'Incomplete: max iterations reached', i if i !=", "= np.zeros((nw,nw)) # theta & T self.hatZ = np.zeros((nw,nw)) self.U = np.zeros((nw,nw)) #", "j in range(i+1,nw): P[i,j] = -T[i,j]/np.sqrt(T[i,i]*T[j,j]) P[j,i] = P[i,j] return P def h(self,theta,S,m):", "= scale*self.Z self.hatZ = scale*self.hatZ t2 = time.time() avgIterTime = (t2-t1)/(self.iter+1) # print", "norm(allS) e_pri = self.nw * e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual =", "s self.plist.append(r) self.dlist.append(s) self.eprilist.append(e_pri) self.edualist.append(e_dual) stop = (res_pri <= e_pri) and (res_dual <=", "= self.hatT[i,i]+self.hatZ[i,i] for i in range(self.nw): for j in range(i+1,self.nw): c = 1/np.sqrt(hat_theta[i,i]*hat_theta[j,j])", "in range(self.nw): self.P[i,i] = 1 assert self.check_symmetric(self.P) def update_hatP(self): self.hatP = (self.P-self.V-self.U+self.hatQ-self.hatY)/2 for", "e_abs + e_rel * max(norm(self.theta),norm(self.hat_theta), norm(self.T)+norm(self.hatT),norm(self.P),norm(self.hatP),norm(self.V)) e_dual = np.sqrt((self.nw**2)) * e_abs + e_rel", "for i in range(self.nw): theta[i,i] = self.T[i,i]+self.Z[i,i] for i in range(self.nw): for j", "iterations reached', i if i != 0: stop, res_pri, e_pri, res_dual, e_dual =", "hat_theta[j,i] = hat_theta[i,j] self.hat_theta = hat_theta assert 
self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self):", "np.linalg.norm r = norm(allR) s1 = self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre", "self.Q-Q_pre s2 = self.hatQ-hatQ_pre s3 = self.V-V_pre s4 = self.P-P_pre s5 = self.hatP", "eps_rel, verbose,admmMaxIters=1000): # print '\\n solver ADMM model 7: lambdaADMM = ',self.lamb self.status", "self.update_hat_theta() self.update_V() self.update_P() self.update_hatP() self.update_duals() self.objlist.append(self.obj_overall()) except np.linalg.LinAlgError as err: if 'Singular matrix'", "= hat_theta[i,j] self.hat_theta = hat_theta assert self.check_symmetric(self.hat_theta) self.hatQ = self.computePartialCorrelation(self.hat_theta) def update_V(self): self.V", "in the paper self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma self.P= np.zeros((nw,nw)) self.hatP =", "self.T =np.zeros((nw,nw)) # \\Gamma in the paper self.hatT = np.zeros((nw,nw)) # \\hat \\Gamma", "= self.hatP-self.hatQ+self.hatY self.Z = self.T-self.theta+self.Z self.hatZ = self.hatT-self.hat_theta+self.hatZ self.U = self.V-self.P+self.hatP+self.U def CheckConvergence(self,Q_pre,hatQ_pre,", "j in range(i+1,self.nw): c = 1/np.sqrt(theta[i,i]*theta[j,j]) theta[i,j] = (self.T[i,j]+self.Z[i,j]- c*self.P[i,j]-c*self.Y[i,j])/(c**2+1) theta[j,i] = theta[i,j]" ]
[ "(3.6+), False to place in docstring :type type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap", "comments, &etc. doctransify_cst(cst_list, node) with open(filename, \"wt\") as f: f.write(\"\".join(map(attrgetter(\"value\"), cst_list))) __all__ =", "to traverse the AST of the input file, extract the docstring out, parse", "traverse the AST of the input file, extract the docstring out, parse and", "the docstring out, parse and format to intended style, and emit \"\"\" from", "the docstrings found within provided filename to intended docstring_format :param filename: Python file", "whole_ast=original_module, ).visit(node) ) if not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) # Carefully replace", "if not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) # Carefully replace only docstrings, function", "word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not cmp_ast(node, original_module): cst_list", "filename: ```str``` :param docstring_format: Format of docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param", "&etc. doctransify_cst(cst_list, node) with open(filename, \"wt\") as f: f.write(\"\".join(map(attrgetter(\"value\"), cst_list))) __all__ = [\"doctrans\"]", "extract the docstring out, parse and format to intended style, and emit \"\"\"", ":param no_word_wrap: Whether word-wrap is disabled (on emission). :type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with", "no_word_wrap: Whether word-wrap is disabled (on emission). 
:type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename,", "cdd.source_transformer import ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the docstrings found", "import cmp_ast from cdd.cst import cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from", "provided filename to intended docstring_format :param filename: Python file to convert docstrings within.", "```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as f: original_source = f.read() node = ast_parse(original_source,", "= fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if", "and annotation assignments. # Maintaining all other existing whitespace, comments, &etc. doctransify_cst(cst_list, node)", "Edited in place. :type filename: ```str``` :param docstring_format: Format of docstring :type docstring_format:", "fix_missing_locations from copy import deepcopy from operator import attrgetter from cdd.ast_utils import cmp_ast", "function return annotations, assignment and annotation assignments. # Maintaining all other existing whitespace,", ":type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as f: original_source = f.read() node", "original_source = f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node = fix_missing_locations(", "place in docstring :type type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap is disabled (on", "# Carefully replace only docstrings, function return annotations, assignment and annotation assignments. 
#", "ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the docstrings found within provided", "attrgetter from cdd.ast_utils import cmp_ast from cdd.cst import cst_parse from cdd.doctrans_utils import DocTrans,", "and format to intended style, and emit \"\"\" from ast import fix_missing_locations from", "'google']``` :param type_annotations: True to have type annotations (3.6+), False to place in", "```str``` :param docstring_format: Format of docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations:", "have type annotations (3.6+), False to place in docstring :type type_annotations: ```bool``` :param", "\"\"\" with open(filename, \"rt\") as f: original_source = f.read() node = ast_parse(original_source, skip_docstring_remit=False)", "deepcopy from operator import attrgetter from cdd.ast_utils import cmp_ast from cdd.cst import cst_parse", "\"\"\" Transform the docstrings found within provided filename to intended docstring_format :param filename:", "DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\"", "assignment and annotation assignments. # Maintaining all other existing whitespace, comments, &etc. doctransify_cst(cst_list,", "import deepcopy from operator import attrgetter from cdd.ast_utils import cmp_ast from cdd.cst import", "within. Edited in place. :type filename: ```str``` :param docstring_format: Format of docstring :type", "doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform", "<reponame>SamuelMarks/docstring2class<gh_stars>0 \"\"\" Helper to traverse the AST of the input file, extract the", ":type type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap is disabled (on emission). 
:type no_word_wrap:", "docstring_format :param filename: Python file to convert docstrings within. Edited in place. :type", "intended style, and emit \"\"\" from ast import fix_missing_locations from copy import deepcopy", "from ast import fix_missing_locations from copy import deepcopy from operator import attrgetter from", "deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node)", "\"\"\" Helper to traverse the AST of the input file, extract the docstring", "AST of the input file, extract the docstring out, parse and format to", "replace only docstrings, function return annotations, assignment and annotation assignments. # Maintaining all", "annotations, assignment and annotation assignments. # Maintaining all other existing whitespace, comments, &etc.", "type_annotations: True to have type annotations (3.6+), False to place in docstring :type", "existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) # Carefully", "from cdd.source_transformer import ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the docstrings", "found within provided filename to intended docstring_format :param filename: Python file to convert", "= ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is", "file, extract the docstring out, parse and format to intended style, and emit", "other existing whitespace, comments, &etc. 
doctransify_cst(cst_list, node) with open(filename, \"wt\") as f: f.write(\"\".join(map(attrgetter(\"value\"),", "cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse def doctrans(filename,", "f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format,", "'numpydoc', 'google']``` :param type_annotations: True to have type annotations (3.6+), False to place", "return annotations, assignment and annotation assignments. # Maintaining all other existing whitespace, comments,", "annotation assignments. # Maintaining all other existing whitespace, comments, &etc. doctransify_cst(cst_list, node) with", "docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the docstrings found within provided filename to intended", "import fix_missing_locations from copy import deepcopy from operator import attrgetter from cdd.ast_utils import", "and emit \"\"\" from ast import fix_missing_locations from copy import deepcopy from operator", "Whether word-wrap is disabled (on emission). :type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\")", "to have type annotations (3.6+), False to place in docstring :type type_annotations: ```bool```", "Python file to convert docstrings within. Edited in place. :type filename: ```str``` :param", "as f: original_source = f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node", "import ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the docstrings found within", "Carefully replace only docstrings, function return annotations, assignment and annotation assignments. 
# Maintaining", "Transform the docstrings found within provided filename to intended docstring_format :param filename: Python", "disabled (on emission). :type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as f: original_source", "only docstrings, function return annotations, assignment and annotation assignments. # Maintaining all other", "operator import attrgetter from cdd.ast_utils import cmp_ast from cdd.cst import cst_parse from cdd.doctrans_utils", "docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not cmp_ast(node, original_module):", "= deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module,", "cmp_ast from cdd.cst import cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer", ":param docstring_format: Format of docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True", "skip_docstring_remit=False) original_module = deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations,", "word-wrap is disabled (on emission). :type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as", "emission). 
:type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as f: original_source = f.read()", "parse and format to intended style, and emit \"\"\" from ast import fix_missing_locations", "cdd.ast_utils import cmp_ast from cdd.cst import cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations", "docstrings, function return annotations, assignment and annotation assignments. # Maintaining all other existing", "to intended style, and emit \"\"\" from ast import fix_missing_locations from copy import", "emit \"\"\" from ast import fix_missing_locations from copy import deepcopy from operator import", "type_annotations, no_word_wrap): \"\"\" Transform the docstrings found within provided filename to intended docstring_format", "no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as f: original_source = f.read() node =", "of the input file, extract the docstring out, parse and format to intended", "cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) # Carefully replace only docstrings, function return annotations,", "docstring :type type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap is disabled (on emission). :type", "in place. :type filename: ```str``` :param docstring_format: Format of docstring :type docstring_format: ```Literal['rest',", "from operator import attrgetter from cdd.ast_utils import cmp_ast from cdd.cst import cst_parse from", ":param type_annotations: True to have type annotations (3.6+), False to place in docstring", "# Maintaining all other existing whitespace, comments, &etc. doctransify_cst(cst_list, node) with open(filename, \"wt\")", "assignments. # Maintaining all other existing whitespace, comments, &etc. doctransify_cst(cst_list, node) with open(filename,", "whitespace, comments, &etc. 
doctransify_cst(cst_list, node) with open(filename, \"wt\") as f: f.write(\"\".join(map(attrgetter(\"value\"), cst_list))) __all__", "cdd.cst import cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse", "filename to intended docstring_format :param filename: Python file to convert docstrings within. Edited", "from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse def doctrans(filename, docstring_format,", ") if not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) # Carefully replace only docstrings,", "node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) )", "intended docstring_format :param filename: Python file to convert docstrings within. Edited in place.", "style, and emit \"\"\" from ast import fix_missing_locations from copy import deepcopy from", "from cdd.cst import cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import", "from copy import deepcopy from operator import attrgetter from cdd.ast_utils import cmp_ast from", "with open(filename, \"rt\") as f: original_source = f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module", "file to convert docstrings within. Edited in place. 
:type filename: ```str``` :param docstring_format:", "docstrings found within provided filename to intended docstring_format :param filename: Python file to", "DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not cmp_ast(node,", "is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not cmp_ast(node, original_module): cst_list =", "list(cst_parse(original_source)) # Carefully replace only docstrings, function return annotations, assignment and annotation assignments.", "docstrings within. Edited in place. :type filename: ```str``` :param docstring_format: Format of docstring", "is disabled (on emission). :type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as f:", "the input file, extract the docstring out, parse and format to intended style,", "node = ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap", "to place in docstring :type type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap is disabled", "f: original_source = f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node =", "out, parse and format to intended style, and emit \"\"\" from ast import", "ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None,", "cst_list = list(cst_parse(original_source)) # Carefully replace only docstrings, function return annotations, assignment and", "ast import fix_missing_locations from copy import deepcopy from operator import attrgetter from cdd.ast_utils", 
"type annotations (3.6+), False to place in docstring :type type_annotations: ```bool``` :param no_word_wrap:", "open(filename, \"rt\") as f: original_source = f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module =", "docstring_format: Format of docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True to", "Maintaining all other existing whitespace, comments, &etc. doctransify_cst(cst_list, node) with open(filename, \"wt\") as", "```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True to have type annotations (3.6+), False to", "not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) # Carefully replace only docstrings, function return", "existing whitespace, comments, &etc. doctransify_cst(cst_list, node) with open(filename, \"wt\") as f: f.write(\"\".join(map(attrgetter(\"value\"), cst_list)))", "True to have type annotations (3.6+), False to place in docstring :type type_annotations:", "place. :type filename: ```str``` :param docstring_format: Format of docstring :type docstring_format: ```Literal['rest', 'numpydoc',", "of docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True to have type", "all other existing whitespace, comments, &etc. doctransify_cst(cst_list, node) with open(filename, \"wt\") as f:", "Helper to traverse the AST of the input file, extract the docstring out,", "to intended docstring_format :param filename: Python file to convert docstrings within. 
Edited in", ":type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True to have type annotations (3.6+),", "False to place in docstring :type type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap is", "has_type_annotations from cdd.source_transformer import ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the", "to convert docstrings within. Edited in place. :type filename: ```str``` :param docstring_format: Format", "import cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse def", "= f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node) node = fix_missing_locations( DocTrans(", "\"\"\" from ast import fix_missing_locations from copy import deepcopy from operator import attrgetter", "import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse def doctrans(filename, docstring_format, type_annotations, no_word_wrap):", "in docstring :type type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap is disabled (on emission).", "from cdd.ast_utils import cmp_ast from cdd.cst import cst_parse from cdd.doctrans_utils import DocTrans, doctransify_cst,", "format to intended style, and emit \"\"\" from ast import fix_missing_locations from copy", "annotations (3.6+), False to place in docstring :type type_annotations: ```bool``` :param no_word_wrap: Whether", "copy import deepcopy from operator import attrgetter from cdd.ast_utils import cmp_ast from cdd.cst", "cdd.doctrans_utils import DocTrans, doctransify_cst, has_type_annotations from cdd.source_transformer import ast_parse def doctrans(filename, docstring_format, type_annotations,", ").visit(node) ) if not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) # Carefully replace only", "within provided filename to 
intended docstring_format :param filename: Python file to convert docstrings", "docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True to have type annotations", "```bool``` :param no_word_wrap: Whether word-wrap is disabled (on emission). :type no_word_wrap: ```Optional[Literal[True]]``` \"\"\"", "def doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the docstrings found within provided filename", "= list(cst_parse(original_source)) # Carefully replace only docstrings, function return annotations, assignment and annotation", "Format of docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True to have", "the AST of the input file, extract the docstring out, parse and format", "(on emission). :type no_word_wrap: ```Optional[Literal[True]]``` \"\"\" with open(filename, \"rt\") as f: original_source =", ":param filename: Python file to convert docstrings within. Edited in place. 
:type filename:", ":type filename: ```str``` :param docstring_format: Format of docstring :type docstring_format: ```Literal['rest', 'numpydoc', 'google']```", "None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source))", "type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not cmp_ast(node, original_module): cst_list = list(cst_parse(original_source)) #", "\"rt\") as f: original_source = f.read() node = ast_parse(original_source, skip_docstring_remit=False) original_module = deepcopy(node)", "doctrans(filename, docstring_format, type_annotations, no_word_wrap): \"\"\" Transform the docstrings found within provided filename to", "original_module = deepcopy(node) node = fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node),", "no_word_wrap): \"\"\" Transform the docstrings found within provided filename to intended docstring_format :param", "input file, extract the docstring out, parse and format to intended style, and", "fix_missing_locations( DocTrans( docstring_format=docstring_format, word_wrap=no_word_wrap is None, type_annotations=type_annotations, existing_type_annotations=has_type_annotations(node), whole_ast=original_module, ).visit(node) ) if not", "docstring_format: ```Literal['rest', 'numpydoc', 'google']``` :param type_annotations: True to have type annotations (3.6+), False", "docstring out, parse and format to intended style, and emit \"\"\" from ast", "type_annotations: ```bool``` :param no_word_wrap: Whether word-wrap is disabled (on emission). 
:type no_word_wrap: ```Optional[Literal[True]]```", "original_module): cst_list = list(cst_parse(original_source)) # Carefully replace only docstrings, function return annotations, assignment", "filename: Python file to convert docstrings within. Edited in place. :type filename: ```str```", "import attrgetter from cdd.ast_utils import cmp_ast from cdd.cst import cst_parse from cdd.doctrans_utils import", "convert docstrings within. Edited in place. :type filename: ```str``` :param docstring_format: Format of" ]
[ "\"Grabbing new page from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo", "from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo in recentPhotos['photos']['photo']: if", "with open(filename, 'w+') as f: f.write(image) print \"Done.\" except KeyboardInterrupt: raise except: print", "print \"Downloaded all new photos from the recent photo feed that match your", "(\"You must configure config.py.template and rename\\n\" \" it to config.py to use this", "download %s\"%filename page += 1 if page > recentPhotos['photos']['pages']: break print \"Downloaded all", "to download %s\"%filename page += 1 if page > recentPhotos['photos']['pages']: break print \"Downloaded", "os import simplejson # install with pip try: import config except: print (\"You", "search, page)).read()) for photo in recentPhotos['photos']['photo']: if 'url_o' in photo and 'id' in", "config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s' has already been downloaded.", "config.searches: page = 1 while True: print \"Grabbing new page from Flikr API\"", "\"Photo '%s' has already been downloaded. 
Ignoring.\"%filename else: try: print \"Downloading %s\"%filename image", "to use this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page", "import urllib, signal, os import simplejson # install with pip try: import config", "try: import config except: print (\"You must configure config.py.template and rename\\n\" \" it", "\"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page = 1 while True: print \"Grabbing new", "raise except: print \"Failed to download %s\"%filename page += 1 if page >", "urllib, signal, os import simplejson # install with pip try: import config except:", "print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f: f.write(image) print", "for photo in recentPhotos['photos']['photo']: if 'url_o' in photo and 'id' in photo and", "f: f.write(image) print \"Done.\" except KeyboardInterrupt: raise except: print \"Failed to download %s\"%filename", "in photo and 'id' in photo and config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename):", "flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page = 1 while True: print", "it to config.py to use this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search", "print \"Failed to 
download %s\"%filename page += 1 if page > recentPhotos['photos']['pages']: break", "'id' in photo and config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s'", "image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f: f.write(image) print \"Done.\" except KeyboardInterrupt:", "install with pip try: import config except: print (\"You must configure config.py.template and", "already been downloaded. Ignoring.\"%filename else: try: print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with", "and rename\\n\" \" it to config.py to use this tool.\") exit() flikrApiUrl =", "Ignoring.\"%filename else: try: print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as", "print \"Photo '%s' has already been downloaded. Ignoring.\"%filename else: try: print \"Downloading %s\"%filename", "except KeyboardInterrupt: raise except: print \"Failed to download %s\"%filename page += 1 if", "= simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo in recentPhotos['photos']['photo']: if 'url_o' in photo and", "> recentPhotos['photos']['pages']: break print \"Downloaded all new photos from the recent photo feed", "break print \"Downloaded all new photos from the recent photo feed that match", "True: print \"Grabbing new page from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read())", "recentPhotos['photos']['photo']: if 'url_o' in photo and 'id' in photo and config.matchesCriteria(photo): filename =", "if page > recentPhotos['photos']['pages']: break print \"Downloaded all new photos from the recent", "in photo and config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s' has", "recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, 
search, page)).read()) for photo in recentPhotos['photos']['photo']: if 'url_o' in photo", "KeyboardInterrupt: raise except: print \"Failed to download %s\"%filename page += 1 if page", "\"Failed to download %s\"%filename page += 1 if page > recentPhotos['photos']['pages']: break print", "print \"Grabbing new page from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for", "page > recentPhotos['photos']['pages']: break print \"Downloaded all new photos from the recent photo", "config.py.template and rename\\n\" \" it to config.py to use this tool.\") exit() flikrApiUrl", "try: print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f: f.write(image)", "open(filename, 'w+') as f: f.write(image) print \"Done.\" except KeyboardInterrupt: raise except: print \"Failed", "1 if page > recentPhotos['photos']['pages']: break print \"Downloaded all new photos from the", "= 1 while True: print \"Grabbing new page from Flikr API\" recentPhotos =", "simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo in recentPhotos['photos']['photo']: if 'url_o' in photo and 'id'", "as f: f.write(image) print \"Done.\" except KeyboardInterrupt: raise except: print \"Failed to download", "+= 1 if page > recentPhotos['photos']['pages']: break print \"Downloaded all new photos from", "config.py to use this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches:", "filename = config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s' has already been downloaded. 
Ignoring.\"%filename", "photo and config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s' has already", "f.write(image) print \"Done.\" except KeyboardInterrupt: raise except: print \"Failed to download %s\"%filename page", "new page from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo in", "to config.py to use this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in", "has already been downloaded. Ignoring.\"%filename else: try: print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read()", "for search in config.searches: page = 1 while True: print \"Grabbing new page", "= urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f: f.write(image) print \"Done.\" except KeyboardInterrupt: raise", "been downloaded. 
Ignoring.\"%filename else: try: print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename,", "print \"Done.\" except KeyboardInterrupt: raise except: print \"Failed to download %s\"%filename page +=", "\" it to config.py to use this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for", "exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page = 1 while True:", "page = 1 while True: print \"Grabbing new page from Flikr API\" recentPhotos", "print (\"You must configure config.py.template and rename\\n\" \" it to config.py to use", "'w+') as f: f.write(image) print \"Done.\" except KeyboardInterrupt: raise except: print \"Failed to", "and config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s' has already been", "else: try: print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f:", "configure config.py.template and rename\\n\" \" it to config.py to use this tool.\") exit()", "with pip try: import config except: print (\"You must configure config.py.template and rename\\n\"", "in recentPhotos['photos']['photo']: if 'url_o' in photo and 'id' in photo and config.matchesCriteria(photo): filename", "\"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f: f.write(image) print \"Done.\"", "search in config.searches: page = 1 while True: print \"Grabbing new page from", "pip try: import config except: print (\"You must configure config.py.template and rename\\n\" \"", "if os.path.exists(filename): print 
\"Photo '%s' has already been downloaded. Ignoring.\"%filename else: try: print", "config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s' has already been downloaded. Ignoring.\"%filename else: try:", "import config except: print (\"You must configure config.py.template and rename\\n\" \" it to", "except: print \"Failed to download %s\"%filename page += 1 if page > recentPhotos['photos']['pages']:", "Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo in recentPhotos['photos']['photo']: if 'url_o'", "page += 1 if page > recentPhotos['photos']['pages']: break print \"Downloaded all new photos", "\"Downloaded all new photos from the recent photo feed that match your criteria.\"", "downloaded. Ignoring.\"%filename else: try: print \"Downloading %s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+')", "except: print (\"You must configure config.py.template and rename\\n\" \" it to config.py to", "= config.downloadAs(photo) if os.path.exists(filename): print \"Photo '%s' has already been downloaded. 
Ignoring.\"%filename else:", "while True: print \"Grabbing new page from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search,", "<gh_stars>0 import urllib, signal, os import simplejson # install with pip try: import", "urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f: f.write(image) print \"Done.\" except KeyboardInterrupt: raise except:", "%s\"%filename image = urllib.urlopen(photo['url_o']).read() with open(filename, 'w+') as f: f.write(image) print \"Done.\" except", "must configure config.py.template and rename\\n\" \" it to config.py to use this tool.\")", "page)).read()) for photo in recentPhotos['photos']['photo']: if 'url_o' in photo and 'id' in photo", "config except: print (\"You must configure config.py.template and rename\\n\" \" it to config.py", "and 'id' in photo and config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename): print \"Photo", "use this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page =", "recentPhotos['photos']['pages']: break print \"Downloaded all new photos from the recent photo feed that", "in config.searches: page = 1 while True: print \"Grabbing new page from Flikr", "tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page = 1 while", "# install with pip try: import config except: print (\"You must configure config.py.template", "%s\"%filename page += 1 if page > recentPhotos['photos']['pages']: break print \"Downloaded all new", "'%s' has already been downloaded. 
Ignoring.\"%filename else: try: print \"Downloading %s\"%filename image =", "'url_o' in photo and 'id' in photo and config.matchesCriteria(photo): filename = config.downloadAs(photo) if", "\"Done.\" except KeyboardInterrupt: raise except: print \"Failed to download %s\"%filename page += 1", "API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo in recentPhotos['photos']['photo']: if 'url_o' in", "os.path.exists(filename): print \"Photo '%s' has already been downloaded. Ignoring.\"%filename else: try: print \"Downloading", "page from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl, search, page)).read()) for photo in recentPhotos['photos']['photo']:", "signal, os import simplejson # install with pip try: import config except: print", "= \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page = 1 while True: print \"Grabbing", "1 while True: print \"Grabbing new page from Flikr API\" recentPhotos = simplejson.loads(urllib.urlopen(\"%s&text=%s&page=%s\"%(flikrApiUrl,", "photo in recentPhotos['photos']['photo']: if 'url_o' in photo and 'id' in photo and config.matchesCriteria(photo):", "simplejson # install with pip try: import config except: print (\"You must configure", "rename\\n\" \" it to config.py to use this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\"", "photo and 'id' in photo and config.matchesCriteria(photo): filename = config.downloadAs(photo) if os.path.exists(filename): print", "import simplejson # install with pip try: import config except: print (\"You must", "if 'url_o' in photo and 'id' in 
photo and config.matchesCriteria(photo): filename = config.downloadAs(photo)", "this tool.\") exit() flikrApiUrl = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=2a5a2148ffc21588facb65536aa91b7d&extras=dims_o%2Curl_o%2Cviews&per_page=500&media=photos&format=json&nojsoncallback=1\" for search in config.searches: page = 1" ]
[ "AttrsRecipe(SetupPyRecipe): def __init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b'", "self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name = 'attrs' self.version =", "super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name =", "from .base import SetupPyRecipe class AttrsRecipe(SetupPyRecipe): def __init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs)", "*args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3']", "__init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons =", "def __init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons", "self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name = 'attrs'", "\\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name = 'attrs' self.version = '18.1.0' self.url =", "'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name = 'attrs' self.version = '18.1.0' self.url = 'https://files.pythonhosted.org/packages/e4/ac/a04671e118b57bee87dabca1e0f2d3bda816b7a551036012d0ca24190e71/attrs-18.1.0.tar.gz'", "**kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name", 
"**kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name = 'attrs' self.version", ".base import SetupPyRecipe class AttrsRecipe(SetupPyRecipe): def __init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256", "'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name = 'attrs' self.version = '18.1.0' self.url", "import SetupPyRecipe class AttrsRecipe(SetupPyRecipe): def __init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 =", "SetupPyRecipe class AttrsRecipe(SetupPyRecipe): def __init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a'", "= 'e0d0eb91441a3b53dab4d9b743eafc1a' \\ 'c44476296a2053b6ca3af0b139faf87b' self.pythons = ['python3'] self.name = 'attrs' self.version = '18.1.0'", "class AttrsRecipe(SetupPyRecipe): def __init__(self, *args, **kwargs): super(AttrsRecipe, self).__init__(*args, **kwargs) self.sha256 = 'e0d0eb91441a3b53dab4d9b743eafc1a' \\" ]
[ "# 스택 생성자 호출 stacks = Stack() # for문 종료 flag 생성 flag_true", "if ch == ')' and stacks.top() == '(': stacks.pop() continue if ch ==", "# 반복구문 생성 while True: # 텍스트 입력 저장 text = input().rstrip() #", "tmp # 반복구문 생성 while True: # 텍스트 입력 저장 text = input().rstrip()", "== '[': stacks.pop() continue if flag_true == True and len(stacks.stack) == 0: print('yes')", "== '(': stacks.pop() continue if ch == ']' and stacks.top() == '[': stacks.pop()", "종료 flag 생성 flag_true = True # for문 설계 # 1.text 내 있는", "생성 flag_true = True # for문 설계 # 1.text 내 있는 문자 호출", "if ch == ']' and stacks.top() == '[': stacks.pop() continue if flag_true ==", "return -1 else: return self.stack[-1] def pop(self): if len(self.stack) == 0: return -1", "] 이 나올 경우 스택에 complement 기호가 있는지 파악하고(top 활용) 맞을 경우 pop", "= self.stack.pop() return tmp # 반복구문 생성 while True: # 텍스트 입력 저장", "'.': break # 스택 생성자 호출 stacks = Stack() # for문 종료 flag", "else: tmp = self.stack.pop() return tmp # 반복구문 생성 while True: # 텍스트", "self.stack.append(data) def top(self): if len(self.stack) == 0: return -1 else: return self.stack[-1] def", "문자 호출 # 2. (, [ 가 나올 경우 스택에 push # 3.", "text: if ch == '(': stacks.push(ch) continue if ch == '[': stacks.push(ch) continue", "True: # 텍스트 입력 저장 text = input().rstrip() # break 조건 입력 if", "경우 for문 강제 종류 후 flag 판별 for ch in text: if ch", "class Stack(): def __init__(self): self.stack = [] def push(self, data): self.stack.append(data) def top(self):", "continue if ch == '[': stacks.push(ch) continue if ch == ')' and stacks.top()", "# text = 'So when I die (the [first] I will see in", "저장 text = input().rstrip() # break 조건 입력 if len(text) == 1 and", "ch == ']' and stacks.top() == '[': stacks.pop() continue if flag_true == True", "생성 while True: # 텍스트 입력 저장 text = input().rstrip() # break 조건", "파악하고(top 활용) 맞을 경우 pop # 4. 
아닐 경우 for문 강제 종류 후", "and stacks.top() != '[': flag_true = False break if ch == ')' and", "stack 클래스 생성 class Stack(): def __init__(self): self.stack = [] def push(self, data):", "I will see in (heaven) is a score list).' # stack 클래스 생성", "!= '(': flag_true = False break if ch == ']' and stacks.top() !=", "stacks.pop() continue if flag_true == True and len(stacks.stack) == 0: print('yes') else: print('no')", "see in (heaven) is a score list).' # stack 클래스 생성 class Stack():", "입력 if len(text) == 1 and text[0] == '.': break # 스택 생성자", "if len(text) == 1 and text[0] == '.': break # 스택 생성자 호출", "= True # for문 설계 # 1.text 내 있는 문자 호출 # 2.", "맞을 경우 pop # 4. 아닐 경우 for문 강제 종류 후 flag 판별", "score list).' # stack 클래스 생성 class Stack(): def __init__(self): self.stack = []", "ch == '(': stacks.push(ch) continue if ch == '[': stacks.push(ch) continue if ch", "text = input().rstrip() # break 조건 입력 if len(text) == 1 and text[0]", "== ')' and stacks.top() != '(': flag_true = False break if ch ==", "= [] def push(self, data): self.stack.append(data) def top(self): if len(self.stack) == 0: return", "die (the [first] I will see in (heaven) is a score list).' #", "'(': stacks.push(ch) continue if ch == '[': stacks.push(ch) continue if ch == ')'", "# for문 종료 flag 생성 flag_true = True # for문 설계 # 1.text", "stacks.top() != '[': flag_true = False break if ch == ')' and stacks.top()", "내 있는 문자 호출 # 2. (, [ 가 나올 경우 스택에 push", "조건 입력 if len(text) == 1 and text[0] == '.': break # 스택", "나올 경우 스택에 complement 기호가 있는지 파악하고(top 활용) 맞을 경우 pop # 4.", "text = 'So when I die (the [first] I will see in (heaven)", "push(self, data): self.stack.append(data) def top(self): if len(self.stack) == 0: return -1 else: return", "True # for문 설계 # 1.text 내 있는 문자 호출 # 2. (,", "'[': flag_true = False break if ch == ')' and stacks.top() == '(':", "'(': flag_true = False break if ch == ']' and stacks.top() != '[':", "a score list).' 
# stack 클래스 생성 class Stack(): def __init__(self): self.stack =", "and stacks.top() == '(': stacks.pop() continue if ch == ']' and stacks.top() ==", "스택에 push # 3. ), ] 이 나올 경우 스택에 complement 기호가 있는지", "후 flag 판별 for ch in text: if ch == '(': stacks.push(ch) continue", "flag_true = False break if ch == ']' and stacks.top() != '[': flag_true", "complement 기호가 있는지 파악하고(top 활용) 맞을 경우 pop # 4. 아닐 경우 for문", "if ch == '[': stacks.push(ch) continue if ch == ')' and stacks.top() !=", "']' and stacks.top() == '[': stacks.pop() continue if flag_true == True and len(stacks.stack)", "top(self): if len(self.stack) == 0: return -1 else: return self.stack[-1] def pop(self): if", "스택 생성자 호출 stacks = Stack() # for문 종료 flag 생성 flag_true =", "'[': stacks.push(ch) continue if ch == ')' and stacks.top() != '(': flag_true =", "== 1 and text[0] == '.': break # 스택 생성자 호출 stacks =", "continue if ch == ')' and stacks.top() != '(': flag_true = False break", "= 'So when I die (the [first] I will see in (heaven) is", "and text[0] == '.': break # 스택 생성자 호출 stacks = Stack() #", "if len(self.stack) == 0: return -1 else: tmp = self.stack.pop() return tmp #", "I die (the [first] I will see in (heaven) is a score list).'", "강제 종류 후 flag 판별 for ch in text: if ch == '(':", "for문 종료 flag 생성 flag_true = True # for문 설계 # 1.text 내", "continue if ch == ']' and stacks.top() == '[': stacks.pop() continue if flag_true", "== 0: return -1 else: return self.stack[-1] def pop(self): if len(self.stack) == 0:", "is a score list).' # stack 클래스 생성 class Stack(): def __init__(self): self.stack", "경우 스택에 complement 기호가 있는지 파악하고(top 활용) 맞을 경우 pop # 4. 아닐", "will see in (heaven) is a score list).' 
# stack 클래스 생성 class", "if ch == '(': stacks.push(ch) continue if ch == '[': stacks.push(ch) continue if", "text[0] == '.': break # 스택 생성자 호출 stacks = Stack() # for문", "), ] 이 나올 경우 스택에 complement 기호가 있는지 파악하고(top 활용) 맞을 경우", "== '.': break # 스택 생성자 호출 stacks = Stack() # for문 종료", "if ch == ')' and stacks.top() != '(': flag_true = False break if", "있는지 파악하고(top 활용) 맞을 경우 pop # 4. 아닐 경우 for문 강제 종류", "for문 강제 종류 후 flag 판별 for ch in text: if ch ==", "== '(': stacks.push(ch) continue if ch == '[': stacks.push(ch) continue if ch ==", "def __init__(self): self.stack = [] def push(self, data): self.stack.append(data) def top(self): if len(self.stack)", "[ 가 나올 경우 스택에 push # 3. ), ] 이 나올 경우", "호출 stacks = Stack() # for문 종료 flag 생성 flag_true = True #", "== ']' and stacks.top() == '[': stacks.pop() continue if flag_true == True and", "스택에 complement 기호가 있는지 파악하고(top 활용) 맞을 경우 pop # 4. 아닐 경우", "pop # 4. 아닐 경우 for문 강제 종류 후 flag 판별 for ch", "# 1.text 내 있는 문자 호출 # 2. (, [ 가 나올 경우", "경우 pop # 4. 아닐 경우 for문 강제 종류 후 flag 판별 for", "')' and stacks.top() != '(': flag_true = False break if ch == ']'", "-1 else: return self.stack[-1] def pop(self): if len(self.stack) == 0: return -1 else:", "'So when I die (the [first] I will see in (heaven) is a", "in (heaven) is a score list).' # stack 클래스 생성 class Stack(): def", "(heaven) is a score list).' # stack 클래스 생성 class Stack(): def __init__(self):", "3. ), ] 이 나올 경우 스택에 complement 기호가 있는지 파악하고(top 활용) 맞을", "# 2. (, [ 가 나올 경우 스택에 push # 3. ), ]", "# 3. 
), ] 이 나올 경우 스택에 complement 기호가 있는지 파악하고(top 활용)", "and stacks.top() != '(': flag_true = False break if ch == ']' and", "return self.stack[-1] def pop(self): if len(self.stack) == 0: return -1 else: tmp =", "flag 판별 for ch in text: if ch == '(': stacks.push(ch) continue if", "data): self.stack.append(data) def top(self): if len(self.stack) == 0: return -1 else: return self.stack[-1]", "0: return -1 else: return self.stack[-1] def pop(self): if len(self.stack) == 0: return", "(the [first] I will see in (heaven) is a score list).' # stack", "생성자 호출 stacks = Stack() # for문 종료 flag 생성 flag_true = True", "self.stack.pop() return tmp # 반복구문 생성 while True: # 텍스트 입력 저장 text", "# 4. 아닐 경우 for문 강제 종류 후 flag 판별 for ch in", "stacks.push(ch) continue if ch == ')' and stacks.top() != '(': flag_true = False", "if ch == ']' and stacks.top() != '[': flag_true = False break if", "stacks.push(ch) continue if ch == '[': stacks.push(ch) continue if ch == ')' and", "self.stack[-1] def pop(self): if len(self.stack) == 0: return -1 else: tmp = self.stack.pop()", "# 텍스트 입력 저장 text = input().rstrip() # break 조건 입력 if len(text)", "# for문 설계 # 1.text 내 있는 문자 호출 # 2. (, [", "1.text 내 있는 문자 호출 # 2. (, [ 가 나올 경우 스택에", "1 and text[0] == '.': break # 스택 생성자 호출 stacks = Stack()", "tmp = self.stack.pop() return tmp # 반복구문 생성 while True: # 텍스트 입력", "False break if ch == ')' and stacks.top() == '(': stacks.pop() continue if", "__init__(self): self.stack = [] def push(self, data): self.stack.append(data) def top(self): if len(self.stack) ==", "(, [ 가 나올 경우 스택에 push # 3. ), ] 이 나올", "push # 3. ), ] 이 나올 경우 스택에 complement 기호가 있는지 파악하고(top", "list).' # stack 클래스 생성 class Stack(): def __init__(self): self.stack = [] def", "ch == '[': stacks.push(ch) continue if ch == ')' and stacks.top() != '(':", "[first] I will see in (heaven) is a score list).' 
# stack 클래스", "len(text) == 1 and text[0] == '.': break # 스택 생성자 호출 stacks", "[] def push(self, data): self.stack.append(data) def top(self): if len(self.stack) == 0: return -1", "stacks.top() == '[': stacks.pop() continue if flag_true == True and len(stacks.stack) == 0:", "break # 스택 생성자 호출 stacks = Stack() # for문 종료 flag 생성", "len(self.stack) == 0: return -1 else: return self.stack[-1] def pop(self): if len(self.stack) ==", "ch in text: if ch == '(': stacks.push(ch) continue if ch == '[':", "== ')' and stacks.top() == '(': stacks.pop() continue if ch == ']' and", "pop(self): if len(self.stack) == 0: return -1 else: tmp = self.stack.pop() return tmp", "텍스트 입력 저장 text = input().rstrip() # break 조건 입력 if len(text) ==", "설계 # 1.text 내 있는 문자 호출 # 2. (, [ 가 나올", "ch == ')' and stacks.top() == '(': stacks.pop() continue if ch == ']'", "아닐 경우 for문 강제 종류 후 flag 판별 for ch in text: if", "= Stack() # for문 종료 flag 생성 flag_true = True # for문 설계", "4. 아닐 경우 for문 강제 종류 후 flag 판별 for ch in text:", "판별 for ch in text: if ch == '(': stacks.push(ch) continue if ch", "def pop(self): if len(self.stack) == 0: return -1 else: tmp = self.stack.pop() return", "stacks.pop() continue if ch == ']' and stacks.top() == '[': stacks.pop() continue if", "for ch in text: if ch == '(': stacks.push(ch) continue if ch ==", "= False break if ch == ']' and stacks.top() != '[': flag_true =", "break if ch == ')' and stacks.top() == '(': stacks.pop() continue if ch", "while True: # 텍스트 입력 저장 text = input().rstrip() # break 조건 입력", "flag_true = False break if ch == ')' and stacks.top() == '(': stacks.pop()", "나올 경우 스택에 push # 3. 
), ] 이 나올 경우 스택에 complement", "when I die (the [first] I will see in (heaven) is a score", "-1 else: tmp = self.stack.pop() return tmp # 반복구문 생성 while True: #", "')' and stacks.top() == '(': stacks.pop() continue if ch == ']' and stacks.top()", "def push(self, data): self.stack.append(data) def top(self): if len(self.stack) == 0: return -1 else:", "0: return -1 else: tmp = self.stack.pop() return tmp # 반복구문 생성 while", "stacks = Stack() # for문 종료 flag 생성 flag_true = True # for문", "반복구문 생성 while True: # 텍스트 입력 저장 text = input().rstrip() # break", "and stacks.top() == '[': stacks.pop() continue if flag_true == True and len(stacks.stack) ==", "ch == ']' and stacks.top() != '[': flag_true = False break if ch", "flag 생성 flag_true = True # for문 설계 # 1.text 내 있는 문자", "== ']' and stacks.top() != '[': flag_true = False break if ch ==", "False break if ch == ']' and stacks.top() != '[': flag_true = False", "'[': stacks.pop() continue if flag_true == True and len(stacks.stack) == 0: print('yes') else:", "입력 저장 text = input().rstrip() # break 조건 입력 if len(text) == 1", "'(': stacks.pop() continue if ch == ']' and stacks.top() == '[': stacks.pop() continue", "input().rstrip() # break 조건 입력 if len(text) == 1 and text[0] == '.':", "기호가 있는지 파악하고(top 활용) 맞을 경우 pop # 4. 아닐 경우 for문 강제", "있는 문자 호출 # 2. (, [ 가 나올 경우 스택에 push #", "else: return self.stack[-1] def pop(self): if len(self.stack) == 0: return -1 else: tmp", "break 조건 입력 if len(text) == 1 and text[0] == '.': break #", "stacks.top() == '(': stacks.pop() continue if ch == ']' and stacks.top() == '[':", "break if ch == ']' and stacks.top() != '[': flag_true = False break", "= input().rstrip() # break 조건 입력 if len(text) == 1 and text[0] ==", "활용) 맞을 경우 pop # 4. 
아닐 경우 for문 강제 종류 후 flag", "in text: if ch == '(': stacks.push(ch) continue if ch == '[': stacks.push(ch)", "!= '[': flag_true = False break if ch == ')' and stacks.top() ==", "stacks.top() != '(': flag_true = False break if ch == ']' and stacks.top()", "if len(self.stack) == 0: return -1 else: return self.stack[-1] def pop(self): if len(self.stack)", "== 0: return -1 else: tmp = self.stack.pop() return tmp # 반복구문 생성", "for문 설계 # 1.text 내 있는 문자 호출 # 2. (, [ 가", "']' and stacks.top() != '[': flag_true = False break if ch == ')'", "Stack() # for문 종료 flag 생성 flag_true = True # for문 설계 #", "# break 조건 입력 if len(text) == 1 and text[0] == '.': break", "ch == ')' and stacks.top() != '(': flag_true = False break if ch", "== '[': stacks.push(ch) continue if ch == ')' and stacks.top() != '(': flag_true", "Stack(): def __init__(self): self.stack = [] def push(self, data): self.stack.append(data) def top(self): if", "# stack 클래스 생성 class Stack(): def __init__(self): self.stack = [] def push(self,", "경우 스택에 push # 3. ), ] 이 나올 경우 스택에 complement 기호가", "가 나올 경우 스택에 push # 3. ), ] 이 나올 경우 스택에", "= False break if ch == ')' and stacks.top() == '(': stacks.pop() continue", "생성 class Stack(): def __init__(self): self.stack = [] def push(self, data): self.stack.append(data) def", "len(self.stack) == 0: return -1 else: tmp = self.stack.pop() return tmp # 반복구문", "return -1 else: tmp = self.stack.pop() return tmp # 반복구문 생성 while True:", "return tmp # 반복구문 생성 while True: # 텍스트 입력 저장 text =", "종류 후 flag 판별 for ch in text: if ch == '(': stacks.push(ch)", "def top(self): if len(self.stack) == 0: return -1 else: return self.stack[-1] def pop(self):", "self.stack = [] def push(self, data): self.stack.append(data) def top(self): if len(self.stack) == 0:", "호출 # 2. (, [ 가 나올 경우 스택에 push # 3. ),", "2. (, [ 가 나올 경우 스택에 push # 3. 
), ] 이", "flag_true = True # for문 설계 # 1.text 내 있는 문자 호출 #", "이 나올 경우 스택에 complement 기호가 있는지 파악하고(top 활용) 맞을 경우 pop #", "클래스 생성 class Stack(): def __init__(self): self.stack = [] def push(self, data): self.stack.append(data)" ]
[ "= pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__)", "flask.request.args.get(\"weight\", default=None, type=float) if age is None or weight is None: return flask.jsonify({", "model = LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\")", "return flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\" }) x = [age, weight] blood_fat", "blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\": blood_fat }) if", "age = flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\", default=None, type=float) if age is", "import flask import pandas as pd from sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\")", "\"Bad Request\" }) x = [age, weight] blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\":", "<gh_stars>0 import flask import pandas as pd from sklearn.linear_model import LinearRegression train_X =", "= flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\",", "weight = flask.request.args.get(\"weight\", default=None, type=float) if age is None or weight is None:", "400, \"msg\": \"Bad Request\" }) x = [age, weight] blood_fat = model.predict(x)[0] return", "def bloodfat(): age = flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\", default=None, type=float) if", "\"msg\": \"Bad Request\" }) x = [age, weight] blood_fat = model.predict(x)[0] return flask.jsonify({", "None: return flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\" }) x = [age, weight]", "age is None or weight is None: return flask.jsonify({ \"code\": 400, \"msg\": \"Bad", "bloodfat(): age = 
flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\", default=None, type=float) if age", "flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\", default=None,", "if age is None or weight is None: return flask.jsonify({ \"code\": 400, \"msg\":", "return flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\": blood_fat }) if __name__ == \"__main__\":", "app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\", default=None,", "train_y = train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\") def", "return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\",", "Request\" }) x = [age, weight] blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\": 200,", "None or weight is None: return flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\" })", "pandas as pd from sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood", "is None or weight is None: return flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\"", "[age, weight] blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\": blood_fat", "pd from sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model", "= train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\") def index():", "app = flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\") 
@app.route(\"/bloodfat\") def bloodfat(): age =", "LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat():", "import pandas as pd from sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y =", "train_y) app = flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age", "type=float) if age is None or weight is None: return flask.jsonify({ \"code\": 400,", "fat\") model = LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\")", "sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X,", "@app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\", default=None, type=float)", "flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\" }) x = [age, weight] blood_fat =", "default=None, type=int) weight = flask.request.args.get(\"weight\", default=None, type=float) if age is None or weight", "= flask.request.args.get(\"weight\", default=None, type=float) if age is None or weight is None: return", "model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\": blood_fat }) if __name__ ==", "LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y) app", "}) x = [age, weight] blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\":", "= LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\") def index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def", "= [age, 
weight] blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\":", "weight] blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\": blood_fat })", "flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\", default=None, type=float) if age is None or", "flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\": blood_fat }) if __name__ == \"__main__\": app.run(debug=True)", "default=None, type=float) if age is None or weight is None: return flask.jsonify({ \"code\":", "from sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model =", "= model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\": \"OK\", \"result\": blood_fat }) if __name__", "type=int) weight = flask.request.args.get(\"weight\", default=None, type=float) if age is None or weight is", "index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\", default=None, type=int) weight =", "weight is None: return flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\" }) x =", "import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y)", "train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y) app =", "as pd from sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\")", "pd.read_csv(\"blood_fat.csv\") train_y = train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\")", "train_X.pop(\"blood fat\") model = LinearRegression().fit(train_X, train_y) app = flask.Flask(__name__) @app.route(\"/\") def index(): return", "@app.route(\"/\") def index(): 
return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\", default=None, type=int)", "def index(): return app.send_static_file(\"index.html\") @app.route(\"/bloodfat\") def bloodfat(): age = flask.request.args.get(\"age\", default=None, type=int) weight", "= flask.request.args.get(\"age\", default=None, type=int) weight = flask.request.args.get(\"weight\", default=None, type=float) if age is None", "or weight is None: return flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\" }) x", "is None: return flask.jsonify({ \"code\": 400, \"msg\": \"Bad Request\" }) x = [age,", "\"code\": 400, \"msg\": \"Bad Request\" }) x = [age, weight] blood_fat = model.predict(x)[0]", "x = [age, weight] blood_fat = model.predict(x)[0] return flask.jsonify({ \"code\": 200, \"msg\": \"OK\",", "flask import pandas as pd from sklearn.linear_model import LinearRegression train_X = pd.read_csv(\"blood_fat.csv\") train_y" ]
[ "= ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request:", "django.forms.models import model_to_dict from Sparrow.action.common_action import * from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators", "from django.forms.models import model_to_dict from Sparrow.action.common_action import * from dal.dao.action_dao import ActionDao from", "response = Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info", "def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(),", "import * from django.http.request import * from Sparrow.action.track import track from dal.models import", "return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success,", "from Sparrow.action.common_action import * from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import login_required from", "Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10)", "daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', 
daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json')", "HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success',", "top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response = Response(Success, 'Success', top_active_apis_info) return HttpResponse(response.toJson(), content_type='application/json')", "ActionDao from django.contrib.auth.decorators import login_required from Sparrow.action.response import * from django.http.request import *", "import ActionDao from django.contrib.auth.decorators import login_required from Sparrow.action.response import * from django.http.request import", "= ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request:", "HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo)", "@track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info) return", "from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import login_required from Sparrow.action.response import * from", "ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') 
@track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest):", "response = Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info", "dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import login_required from Sparrow.action.response import * from django.http.request", "HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response = Response(Success, 'Success',", "def top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response = Response(Success, 'Success', top_active_apis_info) return HttpResponse(response.toJson(),", "import * from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import login_required from Sparrow.action.response import", "top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json')", "Sparrow.action.common_action import * from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import login_required from Sparrow.action.response", "from Sparrow.action.response import * from django.http.request import * from Sparrow.action.track import track from", "track from dal.models import * class ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info =", "'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def 
top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response", "top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response =", "dal.models import * class ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response", "@track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response = Response(Success, 'Success', top_active_apis_info) return", "import * from Sparrow.action.track import track from dal.models import * class ActionAction: @track(ActionType.ActionDailyActiveInfo)", "ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', daily_active_info)", "from Sparrow.action.track import track from dal.models import * class ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request:", "daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def", "* from Sparrow.action.track import track from dal.models import * class ActionAction: @track(ActionType.ActionDailyActiveInfo) def", "django.http.request import * from Sparrow.action.track import track from dal.models import * class ActionAction:", "* from django.http.request import * from Sparrow.action.track import track from dal.models import *", "from dal.models import * class ActionAction: 
@track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14)", "= Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info =", "Sparrow.action.track import track from dal.models import * class ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest):", "* from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import login_required from Sparrow.action.response import *", "'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response", "content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info)", "import * class ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response =", "Sparrow.action.response import * from django.http.request import * from Sparrow.action.track import track from dal.models", "daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response =", "from django.contrib.auth.decorators import login_required from Sparrow.action.response import * from django.http.request import * from", "class ActionAction: 
@track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success, 'Success',", "def top_active_users_info(request: HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(),", "Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10)", "import track from dal.models import * class ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info", "top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def", "model_to_dict from Sparrow.action.common_action import * from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import login_required", "import login_required from Sparrow.action.response import * from django.http.request import * from Sparrow.action.track import", "from django.http.request import * from Sparrow.action.track import track from dal.models import * class", "@track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success, 'Success', daily_active_info) return", "<filename>Sparrow/action/action_action.py<gh_stars>10-100 from django.forms.models import model_to_dict from Sparrow.action.common_action import * from dal.dao.action_dao import ActionDao", "django.contrib.auth.decorators import login_required from Sparrow.action.response import * from 
django.http.request import * from Sparrow.action.track", "content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response = Response(Success, 'Success', top_active_apis_info)", "* class ActionAction: @track(ActionType.ActionDailyActiveInfo) def daily_active_info(request: HttpRequest): daily_active_info = ActionDao.get_daily_active_info(14) response = Response(Success,", "HttpRequest): top_active_users_info = ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo)", "return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest): top_active_apis_info = ActionDao.get_top_active_apis_info(10) response = Response(Success,", "import model_to_dict from Sparrow.action.common_action import * from dal.dao.action_dao import ActionDao from django.contrib.auth.decorators import", "= Response(Success, 'Success', daily_active_info) return HttpResponse(response.to_json_with_mm_dd(), content_type='application/json') @track(ActionType.ActionTopActiveUserInfo) def top_active_users_info(request: HttpRequest): top_active_users_info =", "login_required from Sparrow.action.response import * from django.http.request import * from Sparrow.action.track import track", "ActionDao.get_top_active_users_info(10) response = Response(Success, 'Success', top_active_users_info) return HttpResponse(response.toJson(), content_type='application/json') @track(ActionType.ActionTopActiveApisInfo) def top_active_apis_info(request: HttpRequest):" ]
[ "Finder import requests class Book(Finder): def search_book(self, query): # search and get the", "import requests class Book(Finder): def search_book(self, query): # search and get the response", "search_book(self, query): # search and get the response resp = requests.get(self.open_library.replace(\"[query]\", query)).json()[\"docs\"] return", "class Book(Finder): def search_book(self, query): # search and get the response resp =", "dashboard.utils.finder import Finder import requests class Book(Finder): def search_book(self, query): # search and", "from dashboard.utils.finder import Finder import requests class Book(Finder): def search_book(self, query): # search", "requests class Book(Finder): def search_book(self, query): # search and get the response resp", "def search_book(self, query): # search and get the response resp = requests.get(self.open_library.replace(\"[query]\", query)).json()[\"docs\"]", "Book(Finder): def search_book(self, query): # search and get the response resp = requests.get(self.open_library.replace(\"[query]\",", "query): # search and get the response resp = requests.get(self.open_library.replace(\"[query]\", query)).json()[\"docs\"] return resp", "import Finder import requests class Book(Finder): def search_book(self, query): # search and get" ]
[ "import Manager __version__ = '2.0.1' __all__ = ( 'Doc', 'Manager', 'DocumentInitDataError', 'DocumentDoesNotExist', )", "import Doc from .exceptions import DocumentDoesNotExist, DocumentInitDataError from .manager import Manager __version__ =", "from .manager import Manager __version__ = '2.0.1' __all__ = ( 'Doc', 'Manager', 'DocumentInitDataError',", ".doc import Doc from .exceptions import DocumentDoesNotExist, DocumentInitDataError from .manager import Manager __version__", "DocumentDoesNotExist, DocumentInitDataError from .manager import Manager __version__ = '2.0.1' __all__ = ( 'Doc',", "from .doc import Doc from .exceptions import DocumentDoesNotExist, DocumentInitDataError from .manager import Manager", "DocumentInitDataError from .manager import Manager __version__ = '2.0.1' __all__ = ( 'Doc', 'Manager',", "from .exceptions import DocumentDoesNotExist, DocumentInitDataError from .manager import Manager __version__ = '2.0.1' __all__", ".manager import Manager __version__ = '2.0.1' __all__ = ( 'Doc', 'Manager', 'DocumentInitDataError', 'DocumentDoesNotExist',", "Doc from .exceptions import DocumentDoesNotExist, DocumentInitDataError from .manager import Manager __version__ = '2.0.1'", "import DocumentDoesNotExist, DocumentInitDataError from .manager import Manager __version__ = '2.0.1' __all__ = (", ".exceptions import DocumentDoesNotExist, DocumentInitDataError from .manager import Manager __version__ = '2.0.1' __all__ =" ]
[ "for p in points: pose = PoseStamped() pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose)", "car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points): poses = [] for p in points:", "file is licensed under MIT license. # See the LICENSE file in the", "test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def", "msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses", "test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1], [3,3]]) point = np.array([1.1, 1.1]) index =", "= geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]])", "arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]])", "self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([4,", "= geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__ == '__main__': import rosunit rosunit.unitrun(\"car_core\", 'test_msgs_helpers',", "poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2,", "def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5],", "np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 
0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0],", "test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index,", "q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self):", "from numpy.testing import assert_almost_equal from std_msgs.msg import Header from geometry_msgs.msg import PoseStamped, Pose,", "np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses)", "MIT license. # See the LICENSE file in the project root for more", "point) self.assertEqual(index, 1) if __name__ == '__main__': import rosunit rosunit.unitrun(\"car_core\", 'test_msgs_helpers', TestMsgsHelpers) rosunit.unitrun(\"car_core\",", "point = np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__ ==", "point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses", "rosunit import numpy as np from numpy.testing import assert_almost_equal from std_msgs.msg import Header", "See the LICENSE file in the project root for more information. import unittest", "information. 
import unittest import rostest import rosunit import numpy as np from numpy.testing", "[2,2], [3,3]]) point = np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def", "[2,2], [3,3]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def", "msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr =", "[3,3]]) point = np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self):", "index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point =", "[4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = np.array([[0,0], [1,1], [2,2],", "= geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]])", "[3,3]]) point = np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__", "import assert_almost_equal from std_msgs.msg import Header from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion", "4])) self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True)", "in points: pose = PoseStamped() pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose) return poses", "self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1,", "Header from geometry_msgs.msg import PoseStamped, 
Pose, Point, Quaternion from nav_msgs.msg import Path from", "geom_helpers def get_poses_helper(points): poses = [] for p in points: pose = PoseStamped()", "[4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr", "LICENSE file in the project root for more information. import unittest import rostest", "def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([4, 4]) index", "[1,1], [3,3]]) point = np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if", "import PoseStamped, Pose, Point, Quaternion from nav_msgs.msg import Path from car_core.common import msgs_helpers,", "def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1], [3,3]]) point = np.array([1.1, 1.1]) index", "for more information. import unittest import rostest import rosunit import numpy as np", "[1,1], [2,2], [3,3]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3)", "= np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point)", "np from numpy.testing import assert_almost_equal from std_msgs.msg import Header from geometry_msgs.msg import PoseStamped,", "def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 1]) index", "np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index,", "[3,3]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self):", "[1,1], [2,2], [3,3]]) point = np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point) 
self.assertEqual(index, 1)", "[1,1], [2,2], [3,3]]) point = np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1)", "Quaternion from nav_msgs.msg import Path from car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points): poses", "self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1], [3,3]]) point = np.array([1.1,", "is licensed under MIT license. # See the LICENSE file in the project", "import rostest import rosunit import numpy as np from numpy.testing import assert_almost_equal from", "test_array_to_point_ok(self): arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr =", "from car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points): poses = [] for p in", "poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def", "p[1], p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr", "TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([0.9, 0.9])", "geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point", "PoseStamped, Pose, Point, Quaternion from nav_msgs.msg import Path from car_core.common import msgs_helpers, geom_helpers", "1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__ == '__main__': import rosunit", "nav_msgs.msg import Path from car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points): poses = []", "[3,3]]) point 
= np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self):", "np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index,", "point = np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses", "more information. import unittest import rostest import rosunit import numpy as np from", "np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses = np.array([[0,0],", "= np.array([[0,0]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def", "in the project root for more information. import unittest import rostest import rosunit", "np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses = np.array([[0,0],", "np.array([[0,0]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self):", "[1,1], [1,1], [3,3]]) point = np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1)", "= np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__ == '__main__':", "index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1],", "index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1], [2,2],", "test_point_to_array_ok(self): p = 
Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses", "assert_almost_equal from std_msgs.msg import Header from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion from", "0) def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([4, 4])", "point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point = np.array([4, 4]) index", "msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr)", "= [] for p in points: pose = PoseStamped() pose.pose.position = Point(p[0], p[1],", "return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr,", "= np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses,", "test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 3]) index =", "3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2]))", "= np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5],", "pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q", "[7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr =", 
"Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true =", "point = np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses", "import Path from car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points): poses = [] for", "test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]]))", "def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0],", "np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]])", "get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = np.array([[0,0], [1,1],", "1) def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 3])", "= np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point)", "geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point = np.array([4, 4])", "Path from car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points): poses = [] for p", "3]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1],", "= np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def 
test_get_closest_path_point_first(self): poses =", "np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr,", "geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion from nav_msgs.msg import Path from car_core.common import", "= msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr", "1) if __name__ == '__main__': import rosunit rosunit.unitrun(\"car_core\", 'test_msgs_helpers', TestMsgsHelpers) rosunit.unitrun(\"car_core\", 'test_geom_helpers', TestGeomHelpers)", "def test_point_to_array_ok(self): p = Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self):", "unittest import rostest import rosunit import numpy as np from numpy.testing import assert_almost_equal", "arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3)", "licensed under MIT license. 
# See the LICENSE file in the project root", "np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__ == '__main__': import", "import unittest import rostest import rosunit import numpy as np from numpy.testing import", "import msgs_helpers, geom_helpers def get_poses_helper(points): poses = [] for p in points: pose", "3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses,", "geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__ == '__main__': import rosunit rosunit.unitrun(\"car_core\", 'test_msgs_helpers', TestMsgsHelpers)", "# See the LICENSE file in the project root for more information. import", "from nav_msgs.msg import Path from car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points): poses =", "def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 3]) index", "4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point", "as np from numpy.testing import assert_almost_equal from std_msgs.msg import Header from geometry_msgs.msg import", "self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point = np.array([4, 4]) index =", "= np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses =", "PoseStamped() pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self):", "= np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses 
=", "assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr)", "poses = [] for p in points: pose = PoseStamped() pose.pose.position = Point(p[0],", "import numpy as np from numpy.testing import assert_almost_equal from std_msgs.msg import Header from", "= np.array([[0,0], [1,1], [1,1], [3,3]]) point = np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point)", "msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2]) point =", "from std_msgs.msg import Header from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion from nav_msgs.msg", "msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3) arr =", "= geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1], [3,3]])", "test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0],", "0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1], [3,3]]) point = np.array([1.1, 1.1])", "assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3) arr = msgs_helpers.point_to_array(p)", "np.array([[0,0], [1,1], [1,1], [3,3]]) point = np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index,", "self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true", "test_get_closest_path_point_last(self): 
poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([4, 4]) index =", "= np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point)", "[4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class", "np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr,", "[1,1], [2,2], [3,3]]) point = np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0)", "pose = PoseStamped() pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase):", "[7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def", "import rosunit import numpy as np from numpy.testing import assert_almost_equal from std_msgs.msg import", "This file is licensed under MIT license. # See the LICENSE file in", "Pose, Point, Quaternion from nav_msgs.msg import Path from car_core.common import msgs_helpers, geom_helpers def", "project root for more information. import unittest import rostest import rosunit import numpy", "get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self):", "license. 
# See the LICENSE file in the project root for more information.", "arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2])", "0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1],", "msgs_helpers, geom_helpers def get_poses_helper(points): poses = [] for p in points: pose =", "np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]])", "point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]]) poses", "[6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]])", "index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) if __name__ == '__main__': import rosunit rosunit.unitrun(\"car_core\",", "under MIT license. # See the LICENSE file in the project root for", "= get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = np.array([[0,0],", "file in the project root for more information. import unittest import rostest import", "Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6],", "the project root for more information. 
import unittest import rostest import rosunit import", "= Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3],", "[2,2], [3,3]]) point = np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def", "p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr =", "std_msgs.msg import Header from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion from nav_msgs.msg import", "= np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point)", "index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2],", "= geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]])", "= msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2], [4,5], [6,7]]) poses =", "[3,3]]) point = np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self):", "self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1,", "test_get_closest_path_point_regular(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([0.9, 0.9]) index =", "self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2],", "def test_get_closest_path_point_regular(self): poses = 
np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([0.9, 0.9]) index", "poses = np.array([[0,0], [1,1], [1,1], [3,3]]) point = np.array([1.1, 1.1]) index = geom_helpers.get_closest_path_point(poses,", "TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4]))", "np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point,", "poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses =", "def test_array_to_point_ok(self): arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr", "Point(p[0], p[1], p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4)", "point = np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses", "self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self):", "= np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses =", "#!/usr/bin/python # This file is licensed under MIT license. 
# See the LICENSE", "= get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def", "1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1],", "point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point =", "# This file is licensed under MIT license. # See the LICENSE file", "point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point =", "= msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self):", "[2,2], [3,3]]) point = np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def", "poses = np.array([[0,0]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0)", "the LICENSE file in the project root for more information. 
import unittest import", "assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses)", "import Header from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion from nav_msgs.msg import Path", "poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses,", "np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 3]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index,", "4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1],", "get_poses_helper(points): poses = [] for p in points: pose = PoseStamped() pose.pose.position =", "test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 1]) index =", "p in points: pose = PoseStamped() pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose) return", "class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([0.9,", "1) def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 1])", "geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_first(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point", "poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses,", "root for more information. 
import unittest import rostest import rosunit import numpy as", "= Point(p[0], p[1], p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q =", "np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index,", "= np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses =", "np.array([-1, 1]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_last(self): poses = np.array([[0,0],", "self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def", "def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True)", "arr = np.array([[1,2], [4,5], [6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]])", "rostest import rosunit import numpy as np from numpy.testing import assert_almost_equal from std_msgs.msg", "self.assertEqual(index, 1) if __name__ == '__main__': import rosunit rosunit.unitrun(\"car_core\", 'test_msgs_helpers', TestMsgsHelpers) rosunit.unitrun(\"car_core\", 'test_geom_helpers',", "p = Point(1,2,3) arr = msgs_helpers.point_to_array(p) assert_almost_equal(arr, np.array([1,2])) self.assertTrue(True) def test_path_poses_to_array_ok(self): poses =", "numpy as np from numpy.testing import assert_almost_equal from std_msgs.msg import Header from geometry_msgs.msg", "= geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 3) def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point = np.array([4,", "def get_poses_helper(points): 
poses = [] for p in points: pose = PoseStamped() pose.pose.position", "point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point =", "poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q)", "poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([0.9, 0.9]) index = geom_helpers.get_closest_path_point(poses,", "points: pose = PoseStamped() pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose) return poses class", "geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1], [3,3]]) point", "index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2],", "point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses = np.array([[0,0], [1,1], [1,1], [3,3]]) point =", "Point, Quaternion from nav_msgs.msg import Path from car_core.common import msgs_helpers, geom_helpers def get_poses_helper(points):", "= PoseStamped() pose.pose.position = Point(p[0], p[1], p[2]) poses.append(pose) return poses class TestMsgsHelpers(unittest.TestCase): def", "Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p =", "poses = get_poses_helper([[1,2,3], [4,5,6], [7,8,9]]) arr = msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True)", "[4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0))", "= msgs_helpers.quaterion_to_array(q) 
assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p = Point(1,2,3) arr", "from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion from nav_msgs.msg import Path from car_core.common", "= msgs_helpers.path_poses_to_array(poses) assert_almost_equal(arr, np.array([[1,2], [4,5], [7,8]])) self.assertTrue(True) def test_array_to_point_ok(self): arr = np.array([1,2]) point", "[] for p in points: pose = PoseStamped() pose.pose.position = Point(p[0], p[1], p[2])", "geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 1) def test_get_closest_path_point_far(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point", "[6,7]]) poses = msgs_helpers.array_to_path_poses(arr) poses_true = get_poses_helper([[1,2,0], [4,5,0], [6,7,0]]) self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase):", "def test_get_closest_path_point_single_point(self): poses = np.array([[0,0]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point)", "point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses, point) self.assertEqual(index, 0) def test_get_closest_path_point_matching_points(self): poses", "numpy.testing import assert_almost_equal from std_msgs.msg import Header from geometry_msgs.msg import PoseStamped, Pose, Point,", "arr = np.array([1,2]) point = msgs_helpers.array_to_point(arr) self.assertEqual(point, Point(1,2,0)) def test_array_to_path_poses_ok(self): arr = np.array([[1,2],", "= Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3, 4])) self.assertTrue(True) def test_point_to_array_ok(self): p", "class TestMsgsHelpers(unittest.TestCase): def test_quaterion_to_array_ok(self): q = Quaternion(1,2,3,4) arr = msgs_helpers.quaterion_to_array(q) assert_almost_equal(arr, np.array([1,2, 3,", "poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = 
np.array([[0,0], [1,1], [2,2], [3,3]]) point =", "poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point = np.array([4, 4]) index = geom_helpers.get_closest_path_point(poses,", "self.assertEqual(poses, poses) class TestGeomHelpers(unittest.TestCase): def test_get_closest_path_point_regular(self): poses = np.array([[0,0], [1,1], [2,2], [3,3]]) point" ]
[ "import TestCase from ..test.FriendsMixIn import * from ..test.LoginMixIn import * class FriendSearchViewTest(TestCase, FriendsMixIn,", "self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser3'} response = self.client.post(reverse('ask:friends.search'), data=form)", "[self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser3'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']),", "data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'),", "self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']),", "def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4])", "self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self):", "def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response = 
self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [])", "{'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form =", "setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response =", "= self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response", "FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form =", "self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response =", "test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4,", "self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'),", "self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, 
self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response", "'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\")", "self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser3'} response =", "self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form)", "{'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self):", "reverse from django.test import TestCase from ..test.FriendsMixIn import * from ..test.LoginMixIn import *", "import * from ..test.LoginMixIn import * class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users()", "self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text':", "'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def 
test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text':", "..test.FriendsMixIn import * from ..test.LoginMixIn import * class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self):", "self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form)", "= {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form", "response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'}", "def setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response", "self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']),", "self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form)", "from django.test import TestCase from ..test.FriendsMixIn import * from ..test.LoginMixIn import * class", "data=form) 
self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'}", "= self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser3'} response", "class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form", "<gh_stars>0 from django.shortcuts import reverse from django.test import TestCase from ..test.FriendsMixIn import *", "self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'),", "import * class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self):", "self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self):", "[]) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']),", "form = {'search_text': 'TestUser'} response = 
self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5])", "from ..test.FriendsMixIn import * from ..test.LoginMixIn import * class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def", "LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'}", "form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\")", "def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser3'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [])", "from ..test.LoginMixIn import * class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations()", "'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text':", "django.test import TestCase from ..test.FriendsMixIn import * from ..test.LoginMixIn import * class FriendSearchViewTest(TestCase,", "response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser3'}", "response = self.client.post(reverse('ask:friends.search'), 
data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form", "= self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form =", "= {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form", "data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser3'} response = self.client.post(reverse('ask:friends.search'),", "test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def", "form = {'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\")", "{'search_text': 'TestUser4'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [self.user4]) def test_search_for_user2_invalid_searchtext_returns_empty_list(self): self.login_user(username=\"TestUser2\") form =", "* from ..test.LoginMixIn import * class 
FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends()", "self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1,", "from django.shortcuts import reverse from django.test import TestCase from ..test.FriendsMixIn import * from", "= {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [ self.user4, self.user1, self.user5]) def", "def test_search_for_user2_returns_matching_users(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), [", "* class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\")", "FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations() def test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text':", "django.shortcuts import reverse from django.test import TestCase from ..test.FriendsMixIn import * from ..test.LoginMixIn", "..test.LoginMixIn import * class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn): def setUp(self): self.create_users() self.make_friends() self.create_invitations() def", "test_search_for_user_with_no_friends_returns_empty_list(self): self.login_user(username=\"IhaveNoFriends\") form = {'search_text': 'TestUser'} response = self.client.post(reverse('ask:friends.search'), data=form) self.assertEqual(list(response.context['friends']), []) 
def", "import reverse from django.test import TestCase from ..test.FriendsMixIn import * from ..test.LoginMixIn import", "[ self.user4, self.user1, self.user5]) def test_search_for_user2_with_one_matching_user_returns_one_user(self): self.login_user(username=\"TestUser2\") form = {'search_text': 'TestUser4'} response =", "TestCase from ..test.FriendsMixIn import * from ..test.LoginMixIn import * class FriendSearchViewTest(TestCase, FriendsMixIn, LoginMixIn):" ]
[ "__author__ = 'ipetrash' from PyQt5.QtGui import QPixmap, QPainter, QFont from PyQt5.QtWidgets import QApplication,", "= QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0, 60,", "-*- __author__ = 'ipetrash' from PyQt5.QtGui import QPixmap, QPainter, QFont from PyQt5.QtWidgets import", "text) rect = QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft, text) w =", "QFont from PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore import Qt, QRect app =", "QLabel from PyQt5.QtCore import Qt, QRect app = QApplication([]) text = \"Hello World!\"", "# -*- coding: utf-8 -*- __author__ = 'ipetrash' from PyQt5.QtGui import QPixmap, QPainter,", "QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0, 70,", "= \"Hello World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12))", "QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0, 60, 70,", "50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect,", "Qt, QRect app = QApplication([]) text = \"Hello World!\" pixmap = QPixmap(180, 130)", "PyQt5.QtCore import Qt, QRect app = QApplication([]) text = \"Hello World!\" pixmap =", "rect = QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0,", "-*- coding: utf-8 -*- __author__ = 'ipetrash' from PyQt5.QtGui import QPixmap, QPainter, QFont", "utf-8 -*- __author__ = 'ipetrash' from PyQt5.QtGui import QPixmap, QPainter, QFont from PyQt5.QtWidgets", "World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect =", "QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft, text) w = QLabel() 
w.setPixmap(pixmap) w.show()", "= QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0,", "QApplication([]) text = \"Hello World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap)", "= 'ipetrash' from PyQt5.QtGui import QPixmap, QPainter, QFont from PyQt5.QtWidgets import QApplication, QLabel", "pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0, 70, 50) painter.drawRect(rect)", "painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft,", "app = QApplication([]) text = \"Hello World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter", "coding: utf-8 -*- __author__ = 'ipetrash' from PyQt5.QtGui import QPixmap, QPainter, QFont from", "text = \"Hello World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial',", "import QPixmap, QPainter, QFont from PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore import Qt,", "= QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft, text) w = QLabel() w.setPixmap(pixmap)", "'ipetrash' from PyQt5.QtGui import QPixmap, QPainter, QFont from PyQt5.QtWidgets import QApplication, QLabel from", "QApplication, QLabel from PyQt5.QtCore import Qt, QRect app = QApplication([]) text = \"Hello", "\"Hello World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect", "= QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap,", "painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect", "from PyQt5.QtCore import Qt, QRect app = QApplication([]) text = \"Hello World!\" pixmap", 
"import Qt, QRect app = QApplication([]) text = \"Hello World!\" pixmap = QPixmap(180,", "12)) rect = QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect =", "QPixmap, QPainter, QFont from PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore import Qt, QRect", "PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore import Qt, QRect app = QApplication([]) text", "QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text)", "painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0, 70, 50) painter.drawRect(rect) painter.drawText(rect,", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'ipetrash' from PyQt5.QtGui import", "0, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0, 60, 70, 50)", "130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0, 0, 70, 50)", "painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft, text)", "70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.TextWordWrap, text) rect = QRect(0, 60, 70, 50) painter.drawRect(rect)", "pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter = QPainter(pixmap) painter.setFont(QFont('Arial', 12)) rect = QRect(0,", "from PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore import Qt, QRect app = QApplication([])", "Qt.TextWordWrap, text) rect = QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft, text) w", "60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft, text) w = QLabel() w.setPixmap(pixmap) w.show() app.exec()", "python3 # -*- coding: utf-8 -*- __author__ = 'ipetrash' from PyQt5.QtGui import QPixmap,", "rect = QRect(0, 60, 70, 50) painter.drawRect(rect) painter.drawText(rect, Qt.AlignLeft, text) 
w = QLabel()", "QPainter, QFont from PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore import Qt, QRect app", "QRect app = QApplication([]) text = \"Hello World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white)", "= QApplication([]) text = \"Hello World!\" pixmap = QPixmap(180, 130) pixmap.fill(Qt.white) painter =", "from PyQt5.QtGui import QPixmap, QPainter, QFont from PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore", "import QApplication, QLabel from PyQt5.QtCore import Qt, QRect app = QApplication([]) text =", "PyQt5.QtGui import QPixmap, QPainter, QFont from PyQt5.QtWidgets import QApplication, QLabel from PyQt5.QtCore import" ]
[ "for i in scanjs: new_target = target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i", "= requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru: print \"", "8.0; Windows NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i", "target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request = requests.get(new_target, headers=headers).text callback_possibru =", "target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'} response =", "= re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target = target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target", "= target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'} response", "js_file_request = requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru: print", "re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru: print \" --- VULN --- \\n\" print", "scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target = target + i if(re.match(r'(http|https)\\:\\/\\/',i)):", "headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru: print \" --- VULN", "= open(\"targets.txt\", 'r').readlines() for target in targets: target = target.rstrip() headers = {'User-Agent':", "import ssl targets = open(\"targets.txt\", 'r').readlines() for target in targets: target = target.rstrip()", "target in targets: target = target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0;", "'r').readlines() for target in targets: target = target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible;", "{'User-Agent': 
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs", "= requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target = target", "requests import re import ssl targets = open(\"targets.txt\", 'r').readlines() for target in targets:", "if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request = requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for", "= re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru: print \" --- VULN --- \\n\"", "in callback_possibru: print \" --- VULN --- \\n\" print \"[\"+target+\"] \" + new_target", "requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru: print \" ---", "MSIE 8.0; Windows NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for", "response = requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target =", "new_target = target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request = requests.get(new_target, headers=headers).text", "new_target = i js_file_request = requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x", "targets: target = target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT", "headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target = target + i", "callback_possibru: print \" --- VULN --- \\n\" print \"[\"+target+\"] \" + new_target +", "re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target = 
target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target =", "VULN --- \\n\" print \"[\"+target+\"] \" + new_target + \" \" + x", "scanjs: new_target = target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request = requests.get(new_target,", "open(\"targets.txt\", 'r').readlines() for target in targets: target = target.rstrip() headers = {'User-Agent': 'Mozilla/4.0", "ssl targets = open(\"targets.txt\", 'r').readlines() for target in targets: target = target.rstrip() headers", "x in callback_possibru: print \" --- VULN --- \\n\" print \"[\"+target+\"] \" +", "targets = open(\"targets.txt\", 'r').readlines() for target in targets: target = target.rstrip() headers =", "re import ssl targets = open(\"targets.txt\", 'r').readlines() for target in targets: target =", "import re import ssl targets = open(\"targets.txt\", 'r').readlines() for target in targets: target", "for x in callback_possibru: print \" --- VULN --- \\n\" print \"[\"+target+\"] \"", "(compatible; MSIE 8.0; Windows NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response)", "--- VULN --- \\n\" print \"[\"+target+\"] \" + new_target + \" \" +", "target = target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'}", "i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request = requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request)", "js_file_request) for x in callback_possibru: print \" --- VULN --- \\n\" print \"[\"+target+\"]", "print \" --- VULN --- \\n\" print \"[\"+target+\"] \" + new_target + \"", "6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target", "callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru: print \" --- VULN 
---", "import requests import re import ssl targets = open(\"targets.txt\", 'r').readlines() for target in", "+ i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request = requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)',", "i js_file_request = requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x in callback_possibru:", "for target in targets: target = target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE", "in scanjs: new_target = target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request =", "requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs: new_target = target +", "i in scanjs: new_target = target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request", "\" --- VULN --- \\n\" print \"[\"+target+\"] \" + new_target + \" \"", "in targets: target = target.rstrip() headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows", "= target + i if(re.match(r'(http|https)\\:\\/\\/',i)): new_target = i js_file_request = requests.get(new_target, headers=headers).text callback_possibru", "headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'} response = requests.get(target.split('|')[0],", "NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs = re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in scanjs:", "'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs =", "= i js_file_request = requests.get(new_target, headers=headers).text callback_possibru = re.findall(r'(callback|jsonp)', js_file_request) for x in", "Windows NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text scanjs = 
re.findall(r'src=\"([^\"]+\\.js|json)?\"',response) for i in", "<filename>SOMEChecker/some_finder.py import requests import re import ssl targets = open(\"targets.txt\", 'r').readlines() for target", "= {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'} response = requests.get(target.split('|')[0], headers=headers).text" ]
[]
[ "<gh_stars>1-10 \"\"\"Expose public methods of simple_repr module.\"\"\" from .simple_repr import SimpleRepr __all__ =", "\"\"\"Expose public methods of simple_repr module.\"\"\" from .simple_repr import SimpleRepr __all__ = [\"SimpleRepr\"]" ]
[ "badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in event_ids) + '\\n') #", "'\\n') # Can change times to something more granular than seconds. f_time.write(' '.join(str(x)", "for x in userids]) with open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n') for badge", "badge in events: if badge not in badge_map: badge_map[badge] = badge_count badge_count +=", "f_times: times = [[float(y) for y in x.split()] for x in f_times] with", "def write(): try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as conn: cur", "as G from collections import namedtuple output_events = 'events.txt' output_time = 'time.txt' output_userids", "# Ordering is important for mapping results back to the data, if needed.", "are any repeated events, just skip the user. continue userids.append(userid) event_ids = []", "to something more granular than seconds. f_time.write(' '.join(str(x) for x in times) +", "to the data, if needed. cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER BY", "granular than seconds. f_time.write(' '.join(str(x) for x in times) + '\\n') with open(output_userids,", "important for mapping results back to the data, if needed. 
cur.execute('''SELECT userid, BadgeNames,", "f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x in userids]) with open(output_badge_labels, 'w') as f_badges:", "row in f_badge_labels: id, name = row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events, times=times,", "with open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n') for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge],", "x.split()] for x in f_times] with open(output_userids) as f_userids: next(f_userids) userids = [int(x)", "pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as conn: cur = conn.cursor() # Ordering", "= 1 userids = [] with open(output_events, 'w') as f_events, open(output_time, 'w') as", "any repeated events, just skip the user. continue userids.append(userid) event_ids = [] for", "something more granular than seconds. f_time.write(' '.join(str(x) for x in times) + '\\n')", "password=<PASSWORD>('DB password: '), host='psql-science') as conn: cur = conn.cursor() # Ordering is important", "'), host='psql-science') as conn: cur = conn.cursor() # Ordering is important for mapping", "more granular than seconds. 
f_time.write(' '.join(str(x) for x in times) + '\\n') with", "name = row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events, times=times, badge_map=badge_map, userids=userids) if __name__", "= [] for badge in events: if badge not in badge_map: badge_map[badge] =", "open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n') for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge))", "for x in f_events] with open(output_time) as f_times: times = [[float(y) for y", "with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x in userids])", "event_ids) + '\\n') # Can change times to something more granular than seconds.", "in f_times] with open(output_userids) as f_userids: next(f_userids) userids = [int(x) for x in", "back to the data, if needed. cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER", "open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row in f_badge_labels: id, name = row.split(',') badge_map[int(id)]", "= [[int(y) for y in x.split()] for x in f_events] with open(output_time) as", "in times) + '\\n') with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n'", "= 'time.txt' output_userids = 'userids.txt' output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events',", "f_events: events = [[int(y) for y in x.split()] for x in f_events] with", "the user. 
continue userids.append(userid) event_ids = [] for badge in events: if badge", "f_events, open(output_time, 'w') as f_time: for row in cur: userid, events, times =", "f_times] with open(output_userids) as f_userids: next(f_userids) userids = [int(x) for x in f_userids]", "= 'events.txt' output_time = 'time.txt' output_userids = 'userids.txt' output_badge_labels = 'badges.csv' SO_events =", "'.join(str(x) for x in event_ids) + '\\n') # Can change times to something", "print('Not running on DB.') def read_events(): with open(output_events) as f_events: events = [[int(y)", "pg.OperationalError: print('Not running on DB.') def read_events(): with open(output_events) as f_events: events =", "in events: if badge not in badge_map: badge_map[badge] = badge_count badge_count += 1", "userid''') badge_map = {} badge_count = 1 userids = [] with open(output_events, 'w')", "running on DB.') def read_events(): with open(output_events) as f_events: events = [[int(y) for", "events = [[int(y) for y in x.split()] for x in f_events] with open(output_time)", "+= 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in event_ids) + '\\n') # Can", "continue userids.append(userid) event_ids = [] for badge in events: if badge not in", "as f_userids: next(f_userids) userids = [int(x) for x in f_userids] badge_map = {}", "events: if badge not in badge_map: badge_map[badge] = badge_count badge_count += 1 event_ids.append(badge_map[badge])", "in f_userids] badge_map = {} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row in", "'events.txt' output_time = 'time.txt' output_userids = 'userids.txt' output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events',", "1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in event_ids) + '\\n') # Can change", "in f_events] with open(output_time) as f_times: times = [[float(y) for y in x.split()]", "namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def write(): 
try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password:", "as f_badges: f_badges.write('id, badge\\n') for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError:", "cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER BY userid''') badge_map = {} badge_count", "results back to the data, if needed. cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data", "= [[float(y) for y in x.split()] for x in f_times] with open(output_userids) as", "for row in f_badge_labels: id, name = row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events,", "for x in f_times] with open(output_userids) as f_userids: next(f_userids) userids = [int(x) for", "badge_map[int(id)] = name.strip() return SO_events(events=events, times=times, badge_map=badge_map, userids=userids) if __name__ == '__main__': write()", "x.split()] for x in f_events] with open(output_time) as f_times: times = [[float(y) for", "namedtuple output_events = 'events.txt' output_time = 'time.txt' output_userids = 'userids.txt' output_badge_labels = 'badges.csv'", "write(): try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as conn: cur =", "import namedtuple output_events = 'events.txt' output_time = 'time.txt' output_userids = 'userids.txt' output_badge_labels =", "read_events(): with open(output_events) as f_events: events = [[int(y) for y in x.split()] for", "# Can change times to something more granular than seconds. f_time.write(' '.join(str(x) for", "'w') as f_events, open(output_time, 'w') as f_time: for row in cur: userid, events,", "events, times = row[0], row[1], row[2] if len(set(times)) != len(times): # If there", "seconds. 
f_time.write(' '.join(str(x) for x in times) + '\\n') with open(output_userids, 'w') as", "badge_count = 1 userids = [] with open(output_events, 'w') as f_events, open(output_time, 'w')", "f_badge_labels: next(f_badge_labels) for row in f_badge_labels: id, name = row.split(',') badge_map[int(id)] = name.strip()", "userids = [] with open(output_events, 'w') as f_events, open(output_time, 'w') as f_time: for", "badge not in badge_map: badge_map[badge] = badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x)", "row[1], row[2] if len(set(times)) != len(times): # If there are any repeated events,", "= 'userids.txt' output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def", "__future__ import print_function import psycopg2 as pg import getpass as G from collections", "the data, if needed. cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER BY userid''')", "= namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def write(): try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB", "open(output_userids) as f_userids: next(f_userids) userids = [int(x) for x in f_userids] badge_map =", "= {} badge_count = 1 userids = [] with open(output_events, 'w') as f_events,", "badge_map[badge] = badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in event_ids)", "= [int(x) for x in f_userids] badge_map = {} with open(output_badge_labels) as f_badge_labels:", "Ordering is important for mapping results back to the data, if needed. 
cur.execute('''SELECT", "in event_ids) + '\\n') # Can change times to something more granular than", "x in times) + '\\n') with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) +", "f_userids: next(f_userids) userids = [int(x) for x in f_userids] badge_map = {} with", "BY userid''') badge_map = {} badge_count = 1 userids = [] with open(output_events,", "'\\n' for x in userids]) with open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n') for", "y in x.split()] for x in f_events] with open(output_time) as f_times: times =", "host='psql-science') as conn: cur = conn.cursor() # Ordering is important for mapping results", "conn.cursor() # Ordering is important for mapping results back to the data, if", "with open(output_events, 'w') as f_events, open(output_time, 'w') as f_time: for row in cur:", "userids = [int(x) for x in f_userids] badge_map = {} with open(output_badge_labels) as", "output_events = 'events.txt' output_time = 'time.txt' output_userids = 'userids.txt' output_badge_labels = 'badges.csv' SO_events", "f_time.write(' '.join(str(x) for x in times) + '\\n') with open(output_userids, 'w') as f_userids:", "psycopg2 as pg import getpass as G from collections import namedtuple output_events =", "with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row in f_badge_labels: id, name = row.split(',')", "badge)) except pg.OperationalError: print('Not running on DB.') def read_events(): with open(output_events) as f_events:", "userid, BadgeNames, Timestamp FROM so_data ORDER BY userid''') badge_map = {} badge_count =", "f_badges.write('id, badge\\n') for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running", "userid, events, times = row[0], row[1], row[2] if len(set(times)) != len(times): # If", "'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def write(): try: 
with pg.connect(database='stackexchange',", "= row[0], row[1], row[2] if len(set(times)) != len(times): # If there are any", "password: '), host='psql-science') as conn: cur = conn.cursor() # Ordering is important for", "collections import namedtuple output_events = 'events.txt' output_time = 'time.txt' output_userids = 'userids.txt' output_badge_labels", "f_events] with open(output_time) as f_times: times = [[float(y) for y in x.split()] for", "row in cur: userid, events, times = row[0], row[1], row[2] if len(set(times)) !=", "{} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row in f_badge_labels: id, name =", "events, just skip the user. continue userids.append(userid) event_ids = [] for badge in", "just skip the user. continue userids.append(userid) event_ids = [] for badge in events:", "cur = conn.cursor() # Ordering is important for mapping results back to the", "'.join(str(x) for x in times) + '\\n') with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n')", "open(output_time, 'w') as f_time: for row in cur: userid, events, times = row[0],", "'\\n') with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x in", "so_data ORDER BY userid''') badge_map = {} badge_count = 1 userids = []", "f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x in userids]) with open(output_badge_labels, 'w') as", "not in badge_map: badge_map[badge] = badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for", "['times', 'events', 'badge_map', 'userids']) def write(): try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '),", "cur: userid, events, times = row[0], row[1], row[2] if len(set(times)) != len(times): #", "try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as conn: cur = conn.cursor()", 
"open(output_events, 'w') as f_events, open(output_time, 'w') as f_time: for row in cur: userid,", "badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running on DB.') def read_events(): with", "f_time: for row in cur: userid, events, times = row[0], row[1], row[2] if", "+ '\\n' for x in userids]) with open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n')", "open(output_events) as f_events: events = [[int(y) for y in x.split()] for x in", "x in event_ids) + '\\n') # Can change times to something more granular", "as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x in userids]) with open(output_badge_labels, 'w')", "'w') as f_badges: f_badges.write('id, badge\\n') for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except", "= conn.cursor() # Ordering is important for mapping results back to the data,", "SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def write(): try: with pg.connect(database='stackexchange', user='utkarsh',", "times) + '\\n') with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for", "in badge_map: badge_map[badge] = badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x", "from __future__ import print_function import psycopg2 as pg import getpass as G from", "output_userids = 'userids.txt' output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids'])", "for y in x.split()] for x in f_times] with open(output_userids) as f_userids: next(f_userids)", "{} badge_count = 1 userids = [] with open(output_events, 'w') as f_events, open(output_time,", "y in x.split()] for x in f_times] with open(output_userids) as f_userids: next(f_userids) userids", "getpass as G from collections import namedtuple output_events = 'events.txt' 
output_time = 'time.txt'", "with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as conn: cur = conn.cursor() #", "import print_function import psycopg2 as pg import getpass as G from collections import", "times to something more granular than seconds. f_time.write(' '.join(str(x) for x in times)", "userids]) with open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n') for badge in badge_map: f_badges.write('{},", "next(f_userids) userids = [int(x) for x in f_userids] badge_map = {} with open(output_badge_labels)", "next(f_badge_labels) for row in f_badge_labels: id, name = row.split(',') badge_map[int(id)] = name.strip() return", "if needed. cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER BY userid''') badge_map =", "x in f_times] with open(output_userids) as f_userids: next(f_userids) userids = [int(x) for x", "as f_badge_labels: next(f_badge_labels) for row in f_badge_labels: id, name = row.split(',') badge_map[int(id)] =", "row[0], row[1], row[2] if len(set(times)) != len(times): # If there are any repeated", "x in userids]) with open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n') for badge in", "for row in cur: userid, events, times = row[0], row[1], row[2] if len(set(times))", "# If there are any repeated events, just skip the user. continue userids.append(userid)", "output_time = 'time.txt' output_userids = 'userids.txt' output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events', ['times',", "with open(output_time) as f_times: times = [[float(y) for y in x.split()] for x", "with open(output_events) as f_events: events = [[int(y) for y in x.split()] for x", "Can change times to something more granular than seconds. 
f_time.write(' '.join(str(x) for x", "f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running on DB.') def read_events(): with open(output_events)", "[] for badge in events: if badge not in badge_map: badge_map[badge] = badge_count", "'events', 'badge_map', 'userids']) def write(): try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science')", "{}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running on DB.') def read_events(): with open(output_events) as", "import psycopg2 as pg import getpass as G from collections import namedtuple output_events", "as f_time: for row in cur: userid, events, times = row[0], row[1], row[2]", "for y in x.split()] for x in f_events] with open(output_time) as f_times: times", "import getpass as G from collections import namedtuple output_events = 'events.txt' output_time =", "times = [[float(y) for y in x.split()] for x in f_times] with open(output_userids)", "len(set(times)) != len(times): # If there are any repeated events, just skip the", "'userids']) def write(): try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as conn:", "= [] with open(output_events, 'w') as f_events, open(output_time, 'w') as f_time: for row", "FROM so_data ORDER BY userid''') badge_map = {} badge_count = 1 userids =", "change times to something more granular than seconds. 
f_time.write(' '.join(str(x) for x in", "for badge in events: if badge not in badge_map: badge_map[badge] = badge_count badge_count", "if badge not in badge_map: badge_map[badge] = badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write('", "= row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events, times=times, badge_map=badge_map, userids=userids) if __name__ ==", "'badge_map', 'userids']) def write(): try: with pg.connect(database='stackexchange', user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as", "userids.append(userid) event_ids = [] for badge in events: if badge not in badge_map:", "def read_events(): with open(output_events) as f_events: events = [[int(y) for y in x.split()]", "data, if needed. cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER BY userid''') badge_map", "[] with open(output_events, 'w') as f_events, open(output_time, 'w') as f_time: for row in", "open(output_time) as f_times: times = [[float(y) for y in x.split()] for x in", "x in f_events] with open(output_time) as f_times: times = [[float(y) for y in", "[[float(y) for y in x.split()] for x in f_times] with open(output_userids) as f_userids:", "needed. cur.execute('''SELECT userid, BadgeNames, Timestamp FROM so_data ORDER BY userid''') badge_map = {}", "for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running on DB.')", "event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in event_ids) + '\\n') # Can change times", "'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x in userids]) with open(output_badge_labels,", "as conn: cur = conn.cursor() # Ordering is important for mapping results back", "+ '\\n') # Can change times to something more granular than seconds. f_time.write('", "skip the user. 
continue userids.append(userid) event_ids = [] for badge in events: if", "for mapping results back to the data, if needed. cur.execute('''SELECT userid, BadgeNames, Timestamp", "for x in f_userids] badge_map = {} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for", "is important for mapping results back to the data, if needed. cur.execute('''SELECT userid,", "as f_times: times = [[float(y) for y in x.split()] for x in f_times]", "badge\\n') for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running on", "from collections import namedtuple output_events = 'events.txt' output_time = 'time.txt' output_userids = 'userids.txt'", "row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events, times=times, badge_map=badge_map, userids=userids) if __name__ == '__main__':", "times = row[0], row[1], row[2] if len(set(times)) != len(times): # If there are", "1 userids = [] with open(output_events, 'w') as f_events, open(output_time, 'w') as f_time:", "f_badge_labels: id, name = row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events, times=times, badge_map=badge_map, userids=userids)", "!= len(times): # If there are any repeated events, just skip the user.", "ORDER BY userid''') badge_map = {} badge_count = 1 userids = [] with", "badge_map = {} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row in f_badge_labels: id,", "G from collections import namedtuple output_events = 'events.txt' output_time = 'time.txt' output_userids =", "in x.split()] for x in f_times] with open(output_userids) as f_userids: next(f_userids) userids =", "id, name = row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events, times=times, badge_map=badge_map, userids=userids) if", "Timestamp FROM so_data ORDER BY userid''') badge_map = {} badge_count = 1 userids", "on DB.') def read_events(): with open(output_events) as 
f_events: events = [[int(y) for y", "in cur: userid, events, times = row[0], row[1], row[2] if len(set(times)) != len(times):", "DB.') def read_events(): with open(output_events) as f_events: events = [[int(y) for y in", "[int(x) for x in f_userids] badge_map = {} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels)", "if len(set(times)) != len(times): # If there are any repeated events, just skip", "conn: cur = conn.cursor() # Ordering is important for mapping results back to", "as f_events, open(output_time, 'w') as f_time: for row in cur: userid, events, times", "len(times): # If there are any repeated events, just skip the user. continue", "open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x in userids]) with", "pg import getpass as G from collections import namedtuple output_events = 'events.txt' output_time", "in x.split()] for x in f_events] with open(output_time) as f_times: times = [[float(y)", "x in f_userids] badge_map = {} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row", "mapping results back to the data, if needed. 
cur.execute('''SELECT userid, BadgeNames, Timestamp FROM", "badge_map = {} badge_count = 1 userids = [] with open(output_events, 'w') as", "BadgeNames, Timestamp FROM so_data ORDER BY userid''') badge_map = {} badge_count = 1", "user='utkarsh', password=<PASSWORD>('DB password: '), host='psql-science') as conn: cur = conn.cursor() # Ordering is", "for x in event_ids) + '\\n') # Can change times to something more", "row[2] if len(set(times)) != len(times): # If there are any repeated events, just", "'time.txt' output_userids = 'userids.txt' output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map',", "f_userids.writelines([str(x) + '\\n' for x in userids]) with open(output_badge_labels, 'w') as f_badges: f_badges.write('id,", "except pg.OperationalError: print('Not running on DB.') def read_events(): with open(output_events) as f_events: events", "output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def write(): try:", "If there are any repeated events, just skip the user. continue userids.append(userid) event_ids", "event_ids = [] for badge in events: if badge not in badge_map: badge_map[badge]", "in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running on DB.') def read_events():", "= {} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row in f_badge_labels: id, name", "there are any repeated events, just skip the user. 
continue userids.append(userid) event_ids =", "f_events.write(' '.join(str(x) for x in event_ids) + '\\n') # Can change times to", "print_function import psycopg2 as pg import getpass as G from collections import namedtuple", "badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in event_ids) + '\\n')", "'w') as f_time: for row in cur: userid, events, times = row[0], row[1],", "repeated events, just skip the user. continue userids.append(userid) event_ids = [] for badge", "+ '\\n') with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x) + '\\n' for x", "with open(output_userids) as f_userids: next(f_userids) userids = [int(x) for x in f_userids] badge_map", "in f_badge_labels: id, name = row.split(',') badge_map[int(id)] = name.strip() return SO_events(events=events, times=times, badge_map=badge_map,", "user. continue userids.append(userid) event_ids = [] for badge in events: if badge not", "in userids]) with open(output_badge_labels, 'w') as f_badges: f_badges.write('id, badge\\n') for badge in badge_map:", "f_userids] badge_map = {} with open(output_badge_labels) as f_badge_labels: next(f_badge_labels) for row in f_badge_labels:", "as pg import getpass as G from collections import namedtuple output_events = 'events.txt'", "'userids.txt' output_badge_labels = 'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def write():", "than seconds. 
f_time.write(' '.join(str(x) for x in times) + '\\n') with open(output_userids, 'w')", "f_badges: f_badges.write('id, badge\\n') for badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not", "badge in badge_map: f_badges.write('{}, {}\\n'.format(badge_map[badge], badge)) except pg.OperationalError: print('Not running on DB.') def", "= badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in event_ids) +", "for x in times) + '\\n') with open(output_userids, 'w') as f_userids: f_userids.write('userid\\n') f_userids.writelines([str(x)", "as f_events: events = [[int(y) for y in x.split()] for x in f_events]", "[[int(y) for y in x.split()] for x in f_events] with open(output_time) as f_times:", "badge_map: badge_map[badge] = badge_count badge_count += 1 event_ids.append(badge_map[badge]) f_events.write(' '.join(str(x) for x in", "= 'badges.csv' SO_events = namedtuple('SO_events', ['times', 'events', 'badge_map', 'userids']) def write(): try: with" ]
[ "client_name.lower() == \"none\": # use the first one. This is a bit hacky.", "{} def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except ValueError as", "base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def add_to_app(app, oauth=None,", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "= remote return remote def add_to_app(app, oauth=None, remote=None): load_env() if not oauth: oauth", "session = shared_request_context.get('session', {}) state = session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\") if", "SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass try: ctx =", "\\ client_name.lower() == \"none\": # use the first one. 
This is a bit", "oauth) spf = SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass", "SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except ValueError as v: _, oauth = v.args", "resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe cache this to prevent repeated hits to", "\"\"\" from inspect import isawaitable from os import getenv from sanic.response import redirect,", "session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data, context): if data", "= shared_request_context.get('session', {}) state = session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\") if state", "add_oauth_plugin(app) if not remote: remote = create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try: session_interface", "the token to the requester when its required.\") # if 'dev_oauth' in session:", "after_this = request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1", "session['oauth_state'] = state return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context = context.shared", "state.get('after_authorized', \"/apikey\") if state else \"/apikey\" if 'access_token' in resp: session['csiro-to-ldap2_oauth'] = resp", "token=access_token) if resp.status in (200, 201): if resp.data is not None and isinstance(resp.data,", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "getter is not implemented. 
Pass the token to the requester when its required.\")", "if resp.data is not None and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if method", "session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\") if state else \"/apikey\" if 'access_token' in", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "= func('method') if isawaitable(ret): ret = await ret return text(ret.raw_data) def make_token_getter(_remote): context", "ctx = spf.register_plugin(contextualize) except ValueError as v: _, ctx = v.args # @app.route('/')", "return 'Access denied: error=%s' % ( request.args['error'] ) resp = {k: v[0] if", "not oauth: oauth = add_oauth_plugin(app) if not remote: remote = create_oauth2_remote(app, oauth) spf", "isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if method == \"GET\": return True return False", "OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def add_to_app(app, oauth=None, remote=None): load_env() if not oauth:", "import session as session_plugin from filesystem_session_interface import FilesystemSessionInterface from util import load_env #having", "= session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\") if state else \"/apikey\" if 'access_token'", "next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if remote is None: raise RuntimeError(\"Cannot get oauth2", "session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe cache this to", "name) ret = func('method') if isawaitable(ret): ret = await ret return text(ret.raw_data) def", "ValueError as v: _, ctx = v.args # @app.route('/') # async def index(request):", "the License for the specific language governing permissions and limitations under the License.", "state else \"/apikey\" if 'access_token' in resp: 
session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key']", "import oauthclient from sanic_session_spf import session as session_plugin from filesystem_session_interface import FilesystemSessionInterface from", "= request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot", "state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request, name):", "as v: _, oauth = v.args return oauth def create_oauth2_remote(app, oauth=None): if not", "ret = func('method') if isawaitable(ret): ret = await ret return text(ret.raw_data) def make_token_getter(_remote):", "\"\"\" Copyright 2019 CSIRO Land and Water Licensed under the Apache License, Version", "License for the specific language governing permissions and limitations under the License. \"\"\"", "name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token) if resp.status in (200, 201): if", "_, oauth = v.args return oauth def create_oauth2_remote(app, oauth=None): if not oauth: oauth", "Unless required by applicable law or agreed to in writing, software distributed under", "problem #using them async might be an issue, but maybe not OAUTH2_REMOTES =", "dict): # return json(ret.data) # return str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async", "one. This is a bit hacky. 
client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None)", "'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote", "client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if remote is None: raise RuntimeError(\"Cannot", "coding: utf-8 -*- \"\"\" Copyright 2019 CSIRO Land and Water Licensed under the", "request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def", "when its required.\") # if 'dev_oauth' in session: # resp = session['dev_oauth'] #", "= spf.register_plugin(contextualize) except ValueError as v: _, ctx = v.args # @app.route('/') #", "if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. 
Asking for request_token using callback:", "= {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put state in the", "name): func = getattr(remote, name) ret = func('method') if isawaitable(ret): ret = await", "async def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http',", "module-local _hopefully_ shouldn't be a problem #using them async might be an issue,", "str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\",", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "= state return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context = context.shared shared_request_context", "to the requester when its required.\") # if 'dev_oauth' in session: # resp", "2019 CSIRO Land and Water Licensed under the Apache License, Version 2.0 (the", "License, Version 2.0 (the \"License\"); you may not use this file except in", "remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' )", "func('method') if isawaitable(ret): ret = await ret return text(ret.raw_data) def make_token_getter(_remote): context =", "not None and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if method == \"GET\": return", "\"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, 
consumer_secret=consumer_secret, request_token_params={'scope': 'profile'},", "token to the requester when its required.\") # if 'dev_oauth' in session: #", "the request, we need to put it in the session shared_context = context.shared", "remote def add_to_app(app, oauth=None, remote=None): load_env() if not oauth: oauth = add_oauth_plugin(app) if", "if 'dev_oauth' in session: # resp = session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote)", "def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except ValueError as v:", "interface=session_interface) except ValueError: pass try: ctx = spf.register_plugin(contextualize) except ValueError as v: _,", "# async def index(request): # if 'csiro-to-ldap_oauth' in session: # ret = await", "{'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context = context.shared shared_request_context = shared_context.request[id(request)] session", "client_name is None or client_name.startswith(\"_\") or \\ client_name.lower() == \"none\": # use the", "spf import SanicPluginsFramework from spf.plugins.contextualize import contextualize from sanic_oauthlib.client import oauthclient from sanic_session_spf", "None: raise RuntimeError(\"Cannot get oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\",", "v.args # @app.route('/') # async def index(request): # if 'csiro-to-ldap_oauth' in session: #", "# @app.route('/') # async def index(request): # if 'csiro-to-ldap_oauth' in session: # ret", "the specific language governing permissions and limitations under the License. 
\"\"\" from inspect", "async might be an issue, but maybe not OAUTH2_REMOTES = {} def add_oauth_plugin(app):", "from spf import SanicPluginsFramework from spf.plugins.contextualize import contextualize from sanic_oauthlib.client import oauthclient from", "sanic.response import redirect, text from spf import SanicPluginsFramework from spf.plugins.contextualize import contextualize from", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "if 'csiro-to-ldap_oauth' in session: # ret = await oauth.get('email') # if isinstance(ret.data, dict):", "# -*- coding: utf-8 -*- \"\"\" Copyright 2019 CSIRO Land and Water Licensed", "def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name)", "session: # resp = session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO:", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "from sanic.response import redirect, text from spf import SanicPluginsFramework from spf.plugins.contextualize import contextualize", "#Oauth1 cannot put state in the request, we need to put it in", "api? 
async def test_oauth2_token(client_name, access_token): if client_name is None or client_name.startswith(\"_\") or \\", "session = shared_request_context.get('session', {}) session['oauth_state'] = state return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request,", "session as session_plugin from filesystem_session_interface import FilesystemSessionInterface from util import load_env #having these", "resp.data is not None and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if method ==", "_hopefully_ shouldn't be a problem #using them async might be an issue, but", "if not oauth: oauth = add_oauth_plugin(app) if not remote: remote = create_oauth2_remote(app, oauth)", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state'] = state", "session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request,", "\"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. Asking for request_token using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\")", "consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key,", "first one. This is a bit hacky. 
client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name,", "return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback", "in compliance with the License. You may obtain a copy of the License", "ret = await oauth.get('email') # if isinstance(ret.data, dict): # return json(ret.data) # return", "is None or client_name.startswith(\"_\") or \\ client_name.lower() == \"none\": # use the first", "return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context = context.shared shared_request_context = shared_context.request[id(request)]", "context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth')", "logout(request, context): shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth',", "KIND, either express or implied. 
See the License for the specific language governing", "remote return remote def add_to_app(app, oauth=None, remote=None): load_env() if not oauth: oauth =", "\"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put state in the request, we need to", "remote = create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface)", "create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError:", "its required.\") # if 'dev_oauth' in session: # resp = session['dev_oauth'] # return", "def index(request): # if 'csiro-to-ldap_oauth' in session: # ret = await oauth.get('email') #", "SanicPluginsFramework from spf.plugins.contextualize import contextualize from sanic_oauthlib.client import oauthclient from sanic_session_spf import session", "in writing, software distributed under the License is distributed on an \"AS IS\"", "@app.route('/') # async def index(request): # if 'csiro-to-ldap_oauth' in session: # ret =", "text from spf import SanicPluginsFramework from spf.plugins.contextualize import contextualize from sanic_oauthlib.client import oauthclient", "governing permissions and limitations under the License. \"\"\" from inspect import isawaitable from", "but maybe not OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth", "return remote def add_to_app(app, oauth=None, remote=None): load_env() if not oauth: oauth = add_oauth_plugin(app)", "under the License. 
\"\"\" from inspect import isawaitable from os import getenv from", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "resp = session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe cache", "to prevent repeated hits to the api? async def test_oauth2_token(client_name, access_token): if client_name", "= session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe cache this", "await remote.get(\"/api/method\", token=access_token) if resp.status in (200, 201): if resp.data is not None", "error=%s' % ( request.args['error'] ) resp = {k: v[0] if isinstance(v, (tuple, list))", "or agreed to in writing, software distributed under the License is distributed on", "authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def add_to_app(app, oauth=None, remote=None): load_env() if", "session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized)", "import getenv from sanic.response import redirect, text from spf import SanicPluginsFramework from spf.plugins.contextualize", "oauth = spf.register_plugin(oauthclient) except ValueError as v: _, oauth = v.args return oauth", "getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. 
Asking for request_token", "if remote is None: raise RuntimeError(\"Cannot get oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp", "shouldn't be a problem #using them async might be an issue, but maybe", "create_oauth2_remote(app, oauth=None): if not oauth: oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret", "load_env() if not oauth: oauth = add_oauth_plugin(app) if not remote: remote = create_oauth2_remote(app,", "== \"none\": # use the first one. This is a bit hacky. client_name", "OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except", "context): shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None)", "in the request, we need to put it in the session shared_context =", "# use the first one. This is a bit hacky. 
client_name = next(iter(OAUTH2_REMOTES.keys()))", "not remote: remote = create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface()", "import redirect, text from spf import SanicPluginsFramework from spf.plugins.contextualize import contextualize from sanic_oauthlib.client", "as session_plugin from filesystem_session_interface import FilesystemSessionInterface from util import load_env #having these in", "consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/',", "isawaitable from os import getenv from sanic.response import redirect, text from spf import", "@app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data, context): if data is None: return 'Access", "shared_context raise NotImplementedError(\"Out-of-order token getter is not implemented. 
Pass the token to the", "oauth = v.args return oauth def create_oauth2_remote(app, oauth=None): if not oauth: oauth =", "def logout(request, context): shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {})", "shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def", "override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\",", "in a module-local _hopefully_ shouldn't be a problem #using them async might be", "except ValueError: pass try: ctx = spf.register_plugin(contextualize) except ValueError as v: _, ctx", "getattr(remote, name) ret = func('method') if isawaitable(ret): ret = await ret return text(ret.raw_data)", "these in a module-local _hopefully_ shouldn't be a problem #using them async might", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "OF ANY KIND, either express or implied. 
See the License for the specific", "return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data, context): if data is None:", "else \"/apikey\" if 'access_token' in resp: session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key'] =", "need to put it in the session shared_context = context.shared shared_request_context = shared_context.request[id(request)]", "to put it in the session shared_context = context.shared shared_request_context = shared_context.request[id(request)] session", "-*- coding: utf-8 -*- \"\"\" Copyright 2019 CSIRO Land and Water Licensed under", "cannot put state in the request, we need to put it in the", "async def oauth2_method(request, name): func = getattr(remote, name) ret = func('method') if isawaitable(ret):", "as v: _, ctx = v.args # @app.route('/') # async def index(request): #", "#using them async might be an issue, but maybe not OAUTH2_REMOTES = {}", "from filesystem_session_interface import FilesystemSessionInterface from util import load_env #having these in a module-local", "AutoAuthorize. Asking for request_token using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state =", "= getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST',", "may not use this file except in compliance with the License. 
You may", "spf = SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass try:", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "import FilesystemSessionInterface from util import load_env #having these in a module-local _hopefully_ shouldn't", "= getattr(remote, name) ret = func('method') if isawaitable(ret): ret = await ret return", "use the first one. This is a bit hacky. client_name = next(iter(OAUTH2_REMOTES.keys())) remote", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "utf-8 -*- \"\"\" Copyright 2019 CSIRO Land and Water Licensed under the Apache", "'csiro-to-ldap_oauth' in session: # ret = await oauth.get('email') # if isinstance(ret.data, dict): #", "@ctx.route('/oauth2/logout') def logout(request, context): shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session',", "Asking for request_token using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\":", "callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback", "'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put state in the request, we", "getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope':", "data.items()} shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) state =", "= await 
oauth.get('email') # if isinstance(ret.data, dict): # return json(ret.data) # return str(ret.data)", "= OAUTH2_REMOTES.get(client_name, None) if remote is None: raise RuntimeError(\"Cannot get oauth2 remote with", "import isawaitable from os import getenv from sanic.response import redirect, text from spf", "await ret return text(ret.raw_data) def make_token_getter(_remote): context = oauth.context shared_context = context.shared @_remote.tokengetter", "return str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context): override_server_name =", "session: # ret = await oauth.get('email') # if isinstance(ret.data, dict): # return json(ret.data)", "See the License for the specific language governing permissions and limitations under the", "hits to the api? async def test_oauth2_token(client_name, access_token): if client_name is None or", "Water Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "for request_token using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2',", "in session: # ret = await oauth.get('email') # if isinstance(ret.data, dict): # return", "client_name.startswith(\"_\") or \\ client_name.lower() == \"none\": # use the first one. 
This is", "state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func = getattr(remote, name) ret", "try: oauth = spf.register_plugin(oauthclient) except ValueError as v: _, oauth = v.args return", "\\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token) if resp.status in (200, 201): if resp.data", "is not None and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if method == \"GET\":", "def make_token_getter(_remote): context = oauth.context shared_context = context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal", "\"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base):", "v for k, v in data.items()} shared_context = context.shared shared_request_context = shared_context.request[id(request)] session", "@_remote.tokengetter async def get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token getter is not", "len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. Asking for request_token using callback: {}\".format(callback))", "@app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth',", "request.args['error'] ) resp = {k: v[0] if isinstance(v, (tuple, list)) else v for", "# return str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context): override_server_name", "this file except in compliance with the License. 
You may obtain a copy", "@remote.autoauthorize async def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True,", "= resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>')", "denied: error=%s' % ( request.args['error'] ) resp = {k: v[0] if isinstance(v, (tuple,", "def test_oauth2_token(client_name, access_token): if client_name is None or client_name.startswith(\"_\") or \\ client_name.lower() ==", "\"License\"); you may not use this file except in compliance with the License.", "FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass try: ctx = spf.register_plugin(contextualize) except ValueError as", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "try: ctx = spf.register_plugin(contextualize) except ValueError as v: _, ctx = v.args #", "= await ret return text(ret.raw_data) def make_token_getter(_remote): context = oauth.context shared_context = context.shared", "you may not use this file except in compliance with the License. You", "from inspect import isawaitable from os import getenv from sanic.response import redirect, text", "agreed to in writing, software distributed under the License is distributed on an", "callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. Asking for request_token using callback: {}\".format(callback)) after_this", "we need to put it in the session shared_context = context.shared shared_request_context =", "permissions and limitations under the License. 
\"\"\" from inspect import isawaitable from os", "if not remote: remote = create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try: session_interface =", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Pass the token to the requester when its required.\") # if 'dev_oauth' in", "and Water Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "#having these in a module-local _hopefully_ shouldn't be a problem #using them async", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "import load_env #having these in a module-local _hopefully_ shouldn't be a problem #using", ") OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def add_to_app(app, oauth=None, remote=None): load_env() if not", "implied. See the License for the specific language governing permissions and limitations under", "from spf.plugins.contextualize import contextualize from sanic_oauthlib.client import oauthclient from sanic_session_spf import session as", "oauth=None): if not oauth: oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret =", "if isawaitable(ret): ret = await ret return text(ret.raw_data) def make_token_getter(_remote): context = oauth.context", "getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token',", "def create_oauth2_remote(app, oauth=None): if not oauth: oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\")", "a bit hacky. 
client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if remote is", "text(ret.raw_data) def make_token_getter(_remote): context = oauth.context shared_context = context.shared @_remote.tokengetter async def get_oauth_token():", "= shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async", "\"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize'", "{}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this}", "return text(ret.raw_data) def make_token_getter(_remote): context = oauth.context shared_context = context.shared @_remote.tokengetter async def", "_server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize.", "License. 
\"\"\" from inspect import isawaitable from os import getenv from sanic.response import", "oauth2_method(request, name): func = getattr(remote, name) ret = func('method') if isawaitable(ret): ret =", "might be an issue, but maybe not OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf", "return json(ret.data) # return str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request,", "201): if resp.data is not None and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if", "them async might be an issue, but maybe not OAUTH2_REMOTES = {} def", "in resp: session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state", "use this file except in compliance with the License. You may obtain a", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "\"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put state", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "# if isinstance(ret.data, dict): # return json(ret.data) # return str(ret.data) # return redirect(app.url_for('login'))", "= context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index'))", "ret return text(ret.raw_data) def make_token_getter(_remote): context = oauth.context shared_context = context.shared @_remote.tokengetter async", "context): if data is None: return 'Access denied: error=%s' % ( request.args['error'] )", "if state else \"/apikey\" if 'access_token' in resp: session['csiro-to-ldap2_oauth'] = resp if state:", "\"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put state in the request, 
we need", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "index(request): # if 'csiro-to-ldap_oauth' in session: # ret = await oauth.get('email') # if", "async def get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token getter is not implemented.", "RuntimeError(\"Cannot get oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token) if", "= context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token getter", "shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) state = session.get('oauth_state', None) after_authorized =", "getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if", "required.\") # if 'dev_oauth' in session: # resp = session['dev_oauth'] # return resp['oauth_token'],", "list)) else v for k, v in data.items()} shared_context = context.shared shared_request_context =", "request, we need to put it in the session shared_context = context.shared shared_request_context", "add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except ValueError as v: _,", "the License. \"\"\" from inspect import isawaitable from os import getenv from sanic.response", "#TODO: maybe cache this to prevent repeated hits to the api? async def", "required by applicable law or agreed to in writing, software distributed under the", "print(\"In AutoAuthorize. 
Asking for request_token using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state", "'Access denied: error=%s' % ( request.args['error'] ) resp = {k: v[0] if isinstance(v,", "= create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except", "oauth def create_oauth2_remote(app, oauth=None): if not oauth: oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\",", "state in the request, we need to put it in the session shared_context", "state return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context = context.shared shared_request_context =", "= state.get('after_authorized', \"/apikey\") if state else \"/apikey\" if 'access_token' in resp: session['csiro-to-ldap2_oauth'] =", "NotImplementedError(\"Out-of-order token getter is not implemented. Pass the token to the requester when", "get oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token) if resp.status", "remote.get(\"/api/method\", token=access_token) if resp.status in (200, 201): if resp.data is not None and", "not implemented. 
Pass the token to the requester when its required.\") # if", "await oauth.get('email') # if isinstance(ret.data, dict): # return json(ret.data) # return str(ret.data) #", "remote is None: raise RuntimeError(\"Cannot get oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp =", "session shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state'] =", "access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def add_to_app(app, oauth=None, remote=None): load_env()", "oauth2_auth(request, data, context): if data is None: return 'Access denied: error=%s' % (", "CSIRO Land and Water Licensed under the Apache License, Version 2.0 (the \"License\");", "= getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\")", "None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data, context): if data is", "oauth: oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote", "is not implemented. Pass the token to the requester when its required.\") #", "request_token using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\":", "= add_oauth_plugin(app) if not remote: remote = create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try:", "or client_name.startswith(\"_\") or \\ client_name.lower() == \"none\": # use the first one. 
This", "inspect import isawaitable from os import getenv from sanic.response import redirect, text from", "redirect, text from spf import SanicPluginsFramework from spf.plugins.contextualize import contextualize from sanic_oauthlib.client import", "consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote", "bit hacky. client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if remote is None:", "= SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except ValueError as v: _, oauth =", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) state = session.get('oauth_state', None)", "add_to_app(app, oauth=None, remote=None): load_env() if not oauth: oauth = add_oauth_plugin(app) if not remote:", "not use this file except in compliance with the License. You may obtain", "= callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. Asking for request_token using callback: {}\".format(callback)) after_this =", "oauth.context shared_context = context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order", "( request.args['error'] ) resp = {k: v[0] if isinstance(v, (tuple, list)) else v", "ValueError as v: _, oauth = v.args return oauth def create_oauth2_remote(app, oauth=None): if", "token getter is not implemented. 
Pass the token to the requester when its", "raise RuntimeError(\"Cannot get oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token)", "OAUTH2_REMOTES.get(client_name, None) if remote is None: raise RuntimeError(\"Cannot get oauth2 remote with name", "in the session shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {})", "= shared_context.request[id(request)] session = shared_request_context.get('session', {}) state = session.get('oauth_state', None) after_authorized = state.get('after_authorized',", "context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token getter is", "repeated hits to the api? async def test_oauth2_token(client_name, access_token): if client_name is None", "session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func = getattr(remote,", "if 'access_token' in resp: session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state']", "resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async", "using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\",", "\"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. 
Asking for request_token using", "{}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data, context): if", "= FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass try: ctx = spf.register_plugin(contextualize) except ValueError", "not oauth: oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\")", "= context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state'] = state return", "def get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token getter is not implemented. Pass", "ANY KIND, either express or implied. See the License for the specific language", "spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass try: ctx = spf.register_plugin(contextualize) except ValueError as v:", "= \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func", "after_authorized = state.get('after_authorized', \"/apikey\") if state else \"/apikey\" if 'access_token' in resp: session['csiro-to-ldap2_oauth']", "file except in compliance with the License. 
You may obtain a copy of", "if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def", "make_token_getter(_remote): context = oauth.context shared_context = context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal context,", "2.0 (the \"License\"); you may not use this file except in compliance with", "oauthclient from sanic_session_spf import session as session_plugin from filesystem_session_interface import FilesystemSessionInterface from util", "requester when its required.\") # if 'dev_oauth' in session: # resp = session['dev_oauth']", "the requester when its required.\") # if 'dev_oauth' in session: # resp =", "= oauth.context shared_context = context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal context, shared_context raise", "_, ctx = v.args # @app.route('/') # async def index(request): # if 'csiro-to-ldap_oauth'", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "def oauth2_auth(request, data, context): if data is None: return 'Access denied: error=%s' %", "def add_to_app(app, oauth=None, remote=None): load_env() if not oauth: oauth = add_oauth_plugin(app) if not", "language governing permissions and limitations under the License. \"\"\" from inspect import isawaitable", "oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] =", "nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token getter is not implemented. Pass the token", "the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "state = session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\") if state else \"/apikey\" if", "it in the session shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session',", "in data.items()} shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) state", "not OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient)", "specific language governing permissions and limitations under the License. \"\"\" from inspect import", "session_plugin from filesystem_session_interface import FilesystemSessionInterface from util import load_env #having these in a", "isinstance(v, (tuple, list)) else v for k, v in data.items()} shared_context = context.shared", "(the \"License\"); you may not use this file except in compliance with the", ") resp = {k: v[0] if isinstance(v, (tuple, list)) else v for k,", "data, context): if data is None: return 'Access denied: error=%s' % ( request.args['error']", "if not oauth: oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\",", "shared_context = context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token", "an issue, but maybe not OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf = SanicPluginsFramework(app)", "or \\ client_name.lower() == \"none\": # use the first one. 
This is a", "= shared_request_context.get('session', {}) session['oauth_state'] = state return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context):", "= getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret,", "callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. Asking for request_token using callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\",", "# return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\")", "<reponame>CSIRO-enviro-informatics/cosmoz-rest-wrapper<filename>src/oauth2_routes.py # -*- coding: utf-8 -*- \"\"\" Copyright 2019 CSIRO Land and Water", "# if 'csiro-to-ldap_oauth' in session: # ret = await oauth.get('email') # if isinstance(ret.data,", "if isinstance(v, (tuple, list)) else v for k, v in data.items()} shared_context =", "v: _, ctx = v.args # @app.route('/') # async def index(request): # if", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "resp.status in (200, 201): if resp.data is not None and isinstance(resp.data, dict): method", "return oauth def create_oauth2_remote(app, oauth=None): if not oauth: oauth = add_oauth_plugin(app) consumer_key =", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass try: ctx = spf.register_plugin(contextualize) except", "try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, 
interface=session_interface) except ValueError: pass try: ctx = spf.register_plugin(contextualize)", "and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if method == \"GET\": return True return", "except ValueError as v: _, ctx = v.args # @app.route('/') # async def", "and limitations under the License. \"\"\" from inspect import isawaitable from os import", "import SanicPluginsFramework from spf.plugins.contextualize import contextualize from sanic_oauthlib.client import oauthclient from sanic_session_spf import", "request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put", "cache this to prevent repeated hits to the api? async def test_oauth2_token(client_name, access_token):", "test_oauth2_token(client_name, access_token): if client_name is None or client_name.startswith(\"_\") or \\ client_name.lower() == \"none\":", "ret = await ret return text(ret.raw_data) def make_token_getter(_remote): context = oauth.context shared_context =", "law or agreed to in writing, software distributed under the License is distributed", "maybe not OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth =", "oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token) if resp.status in", "context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) state = session.get('oauth_state', None) after_authorized", "implemented. 
Pass the token to the requester when its required.\") # if 'dev_oauth'", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "remote = OAUTH2_REMOTES.get(client_name, None) if remote is None: raise RuntimeError(\"Cannot get oauth2 remote", "oauth = add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote =", "be a problem #using them async might be an issue, but maybe not", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "= request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback =", "oauth: oauth = add_oauth_plugin(app) if not remote: remote = create_oauth2_remote(app, oauth) spf =", "the session shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state']", "(tuple, list)) else v for k, v in data.items()} shared_context = context.shared shared_request_context", "from util import load_env #having these in a module-local _hopefully_ shouldn't be a", "\"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func =", "request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\",", "issue, but maybe not OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try:", "import contextualize from sanic_oauthlib.client import oauthclient from sanic_session_spf import session as session_plugin from", "return redirect(after_authorized) 
@app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func = getattr(remote, name) ret =", "json(ret.data) # return str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context):", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "in session: # resp = session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote", "oauth = add_oauth_plugin(app) if not remote: remote = create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app)", "for the specific language governing permissions and limitations under the License. \"\"\" from", "{}) state = session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\") if state else \"/apikey\"", "shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return", "None) if remote is None: raise RuntimeError(\"Cannot get oauth2 remote with name \\\"{}\\\"\".format(client_name))", "shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state'] = state return {'callback': callback} @ctx.route('/oauth2/logout') def", "either express or implied. See the License for the specific language governing permissions", "{}) session['oauth_state'] = state return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context =", "context, shared_context raise NotImplementedError(\"Out-of-order token getter is not implemented. Pass the token to", "k, v in data.items()} shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session',", "limitations under the License. 
\"\"\" from inspect import isawaitable from os import getenv", "{\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put state in the request,", "is a bit hacky. client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if remote", "proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. Asking", "raise NotImplementedError(\"Out-of-order token getter is not implemented. Pass the token to the requester", "= {} def add_oauth_plugin(app): spf = SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except ValueError", "v: _, oauth = v.args return oauth def create_oauth2_remote(app, oauth=None): if not oauth:", "(200, 201): if resp.data is not None and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper()", "is None: return 'Access denied: error=%s' % ( request.args['error'] ) resp = {k:", "spf.plugins.contextualize import contextualize from sanic_oauthlib.client import oauthclient from sanic_session_spf import session as session_plugin", "shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. See the License for the specific language governing permissions and limitations", "None or client_name.startswith(\"_\") or \\ client_name.lower() == \"none\": # use the first one.", "this to prevent repeated hits to the api? 
async def test_oauth2_token(client_name, access_token): if", "data is None: return 'Access denied: error=%s' % ( request.args['error'] ) resp =", "from sanic_oauthlib.client import oauthclient from sanic_session_spf import session as session_plugin from filesystem_session_interface import", "remote with name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token) if resp.status in (200,", "shared_context.request[id(request)] session = shared_request_context.get('session', {}) state = session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\")", "maybe cache this to prevent repeated hits to the api? async def test_oauth2_token(client_name,", "a problem #using them async might be an issue, but maybe not OAUTH2_REMOTES", "= oauth.remote_app( 'csiro-to-ldap2', consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2']", "the first one. This is a bit hacky. client_name = next(iter(OAUTH2_REMOTES.keys())) remote =", "add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app( 'csiro-to-ldap2',", "put it in the session shared_context = context.shared shared_request_context = shared_context.request[id(request)] session =", "CONDITIONS OF ANY KIND, either express or implied. 
See the License for the", "callback: {}\".format(callback)) after_this = request.args.get(\"after_authorized\", \"/apikey\") state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\":", "consumer_key=consumer_key, consumer_secret=consumer_secret, request_token_params={'scope': 'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return", "after_this} #Oauth1 cannot put state in the request, we need to put it", "= spf.register_plugin(oauthclient) except ValueError as v: _, oauth = v.args return oauth def", "to in writing, software distributed under the License is distributed on an \"AS", "@app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func = getattr(remote, name) ret = func('method') if", "ctx = v.args # @app.route('/') # async def index(request): # if 'csiro-to-ldap_oauth' in", "return remote #TODO: maybe cache this to prevent repeated hits to the api?", "# resp = session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe", "= shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data,", "spf.register_plugin(oauthclient) except ValueError as v: _, oauth = v.args return oauth def create_oauth2_remote(app,", "with name \\\"{}\\\"\".format(client_name)) resp = await remote.get(\"/api/method\", token=access_token) if resp.status in (200, 201):", "except in compliance with the License. 
You may obtain a copy of the", "% ( request.args['error'] ) resp = {k: v[0] if isinstance(v, (tuple, list)) else", "= {k: v[0] if isinstance(v, (tuple, list)) else v for k, v in", "func = getattr(remote, name) ret = func('method') if isawaitable(ret): ret = await ret", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "the api? async def test_oauth2_token(client_name, access_token): if client_name is None or client_name.startswith(\"_\") or", "'access_token' in resp: session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] =", "load_env #having these in a module-local _hopefully_ shouldn't be a problem #using them", "put state in the request, we need to put it in the session", "access_token): if client_name is None or client_name.startswith(\"_\") or \\ client_name.lower() == \"none\": #", "resp = await remote.get(\"/api/method\", token=access_token) if resp.status in (200, 201): if resp.data is", "= await remote.get(\"/api/method\", token=access_token) if resp.status in (200, 201): if resp.data is not", "sanic_oauthlib.client import oauthclient from sanic_session_spf import session as session_plugin from filesystem_session_interface import FilesystemSessionInterface", "spf.register_plugin(contextualize) except ValueError as v: _, ctx = v.args # @app.route('/') # async", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base =", "resp: session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return", "if isinstance(ret.data, dict): # return json(ret.data) # return str(ret.data) # return 
redirect(app.url_for('login')) @app.route('/create_oauth2')", "os import getenv from sanic.response import redirect, text from spf import SanicPluginsFramework from", "'profile'}, base_url='https://oauth.esoil.io/api/', access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def add_to_app(app,", "contextualize from sanic_oauthlib.client import oauthclient from sanic_session_spf import session as session_plugin from filesystem_session_interface", "FilesystemSessionInterface from util import load_env #having these in a module-local _hopefully_ shouldn't be", "-*- \"\"\" Copyright 2019 CSIRO Land and Water Licensed under the Apache License,", "async def oauth2_auth(request, data, context): if data is None: return 'Access denied: error=%s'", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "get_oauth_token(): nonlocal context, shared_context raise NotImplementedError(\"Out-of-order token getter is not implemented. Pass the", "redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func = getattr(remote, name) ret = func('method')", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state'] = state return {'callback':", "None) after_authorized = state.get('after_authorized', \"/apikey\") if state else \"/apikey\" if 'access_token' in resp:", "shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) state = session.get('oauth_state',", "to the api? 
async def test_oauth2_token(client_name, access_token): if client_name is None or client_name.startswith(\"_\")", "shared_request_context.get('session', {}) session['oauth_state'] = state return {'callback': callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context", "pass try: ctx = spf.register_plugin(contextualize) except ValueError as v: _, ctx = v.args", "shared_request_context.get('session', {}) session.pop('csiro-to-ldap2_oauth', None) return redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data, context):", "\"none\": # use the first one. This is a bit hacky. client_name =", "oauth.get('email') # if isinstance(ret.data, dict): # return json(ret.data) # return str(ret.data) # return", "'dev_oauth' in session: # resp = session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return", "# return json(ret.data) # return str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def", "# return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe cache this to prevent", "@remote.authorized_handler async def oauth2_auth(request, data, context): if data is None: return 'Access denied:", "context = oauth.context shared_context = context.shared @_remote.tokengetter async def get_oauth_token(): nonlocal context, shared_context", "compliance with the License. 
You may obtain a copy of the License at", "if resp.status in (200, 201): if resp.data is not None and isinstance(resp.data, dict):", "= state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request, name): func = getattr(remote, name)", "\"/apikey\" if 'access_token' in resp: session['csiro-to-ldap2_oauth'] = resp if state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\"", "async def test_oauth2_token(client_name, access_token): if client_name is None or client_name.startswith(\"_\") or \\ client_name.lower()", "= getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) print(\"In AutoAuthorize. Asking for", "for k, v in data.items()} shared_context = context.shared shared_request_context = shared_context.request[id(request)] session =", "filesystem_session_interface import FilesystemSessionInterface from util import load_env #having these in a module-local _hopefully_", "express or implied. 
See the License for the specific language governing permissions and", "from sanic_session_spf import session as session_plugin from filesystem_session_interface import FilesystemSessionInterface from util import", "= SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin, interface=session_interface) except ValueError: pass try: ctx", "remote=None): load_env() if not oauth: oauth = add_oauth_plugin(app) if not remote: remote =", "create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback = request.app.url_for('oauth2_auth', _external=True, _scheme='http', _server=override_server_name) proxy_route_base", "async def index(request): # if 'csiro-to-ldap_oauth' in session: # ret = await oauth.get('email')", "\"/apikey\") if state else \"/apikey\" if 'access_token' in resp: session['csiro-to-ldap2_oauth'] = resp if", "None and isinstance(resp.data, dict): method = str(resp.data.get(\"method\")).upper() if method == \"GET\": return True", "callback} @ctx.route('/oauth2/logout') def logout(request, context): shared_context = context.shared shared_request_context = shared_context.request[id(request)] session =", "remote #TODO: maybe cache this to prevent repeated hits to the api? 
async", "be an issue, but maybe not OAUTH2_REMOTES = {} def add_oauth_plugin(app): spf =", "= shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state'] = state return {'callback': callback} @ctx.route('/oauth2/logout')", "else v for k, v in data.items()} shared_context = context.shared shared_request_context = shared_context.request[id(request)]", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "Copyright 2019 CSIRO Land and Water Licensed under the Apache License, Version 2.0", "# ret = await oauth.get('email') # if isinstance(ret.data, dict): # return json(ret.data) #", "if data is None: return 'Access denied: error=%s' % ( request.args['error'] ) resp", "v[0] if isinstance(v, (tuple, list)) else v for k, v in data.items()} shared_context", "= v.args return oauth def create_oauth2_remote(app, oauth=None): if not oauth: oauth = add_oauth_plugin(app)", "applicable law or agreed to in writing, software distributed under the License is", "# if 'dev_oauth' in session: # resp = session['dev_oauth'] # return resp['oauth_token'], resp['oauth_token_secret']", "= add_oauth_plugin(app) consumer_key = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_KEY\", \"example1\") consumer_secret = getenv(\"OAUTH2_CSIRO_LDAP_CONSUMER_SECRET\", \"password1\") remote = oauth.remote_app(", "def oauth2_method(request, name): func = getattr(remote, name) ret = func('method') if isawaitable(ret): ret", "is None: raise RuntimeError(\"Cannot get oauth2 remote with name \\\"{}\\\"\".format(client_name)) resp = await", "= next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if remote is None: raise RuntimeError(\"Cannot get", "prevent repeated hits to the api? 
async def test_oauth2_token(client_name, access_token): if client_name is", "None: return 'Access denied: error=%s' % ( request.args['error'] ) resp = {k: v[0]", "from os import getenv from sanic.response import redirect, text from spf import SanicPluginsFramework", "shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {}) session['oauth_state'] = state return {'callback': callback}", "= v.args # @app.route('/') # async def index(request): # if 'csiro-to-ldap_oauth' in session:", "Land and Water Licensed under the Apache License, Version 2.0 (the \"License\"); you", "isawaitable(ret): ret = await ret return text(ret.raw_data) def make_token_getter(_remote): context = oauth.context shared_context", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "resp = {k: v[0] if isinstance(v, (tuple, list)) else v for k, v", "in (200, 201): if resp.data is not None and isinstance(resp.data, dict): method =", "shared_request_context.get('session', {}) state = session.get('oauth_state', None) after_authorized = state.get('after_authorized', \"/apikey\") if state else", "This is a bit hacky. client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if", "\"after_authorized\": after_this} #Oauth1 cannot put state in the request, we need to put", "hacky. 
client_name = next(iter(OAUTH2_REMOTES.keys())) remote = OAUTH2_REMOTES.get(client_name, None) if remote is None: raise", "getenv from sanic.response import redirect, text from spf import SanicPluginsFramework from spf.plugins.contextualize import", "sanic_session_spf import session as session_plugin from filesystem_session_interface import FilesystemSessionInterface from util import load_env", "oauth=None, remote=None): load_env() if not oauth: oauth = add_oauth_plugin(app) if not remote: remote", "access_token_method='POST', access_token_url='https://oauth.esoil.io/oauth2/token', authorize_url='https://oauth.esoil.io/oauth2/authorize' ) OAUTH2_REMOTES['csiro-to-ldap2'] = remote return remote def add_to_app(app, oauth=None, remote=None):", "{k: v[0] if isinstance(v, (tuple, list)) else v for k, v in data.items()}", "except ValueError as v: _, oauth = v.args return oauth def create_oauth2_remote(app, oauth=None):", "util import load_env #having these in a module-local _hopefully_ shouldn't be a problem", "redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize async def create_oauth2(request, context): override_server_name = getenv(\"SANIC_OVERRIDE_SERVER_NAME\", \"localhost:9001\") callback =", "remote: remote = create_oauth2_remote(app, oauth) spf = SanicPluginsFramework(app) try: session_interface = FilesystemSessionInterface() spf.register_plugin(session_plugin,", "_external=True, _scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base))", "spf = SanicPluginsFramework(app) try: oauth = spf.register_plugin(oauthclient) except ValueError as v: _, oauth", "_scheme='http', _server=override_server_name) proxy_route_base = getenv(\"SANIC_PROXY_ROUTE_BASE\", \"\") if len(proxy_route_base): callback = callback.replace(\"/oauth2/auth\", \"/{}oauth2/auth\".format(proxy_route_base)) 
print(\"In", "make_token_getter(remote) return remote #TODO: maybe cache this to prevent repeated hits to the", "v in data.items()} shared_context = context.shared shared_request_context = shared_context.request[id(request)] session = shared_request_context.get('session', {})", "v.args return oauth def create_oauth2_remote(app, oauth=None): if not oauth: oauth = add_oauth_plugin(app) consumer_key", "a module-local _hopefully_ shouldn't be a problem #using them async might be an", "isinstance(ret.data, dict): # return json(ret.data) # return str(ret.data) # return redirect(app.url_for('login')) @app.route('/create_oauth2') @remote.autoauthorize", "resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe cache this to prevent repeated hits", "redirect(app.url_for('index')) @app.route('/oauth2/auth') @remote.authorized_handler async def oauth2_auth(request, data, context): if data is None: return", "return resp['oauth_token'], resp['oauth_token_secret'] make_token_getter(remote) return remote #TODO: maybe cache this to prevent repeated", "state = {\"remote_app\": 'csiro-to-ldap2', \"oauth_version\": \"2.0\", \"after_authorized\": after_this} #Oauth1 cannot put state in", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software", "state: state['access_token_session_key'] = \"csiro-to-ldap2_oauth\" session['oauth_state'] = state return redirect(after_authorized) @app.route('/oauth2/method/<name>') async def oauth2_method(request,", "if client_name is None or client_name.startswith(\"_\") or \\ client_name.lower() == \"none\": # use", "ValueError: pass try: ctx = spf.register_plugin(contextualize) except ValueError as v: _, ctx =" ]
[ "The metric to use when calculating distance between instances in a feature array.", "is called on each pair of instances (rows) and the resulting value recorded.", ":param random_state: If int, random_state is the seed used by the random number", "'angle' then it is used as a summary node of all points contained", "in the data. The components are sorted by variance` :rtype: numpy.ndarray \"\"\" if", "gets stuck in a bad local minimum increasing the learning rate may help.", "30.0 :param float early_exaggeration: Controls how tight natural clusters in the original space", "float = 1e-7, metric: str = \"euclidean\", init: str = \"random\", verbose: int", "or TruncatedSVD for sparse data) to reduce the number of dimensions to a", "low-dimensional embedding and the high-dimensional data. t-SNE has a cost function that is", "data or TruncatedSVD for sparse data) to reduce the number of dimensions to", "but exact, algorithm in O(N^2) time. The exact algorithm should be used when", "speed and accuracy for Barnpcaes-Hut T-SNE. 'angle' is the angular size (referred to", "-> np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding. t-SNE is a tool to visualize", "float learning_rate: The learning rate for t-SNE is usually in the range [10.0,", "be between them. For larger values, the space between natural clusters will be", "It converts similarities between data points to joint probabilities and tries to minimize", "in [3]) of a distant node as measured from a point. If this", "pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X is assumed to be a distance matrix.", "how tight natural clusters in the original space are in the embedded space", "def tsne( embedding: np.ndarray, num_components: int = 2, perplexity: float = 30.0, early_exaggeration:", "cost function increases during initial optimization, the early exaggeration factor or the learning", "value is rounded to the next multiple of 50. 
Default 300 :param float", "Barnes-Hut approximation running in O(NlogN) time. method='exact' will run on the slower, but", "is a string, it must be one of the options allowed by scipy.spatial.distance.pdist", "norm is below this threshold, the optimization will be stopped. Default 1e-7 :param", "how much space will be between them. For larger values, the space between", "space. Default 2 :param float perplexity: The perplexity is related to the number", "instance used by `np.random`. Note that different initializations might result in different local", "components are sorted by variance` :rtype: numpy.ndarray \"\"\" if embedding is None: raise", "method: str = 'barnes_hut', angle: float = 0.5 ) -> np.ndarray: \"\"\" t-distributed", "rate is too high, the data may look like a 'ball' with any", "we can get different results. It is highly recommended to use another dimensionality", "distances between samples. :param numpy.ndarray embedding: The embedding in which PCA will be", "below this threshold, the optimization will be stopped. Default 1e-7 :param metric: The", "numpy array of shape (n_samples, num_components). PCA initialization cannot be used with precomputed", "30.0, early_exaggeration: float = 12.0, learning_rate: float = 200.0, num_iterations: int = 1000,", "learning algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value", "initial iterations with early exaggeration. Note that progress is only checked every 50", ":param int num_iterations_without_progress: Maximum number of iterations without progress before we abort the", "for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. 
If metric is", "the learning rate is too high, the data may look like a 'ball'", "two arrays from X as input and return a value indicating the distance", ":param str method: By default the gradient calculation algorithm uses Barnes-Hut approximation running", "learning rate for t-SNE is usually in the range [10.0, 1000.0]. If the", ":param float min_grad_norm: If the gradient norm is below this threshold, the optimization", "might be too high. Default 12.0 :param float learning_rate: The learning rate for", "is too low, most points may look compressed in a dense cloud with", "perplexity: The perplexity is related to the number of nearest neighbors that is", "of maximum variance in the data. The components are sorted by variance` :rtype:", "within it. This method is not very sensitive to changes in this parameter", "this size is below 'angle' then it is used as a summary node", "used if method='barnes_hut' This is the trade-off between speed and accuracy for Barnpcaes-Hut", "the optimization. Should be at least 250. Default 1000 :param int num_iterations_without_progress: Maximum", "of 50. Default 300 :param float min_grad_norm: If the gradient norm is below", "is used in other manifold learning algorithms. Larger datasets usually require a larger", "perplexity: float = 30.0, early_exaggeration: float = 12.0, learning_rate: float = 200.0, num_iterations:", "other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selecting", "in feature space, representing the directions of maximum variance in the data. The", "be a distance matrix. Alternatively, if metric is a callable function, it is", "different results. It is highly recommended to use another dimensionality reduction method (e.g.", "None: raise ValueError('embedding must be specified but was None') if not num_components: raise", "convex, i.e. with different initializations we can get different results. 
It is highly", "the random number generator; If RandomState instance, random_state is the random number generator;", "variance in the data. The components are sorted by variance` :rtype: numpy.ndarray \"\"\"", "manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selecting a", "indicating the distance between them. The default is \"euclidean\" which is interpreted as", "cost function gets stuck in a bad local minimum increasing the learning rate", "used with precomputed distances and is usually more globally stable than random initialization.", "The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If", ":type random_state: Optional[Union[int, numpy.random.RandomState]] :param str method: By default the gradient calculation algorithm", "1000, num_iterations_without_progress: int = 300, min_grad_norm: float = 1e-7, metric: str = \"euclidean\",", "clusters will be larger in the embedded space. Again, the choice of this", "the random number generator; If None, the random number generator is the RandomState", "accuracy for Barnpcaes-Hut T-SNE. 'angle' is the angular size (referred to as theta", "init: Initialization of embedding. Possible options are 'random', 'pca', and a numpy array", "(c) Microsoft Corporation. # Licensed under the MIT license. import numpy as np", "to be better than 3%. However, the exact method cannot scale to millions", "number generator is the RandomState instance used by `np.random`. Note that different initializations", "embedding: np.ndarray, num_components: int = 2, perplexity: float = 30.0, early_exaggeration: float =", "is related to the number of nearest neighbors that is used in other", "features is very high. This will suppress some noise and speed up the", "np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding. t-SNE is a tool to visualize high-dimensional", "in a dense cloud with few outliers. 
If the cost function gets stuck", "learning_rate: The learning rate for t-SNE is usually in the range [10.0, 1000.0].", "interpreted as squared euclidean distance. Default 'euclidean' :type metric: Union[str, Callable] :param init:", "embedding. Possible options are 'random', 'pca', and a numpy array of shape (n_samples,", "of pairwise distances between samples. :param numpy.ndarray embedding: The embedding in which PCA", "Larger datasets usually require a larger perplexity. Consider selecting a value between 5", "the cost function increases during initial optimization, the early exaggeration factor or the", "will be applied :param int num_components: Dimension of the embedded space. Default 2", "int = 1, random_state: Union[int, np.random.RandomState, None] = None, method: str = 'barnes_hut',", "speed up the computation of pairwise distances between samples. :param numpy.ndarray embedding: The", "calculating distance between instances in a feature array. If metric is a string,", "Callable] :param init: Initialization of embedding. Possible options are 'random', 'pca', and a", "the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the", "None') model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init,", "is below 'angle' then it is used as a summary node of all", "to as theta in [3]) of a distant node as measured from a", "datasets usually require a larger perplexity. Consider selecting a value between 5 and", "that progress is only checked every 50 iterations so this value is rounded", "int num_iterations: Maximum number of iterations for the optimization. 
Should be at least", "must be specified but was None') model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate,", "T-SNE. 'angle' is the angular size (referred to as theta in [3]) of", "random_state is the random number generator; If None, the random number generator is", "or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X is assumed", "the number of dimensions to a reasonable amount (e.g. 50) if the number", "= \"random\", verbose: int = 1, random_state: Union[int, np.random.RandomState, None] = None, method:", "space and how much space will be between them. For larger values, the", "tool to visualize high-dimensional data. It converts similarities between data points to joint", "function, it is called on each pair of instances (rows) and the resulting", "contained within it. This method is not very sensitive to changes in this", "return a value indicating the distance between them. The default is \"euclidean\" which", "quickly increasing error. Default 0.5 :return: A np.ndarray of principal axes in feature", "when nearest-neighbor errors need to be better than 3%. However, the exact method", "natural clusters will be larger in the embedded space. Again, the choice of", "float early_exaggeration: Controls how tight natural clusters in the original space are in", "tight natural clusters in the original space are in the embedded space and", "and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the", "Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import numpy as", "method: By default the gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN)", "between instances in a feature array. If metric is a string, it must", "node of all points contained within it. 
This method is not very sensitive", "= 'barnes_hut', angle: float = 0.5 ) -> np.ndarray: \"\"\" t-distributed Stochastic Neighbor", "parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X is", "one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a", "and accuracy for Barnpcaes-Hut T-SNE. 'angle' is the angular size (referred to as", "embedded space. Again, the choice of this parameter is not very critical. If", "allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.", "use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for", "np from typing import Union from sklearn.manifold import TSNE def tsne( embedding: np.ndarray,", ":param int verbose: Verbosity level. Default 1 :param random_state: If int, random_state is", "then it is used as a summary node of all points contained within", "By default the gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time.", "is highly recommended to use another dimensionality reduction method (e.g. PCA for dense", "its nearest neighbours. If the learning rate is too low, most points may", "rounded to the next multiple of 50. Default 300 :param float min_grad_norm: If", "metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X", "ValueError('embedding must be specified but was None') if not num_components: raise ValueError('num_components must", "the learning rate is too low, most points may look compressed in a", "to a reasonable amount (e.g. 50) if the number of features is very", "few outliers. If the cost function gets stuck in a bad local minimum", "applied :param int num_components: Dimension of the embedded space. Default 2 :param float", "to use when calculating distance between instances in a feature array. 
If metric", "metric is \"precomputed\", X is assumed to be a distance matrix. Alternatively, if", "be specified but was None') if not num_components: raise ValueError('num_components must be specified", "when calculating distance between instances in a feature array. If metric is a", "with different initializations we can get different results. It is highly recommended to", "This method is not very sensitive to changes in this parameter in the", "selecting a value between 5 and 50. The choice is not extremely critical", "globally stable than random initialization. Default 'random' :type init: Union[string, numpy.ndarray] :param int", "require a larger perplexity. Consider selecting a value between 5 and 50. The", "the learning rate may help. Default 200.0 :param int num_iterations: Maximum number of", "feature array. If metric is a string, it must be one of the", "than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly", "specified but was None') if not num_components: raise ValueError('num_components must be specified but", "in the range of 0.2 - 0.8. Angle less than 0.2 has quickly", "Again, the choice of this parameter is not very critical. If the cost", "cannot scale to millions of examples. Default 'barnes_hut' :param float angle: Only used", "distance between them. The default is \"euclidean\" which is interpreted as squared euclidean", "that different initializations might result in different local minima of the cost function.", "This is the trade-off between speed and accuracy for Barnpcaes-Hut T-SNE. 'angle' is", "not very critical. If the cost function increases during initial optimization, the early", "recorded. The callable should take two arrays from X as input and return", "'euclidean' :type metric: Union[str, Callable] :param init: Initialization of embedding. Possible options are", "300, min_grad_norm: float = 1e-7, metric: str = \"euclidean\", init: str = \"random\",", "is very high. 
This will suppress some noise and speed up the computation", "each pair of instances (rows) and the resulting value recorded. The callable should", "t-SNE has a cost function that is not convex, i.e. with different initializations", "between speed and accuracy for Barnpcaes-Hut T-SNE. 'angle' is the angular size (referred", "of dimensions to a reasonable amount (e.g. 50) if the number of features", "maximum variance in the data. The components are sorted by variance` :rtype: numpy.ndarray", "exaggeration factor or the learning rate might be too high. Default 12.0 :param", "but was None') model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm,", "metric is a string, it must be one of the options allowed by", "size is below 'angle' then it is used as a summary node of", "usually require a larger perplexity. Consider selecting a value between 5 and 50.", "early exaggeration. Note that progress is only checked every 50 iterations so this", "Note that different initializations might result in different local minima of the cost", "slower, but exact, algorithm in O(N^2) time. The exact algorithm should be used", "look like a 'ball' with any point approximately equidistant from its nearest neighbours.", ":param numpy.ndarray embedding: The embedding in which PCA will be applied :param int", "of iterations for the optimization. Should be at least 250. Default 1000 :param", "float angle: Only used if method='barnes_hut' This is the trade-off between speed and", "random number generator; If RandomState instance, random_state is the random number generator; If", "str method: By default the gradient calculation algorithm uses Barnes-Hut approximation running in", "for dense data or TruncatedSVD for sparse data) to reduce the number of", "dimensionality reduction method (e.g. 
PCA for dense data or TruncatedSVD for sparse data)", "the learning rate might be too high. Default 12.0 :param float learning_rate: The", "should be used when nearest-neighbor errors need to be better than 3%. However,", "parameter. Default 30.0 :param float early_exaggeration: Controls how tight natural clusters in the", "larger values, the space between natural clusters will be larger in the embedded", "sklearn.manifold import TSNE def tsne( embedding: np.ndarray, num_components: int = 2, perplexity: float", "point approximately equidistant from its nearest neighbours. If the learning rate is too", "next multiple of 50. Default 300 :param float min_grad_norm: If the gradient norm", "angle: float = 0.5 ) -> np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding. t-SNE", "float = 30.0, early_exaggeration: float = 12.0, learning_rate: float = 200.0, num_iterations: int", "will be between them. For larger values, the space between natural clusters will", "nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually", "representing the directions of maximum variance in the data. The components are sorted", "specified but was None') model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress,", "to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding", "typing import Union from sklearn.manifold import TSNE def tsne( embedding: np.ndarray, num_components: int", "choice is not extremely critical since t-SNE is quite insensitive to this parameter.", "initialization. Default 'random' :type init: Union[string, numpy.ndarray] :param int verbose: Verbosity level. Default", "0.5 ) -> np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding. 
t-SNE is a tool", "The default is \"euclidean\" which is interpreted as squared euclidean distance. Default 'euclidean'", "float = 0.5 ) -> np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding. t-SNE is", "the gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time. method='exact' will", "not extremely critical since t-SNE is quite insensitive to this parameter. Default 30.0", "Consider selecting a value between 5 and 50. The choice is not extremely", "Union[str, Callable] :param init: Initialization of embedding. Possible options are 'random', 'pca', and", "Controls how tight natural clusters in the original space are in the embedded", "with few outliers. If the cost function gets stuck in a bad local", "Default 'euclidean' :type metric: Union[str, Callable] :param init: Initialization of embedding. Possible options", "errors need to be better than 3%. However, the exact method cannot scale", "than random initialization. Default 'random' :type init: Union[string, numpy.ndarray] :param int verbose: Verbosity", "learning rate may help. Default 200.0 :param int num_iterations: Maximum number of iterations", "local minima of the cost function. :type random_state: Optional[Union[int, numpy.random.RandomState]] :param str method:", "clusters in the original space are in the embedded space and how much", "Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional", "reduce the number of dimensions to a reasonable amount (e.g. 50) if the", "another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for sparse", "Note that progress is only checked every 50 iterations so this value is", "t-SNE is usually in the range [10.0, 1000.0]. If the learning rate is", "a distance matrix. Alternatively, if metric is a callable function, it is called", "exact algorithm should be used when nearest-neighbor errors need to be better than", "optimization. 
Should be at least 250. Default 1000 :param int num_iterations_without_progress: Maximum number", "time and angle greater 0.8 has quickly increasing error. Default 0.5 :return: A", "assumed to be a distance matrix. Alternatively, if metric is a callable function,", "between samples. :param numpy.ndarray embedding: The embedding in which PCA will be applied", "perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose, random_state=random_state, method=method, angle=angle )", "license. import numpy as np from typing import Union from sklearn.manifold import TSNE", "we abort the optimization, used after 250 initial iterations with early exaggeration. Note", "number generator; If RandomState instance, random_state is the random number generator; If None,", "if method='barnes_hut' This is the trade-off between speed and accuracy for Barnpcaes-Hut T-SNE.", "too low, most points may look compressed in a dense cloud with few", "with precomputed distances and is usually more globally stable than random initialization. Default", ":return: A np.ndarray of principal axes in feature space, representing the directions of", "so this value is rounded to the next multiple of 50. Default 300", "the angular size (referred to as theta in [3]) of a distant node", "in different local minima of the cost function. :type random_state: Optional[Union[int, numpy.random.RandomState]] :param", "Default 200.0 :param int num_iterations: Maximum number of iterations for the optimization. Should", "used as a summary node of all points contained within it. This method", "or the learning rate might be too high. 
Default 12.0 :param float learning_rate:", "None') if not num_components: raise ValueError('num_components must be specified but was None') model", "None, method: str = 'barnes_hut', angle: float = 0.5 ) -> np.ndarray: \"\"\"", "Verbosity level. Default 1 :param random_state: If int, random_state is the seed used", "examples. Default 'barnes_hut' :param float angle: Only used if method='barnes_hut' This is the", "sorted by variance` :rtype: numpy.ndarray \"\"\" if embedding is None: raise ValueError('embedding must", "iterations for the optimization. Should be at least 250. Default 1000 :param int", "may look compressed in a dense cloud with few outliers. If the cost", "used when nearest-neighbor errors need to be better than 3%. However, the exact", "float = 12.0, learning_rate: float = 200.0, num_iterations: int = 1000, num_iterations_without_progress: int", "optimization, the early exaggeration factor or the learning rate might be too high.", "that is not convex, i.e. with different initializations we can get different results.", "optimization, used after 250 initial iterations with early exaggeration. Note that progress is", "of the embedded space. Default 2 :param float perplexity: The perplexity is related", "12.0 :param float learning_rate: The learning rate for t-SNE is usually in the", "learning rate is too high, the data may look like a 'ball' with", "time. method='exact' will run on the slower, but exact, algorithm in O(N^2) time.", "The exact algorithm should be used when nearest-neighbor errors need to be better", "measured from a point. If this size is below 'angle' then it is", "the range [10.0, 1000.0]. If the learning rate is too high, the data", "precomputed distances and is usually more globally stable than random initialization. Default 'random'", "= 1e-7, metric: str = \"euclidean\", init: str = \"random\", verbose: int =", "every 50 iterations so this value is rounded to the next multiple of", "euclidean distance. 
Default 'euclidean' :type metric: Union[str, Callable] :param init: Initialization of embedding.", "= 12.0, learning_rate: float = 200.0, num_iterations: int = 1000, num_iterations_without_progress: int =", "method is not very sensitive to changes in this parameter in the range", "to use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD", "int num_components: Dimension of the embedded space. Default 2 :param float perplexity: The", "the embedded space. Default 2 :param float perplexity: The perplexity is related to", "raise ValueError('embedding must be specified but was None') if not num_components: raise ValueError('num_components", "Licensed under the MIT license. import numpy as np from typing import Union", "divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data.", "of iterations without progress before we abort the optimization, used after 250 initial", "from its nearest neighbours. If the learning rate is too low, most points", "embedding is None: raise ValueError('embedding must be specified but was None') if not", "directions of maximum variance in the data. The components are sorted by variance`", "neighbors that is used in other manifold learning algorithms. Larger datasets usually require", "used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.", "and how much space will be between them. For larger values, the space", "gradient norm is below this threshold, the optimization will be stopped. Default 1e-7", "Default 300 :param float min_grad_norm: If the gradient norm is below this threshold,", "of a distant node as measured from a point. 
If this size is", "The perplexity is related to the number of nearest neighbors that is used", "progress before we abort the optimization, used after 250 initial iterations with early", "int num_iterations_without_progress: Maximum number of iterations without progress before we abort the optimization,", "If this size is below 'angle' then it is used as a summary", "matrix. Alternatively, if metric is a callable function, it is called on each", "options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in", "Alternatively, if metric is a callable function, it is called on each pair", "used by `np.random`. Note that different initializations might result in different local minima", "if metric is a callable function, it is called on each pair of", "and the resulting value recorded. The callable should take two arrays from X", "= 1, random_state: Union[int, np.random.RandomState, None] = None, method: str = 'barnes_hut', angle:", "distance between instances in a feature array. If metric is a string, it", "different local minima of the cost function. :type random_state: Optional[Union[int, numpy.random.RandomState]] :param str", "'barnes_hut' :param float angle: Only used if method='barnes_hut' This is the trade-off between", "If the cost function increases during initial optimization, the early exaggeration factor or", "value recorded. The callable should take two arrays from X as input and", ":param init: Initialization of embedding. Possible options are 'random', 'pca', and a numpy", "noise and speed up the computation of pairwise distances between samples. :param numpy.ndarray", "can get different results. It is highly recommended to use another dimensionality reduction", "number of features is very high. This will suppress some noise and speed", "[10.0, 1000.0]. 
If the learning rate is too high, the data may look", "float perplexity: The perplexity is related to the number of nearest neighbors that", "metric: The metric to use when calculating distance between instances in a feature", "a callable function, it is called on each pair of instances (rows) and", "be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or", "if the number of features is very high. This will suppress some noise", ":type init: Union[string, numpy.ndarray] :param int verbose: Verbosity level. Default 1 :param random_state:", "from a point. If this size is below 'angle' then it is used", "[3]) of a distant node as measured from a point. If this size", "are sorted by variance` :rtype: numpy.ndarray \"\"\" if embedding is None: raise ValueError('embedding", "compressed in a dense cloud with few outliers. If the cost function gets", "is rounded to the next multiple of 50. Default 300 :param float min_grad_norm:", "algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between", "too high, the data may look like a 'ball' with any point approximately", "between natural clusters will be larger in the embedded space. Again, the choice", "t-SNE is a tool to visualize high-dimensional data. It converts similarities between data", "int, random_state is the seed used by the random number generator; If RandomState", "feature space, representing the directions of maximum variance in the data. The components", "= 1000, num_iterations_without_progress: int = 300, min_grad_norm: float = 1e-7, metric: str =", "stuck in a bad local minimum increasing the learning rate may help. Default", "by variance` :rtype: numpy.ndarray \"\"\" if embedding is None: raise ValueError('embedding must be", "critical since t-SNE is quite insensitive to this parameter. Default 30.0 :param float", ":param int num_components: Dimension of the embedded space. Default 2 :param float perplexity:", "a point. 
If this size is below 'angle' then it is used as", "increasing computation time and angle greater 0.8 has quickly increasing error. Default 0.5", "is the seed used by the random number generator; If RandomState instance, random_state", "np.ndarray of principal axes in feature space, representing the directions of maximum variance", "= 2, perplexity: float = 30.0, early_exaggeration: float = 12.0, learning_rate: float =", "a 'ball' with any point approximately equidistant from its nearest neighbours. If the", "the exact method cannot scale to millions of examples. Default 'barnes_hut' :param float", "squared euclidean distance. Default 'euclidean' :type metric: Union[str, Callable] :param init: Initialization of", "minimum increasing the learning rate may help. Default 200.0 :param int num_iterations: Maximum", "very high. This will suppress some noise and speed up the computation of", "angle greater 0.8 has quickly increasing error. Default 0.5 :return: A np.ndarray of", "to the number of nearest neighbors that is used in other manifold learning", "and speed up the computation of pairwise distances between samples. :param numpy.ndarray embedding:", "num_iterations_without_progress: Maximum number of iterations without progress before we abort the optimization, used", "number of iterations without progress before we abort the optimization, used after 250", "is used as a summary node of all points contained within it. This", "The choice is not extremely critical since t-SNE is quite insensitive to this", "get different results. It is highly recommended to use another dimensionality reduction method", "least 250. Default 1000 :param int num_iterations_without_progress: Maximum number of iterations without progress", "in a bad local minimum increasing the learning rate may help. Default 200.0", "Union[int, np.random.RandomState, None] = None, method: str = 'barnes_hut', angle: float = 0.5", "use when calculating distance between instances in a feature array. 
If metric is", ":param float early_exaggeration: Controls how tight natural clusters in the original space are", "during initial optimization, the early exaggeration factor or the learning rate might be", "approximately equidistant from its nearest neighbours. If the learning rate is too low,", "at least 250. Default 1000 :param int num_iterations_without_progress: Maximum number of iterations without", "is assumed to be a distance matrix. Alternatively, if metric is a callable", "random_state is the seed used by the random number generator; If RandomState instance,", "to changes in this parameter in the range of 0.2 - 0.8. Angle", "stable than random initialization. Default 'random' :type init: Union[string, numpy.ndarray] :param int verbose:", "related to the number of nearest neighbors that is used in other manifold", "be stopped. Default 1e-7 :param metric: The metric to use when calculating distance", "PCA will be applied :param int num_components: Dimension of the embedded space. Default", "generator; If None, the random number generator is the RandomState instance used by", "verbose: int = 1, random_state: Union[int, np.random.RandomState, None] = None, method: str =", "help. Default 200.0 :param int num_iterations: Maximum number of iterations for the optimization.", "'random', 'pca', and a numpy array of shape (n_samples, num_components). PCA initialization cannot", "data may look like a 'ball' with any point approximately equidistant from its", "as np from typing import Union from sklearn.manifold import TSNE def tsne( embedding:", "suppress some noise and speed up the computation of pairwise distances between samples.", "may help. Default 200.0 :param int num_iterations: Maximum number of iterations for the", "between them. The default is \"euclidean\" which is interpreted as squared euclidean distance.", "'random' :type init: Union[string, numpy.ndarray] :param int verbose: Verbosity level. Default 1 :param", "in O(NlogN) time. 
method='exact' will run on the slower, but exact, algorithm in", "(referred to as theta in [3]) of a distant node as measured from", "sensitive to changes in this parameter in the range of 0.2 - 0.8.", "a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X is assumed to", "the number of nearest neighbors that is used in other manifold learning algorithms.", "Only used if method='barnes_hut' This is the trade-off between speed and accuracy for", "angular size (referred to as theta in [3]) of a distant node as", "a cost function that is not convex, i.e. with different initializations we can", "space, representing the directions of maximum variance in the data. The components are", "num_iterations: Maximum number of iterations for the optimization. Should be at least 250.", "rate is too low, most points may look compressed in a dense cloud", "like a 'ball' with any point approximately equidistant from its nearest neighbours. If", "to be a distance matrix. Alternatively, if metric is a callable function, it", "axes in feature space, representing the directions of maximum variance in the data.", "the number of features is very high. This will suppress some noise and", "Default 'barnes_hut' :param float angle: Only used if method='barnes_hut' This is the trade-off", "numpy as np from typing import Union from sklearn.manifold import TSNE def tsne(", "cannot be used with precomputed distances and is usually more globally stable than", "parameter is not very critical. If the cost function increases during initial optimization,", "of nearest neighbors that is used in other manifold learning algorithms. Larger datasets", "be at least 250. Default 1000 :param int num_iterations_without_progress: Maximum number of iterations", "by the random number generator; If RandomState instance, random_state is the random number", "called on each pair of instances (rows) and the resulting value recorded. 
The", "points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the", "threshold, the optimization will be stopped. Default 1e-7 :param metric: The metric to", "amount (e.g. 50) if the number of features is very high. This will", "of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost function", "Maximum number of iterations without progress before we abort the optimization, used after", "a feature array. If metric is a string, it must be one of", "on the slower, but exact, algorithm in O(N^2) time. The exact algorithm should", "listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X is assumed to be a", "None] = None, method: str = 'barnes_hut', angle: float = 0.5 ) ->", "Angle less than 0.2 has quickly increasing computation time and angle greater 0.8", "be used when nearest-neighbor errors need to be better than 3%. However, the", "is \"euclidean\" which is interpreted as squared euclidean distance. Default 'euclidean' :type metric:", "import TSNE def tsne( embedding: np.ndarray, num_components: int = 2, perplexity: float =", "natural clusters in the original space are in the embedded space and how", "space between natural clusters will be larger in the embedded space. Again, the", "in the embedded space. Again, the choice of this parameter is not very", "be specified but was None') model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations,", "than 3%. However, the exact method cannot scale to millions of examples. Default", "RandomState instance used by `np.random`. Note that different initializations might result in different", "not num_components: raise ValueError('num_components must be specified but was None') model = TSNE(", "factor or the learning rate might be too high. Default 12.0 :param float", "calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time. 
method='exact' will run on", "method (e.g. PCA for dense data or TruncatedSVD for sparse data) to reduce", "metric: str = \"euclidean\", init: str = \"random\", verbose: int = 1, random_state:", "to visualize high-dimensional data. It converts similarities between data points to joint probabilities", "them. The default is \"euclidean\" which is interpreted as squared euclidean distance. Default", "joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities", "to millions of examples. Default 'barnes_hut' :param float angle: Only used if method='barnes_hut'", "np.random.RandomState, None] = None, method: str = 'barnes_hut', angle: float = 0.5 )", "increasing the learning rate may help. Default 200.0 :param int num_iterations: Maximum number", "space will be between them. For larger values, the space between natural clusters", "values, the space between natural clusters will be larger in the embedded space.", "TSNE def tsne( embedding: np.ndarray, num_components: int = 2, perplexity: float = 30.0,", "0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing", "instances in a feature array. If metric is a string, it must be", "recommended to use another dimensionality reduction method (e.g. PCA for dense data or", "PCA for dense data or TruncatedSVD for sparse data) to reduce the number", "in other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider", "the data. The components are sorted by variance` :rtype: numpy.ndarray \"\"\" if embedding", "the low-dimensional embedding and the high-dimensional data. t-SNE has a cost function that", "This will suppress some noise and speed up the computation of pairwise distances", "changes in this parameter in the range of 0.2 - 0.8. Angle less", "stopped. Default 1e-7 :param metric: The metric to use when calculating distance between", "cost function. 
:type random_state: Optional[Union[int, numpy.random.RandomState]] :param str method: By default the gradient", "Barnpcaes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3])", "exact method cannot scale to millions of examples. Default 'barnes_hut' :param float angle:", "take two arrays from X as input and return a value indicating the", "If the gradient norm is below this threshold, the optimization will be stopped.", "in which PCA will be applied :param int num_components: Dimension of the embedded", "similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler", "5 and 50. The choice is not extremely critical since t-SNE is quite", "O(N^2) time. The exact algorithm should be used when nearest-neighbor errors need to", "scale to millions of examples. Default 'barnes_hut' :param float angle: Only used if", "a reasonable amount (e.g. 50) if the number of features is very high.", "pair of instances (rows) and the resulting value recorded. The callable should take", "is not very critical. If the cost function increases during initial optimization, the", "The components are sorted by variance` :rtype: numpy.ndarray \"\"\" if embedding is None:", "50. Default 300 :param float min_grad_norm: If the gradient norm is below this", "a numpy array of shape (n_samples, num_components). PCA initialization cannot be used with", "the directions of maximum variance in the data. The components are sorted by", "a value indicating the distance between them. The default is \"euclidean\" which is", "joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has a", "\"random\", verbose: int = 1, random_state: Union[int, np.random.RandomState, None] = None, method: str", "increasing error. Default 0.5 :return: A np.ndarray of principal axes in feature space,", "it is used as a summary node of all points contained within it.", "Maximum number of iterations for the optimization. Should be at least 250. 
Default", "Default 1e-7 :param metric: The metric to use when calculating distance between instances", "since t-SNE is quite insensitive to this parameter. Default 30.0 :param float early_exaggeration:", "A np.ndarray of principal axes in feature space, representing the directions of maximum", "algorithm uses Barnes-Hut approximation running in O(NlogN) time. method='exact' will run on the", "i.e. with different initializations we can get different results. It is highly recommended", "random_state: Optional[Union[int, numpy.random.RandomState]] :param str method: By default the gradient calculation algorithm uses", "perplexity. Consider selecting a value between 5 and 50. The choice is not", "different initializations might result in different local minima of the cost function. :type", "TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable", "PCA initialization cannot be used with precomputed distances and is usually more globally", "between them. For larger values, the space between natural clusters will be larger", "was None') model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric,", "to the next multiple of 50. Default 300 :param float min_grad_norm: If the", "learning rate might be too high. Default 12.0 :param float learning_rate: The learning", "algorithm in O(N^2) time. The exact algorithm should be used when nearest-neighbor errors", "is quite insensitive to this parameter. Default 30.0 :param float early_exaggeration: Controls how", "is \"precomputed\", X is assumed to be a distance matrix. Alternatively, if metric", "number of nearest neighbors that is used in other manifold learning algorithms. Larger", "was None') if not num_components: raise ValueError('num_components must be specified but was None')", "the embedded space. 
Again, the choice of this parameter is not very critical.", ":param metric: The metric to use when calculating distance between instances in a", "a string, it must be one of the options allowed by scipy.spatial.distance.pdist for", "Union[string, numpy.ndarray] :param int verbose: Verbosity level. Default 1 :param random_state: If int,", "seed used by the random number generator; If RandomState instance, random_state is the", "array. If metric is a string, it must be one of the options", "default the gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time. method='exact'", "verbose: Verbosity level. Default 1 :param random_state: If int, random_state is the seed", "as squared euclidean distance. Default 'euclidean' :type metric: Union[str, Callable] :param init: Initialization", "running in O(NlogN) time. method='exact' will run on the slower, but exact, algorithm", "a bad local minimum increasing the learning rate may help. Default 200.0 :param", "the slower, but exact, algorithm in O(N^2) time. The exact algorithm should be", "Default 30.0 :param float early_exaggeration: Controls how tight natural clusters in the original", "local minimum increasing the learning rate may help. Default 200.0 :param int num_iterations:", "`np.random`. Note that different initializations might result in different local minima of the", "t-SNE is quite insensitive to this parameter. Default 30.0 :param float early_exaggeration: Controls", "\"\"\" t-distributed Stochastic Neighbor Embedding. t-SNE is a tool to visualize high-dimensional data.", "(n_samples, num_components). PCA initialization cannot be used with precomputed distances and is usually", "data. The components are sorted by variance` :rtype: numpy.ndarray \"\"\" if embedding is", "millions of examples. 
Default 'barnes_hut' :param float angle: Only used if method='barnes_hut' This", "str = 'barnes_hut', angle: float = 0.5 ) -> np.ndarray: \"\"\" t-distributed Stochastic", "from typing import Union from sklearn.manifold import TSNE def tsne( embedding: np.ndarray, num_components:", "look compressed in a dense cloud with few outliers. If the cost function", "value indicating the distance between them. The default is \"euclidean\" which is interpreted", "resulting value recorded. The callable should take two arrays from X as input", "between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence", "embedded space and how much space will be between them. For larger values,", "dimensions to a reasonable amount (e.g. 50) if the number of features is", "data. It converts similarities between data points to joint probabilities and tries to", "cost function that is not convex, i.e. with different initializations we can get", "between 5 and 50. The choice is not extremely critical since t-SNE is", "to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint", "the early exaggeration factor or the learning rate might be too high. Default", "for the optimization. Should be at least 250. Default 1000 :param int num_iterations_without_progress:", "its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\",", ") -> np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding. t-SNE is a tool to", "more globally stable than random initialization. Default 'random' :type init: Union[string, numpy.ndarray] :param", "is the RandomState instance used by `np.random`. Note that different initializations might result", "quite insensitive to this parameter. Default 30.0 :param float early_exaggeration: Controls how tight", "is the trade-off between speed and accuracy for Barnpcaes-Hut T-SNE. 
'angle' is the", "Default 'random' :type init: Union[string, numpy.ndarray] :param int verbose: Verbosity level. Default 1", "'angle' is the angular size (referred to as theta in [3]) of a", "random number generator; If None, the random number generator is the RandomState instance", "results. It is highly recommended to use another dimensionality reduction method (e.g. PCA", "with any point approximately equidistant from its nearest neighbours. If the learning rate", "quickly increasing computation time and angle greater 0.8 has quickly increasing error. Default", "of examples. Default 'barnes_hut' :param float angle: Only used if method='barnes_hut' This is", "1000 :param int num_iterations_without_progress: Maximum number of iterations without progress before we abort", "for sparse data) to reduce the number of dimensions to a reasonable amount", "Default 1 :param random_state: If int, random_state is the seed used by the", "a dense cloud with few outliers. If the cost function gets stuck in", "gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time. method='exact' will run", "50. The choice is not extremely critical since t-SNE is quite insensitive to", "up the computation of pairwise distances between samples. :param numpy.ndarray embedding: The embedding", "learning rate is too low, most points may look compressed in a dense", "usually in the range [10.0, 1000.0]. If the learning rate is too high,", "be applied :param int num_components: Dimension of the embedded space. Default 2 :param", "high-dimensional data. t-SNE has a cost function that is not convex, i.e. with", "the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed", "the choice of this parameter is not very critical. If the cost function", "a tool to visualize high-dimensional data. It converts similarities between data points to", "greater 0.8 has quickly increasing error. 
Default 0.5 :return: A np.ndarray of principal", "If metric is \"precomputed\", X is assumed to be a distance matrix. Alternatively,", "in O(N^2) time. The exact algorithm should be used when nearest-neighbor errors need", "probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost", "If the learning rate is too high, the data may look like a", "has quickly increasing computation time and angle greater 0.8 has quickly increasing error.", "metric to use when calculating distance between instances in a feature array. If", "random initialization. Default 'random' :type init: Union[string, numpy.ndarray] :param int verbose: Verbosity level.", "optimization will be stopped. Default 1e-7 :param metric: The metric to use when", "by `np.random`. Note that different initializations might result in different local minima of", "'barnes_hut', angle: float = 0.5 ) -> np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding.", ":param int num_iterations: Maximum number of iterations for the optimization. Should be at", "method='exact' will run on the slower, but exact, algorithm in O(N^2) time. The", "generator is the RandomState instance used by `np.random`. Note that different initializations might", ":rtype: numpy.ndarray \"\"\" if embedding is None: raise ValueError('embedding must be specified but", "the RandomState instance used by `np.random`. Note that different initializations might result in", "to reduce the number of dimensions to a reasonable amount (e.g. 50) if", "them. For larger values, the space between natural clusters will be larger in", "t-distributed Stochastic Neighbor Embedding. t-SNE is a tool to visualize high-dimensional data. It", "embedded space. Default 2 :param float perplexity: The perplexity is related to the", "some noise and speed up the computation of pairwise distances between samples. :param", "0.8 has quickly increasing error. 
Default 0.5 :return: A np.ndarray of principal axes", "RandomState instance, random_state is the random number generator; If None, the random number", "250. Default 1000 :param int num_iterations_without_progress: Maximum number of iterations without progress before", "1e-7 :param metric: The metric to use when calculating distance between instances in", "has a cost function that is not convex, i.e. with different initializations we", "the space between natural clusters will be larger in the embedded space. Again,", "= 0.5 ) -> np.ndarray: \"\"\" t-distributed Stochastic Neighbor Embedding. t-SNE is a", "different initializations we can get different results. It is highly recommended to use", "neighbours. If the learning rate is too low, most points may look compressed", "None, the random number generator is the RandomState instance used by `np.random`. Note", "instance, random_state is the random number generator; If None, the random number generator", "the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing", "If the cost function gets stuck in a bad local minimum increasing the", "- 0.8. Angle less than 0.2 has quickly increasing computation time and angle", "early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose, random_state=random_state, method=method, angle=angle ) return", "early_exaggeration: float = 12.0, learning_rate: float = 200.0, num_iterations: int = 1000, num_iterations_without_progress:", "function gets stuck in a bad local minimum increasing the learning rate may", "in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X is assumed to be a distance", "learning_rate: float = 200.0, num_iterations: int = 1000, num_iterations_without_progress: int = 300, min_grad_norm:", "used after 250 initial iterations with early exaggeration. 
Note that progress is only", "random_state: If int, random_state is the seed used by the random number generator;", "very sensitive to changes in this parameter in the range of 0.2 -", "str = \"random\", verbose: int = 1, random_state: Union[int, np.random.RandomState, None] = None,", "in this parameter in the range of 0.2 - 0.8. Angle less than", "of all points contained within it. This method is not very sensitive to", "np.ndarray, num_components: int = 2, perplexity: float = 30.0, early_exaggeration: float = 12.0,", "shape (n_samples, num_components). PCA initialization cannot be used with precomputed distances and is", "much space will be between them. For larger values, the space between natural", "init: str = \"random\", verbose: int = 1, random_state: Union[int, np.random.RandomState, None] =", "of embedding. Possible options are 'random', 'pca', and a numpy array of shape", "input and return a value indicating the distance between them. The default is", "multiple of 50. Default 300 :param float min_grad_norm: If the gradient norm is", "function. :type random_state: Optional[Union[int, numpy.random.RandomState]] :param str method: By default the gradient calculation", "point. If this size is below 'angle' then it is used as a", "of features is very high. This will suppress some noise and speed up", "if embedding is None: raise ValueError('embedding must be specified but was None') if", "early_exaggeration: Controls how tight natural clusters in the original space are in the", "It is highly recommended to use another dimensionality reduction method (e.g. PCA for", "the gradient norm is below this threshold, the optimization will be stopped. 
Default", "TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose, random_state=random_state, method=method,", "choice of this parameter is not very critical. If the cost function increases", "callable function, it is called on each pair of instances (rows) and the", "this parameter is not very critical. If the cost function increases during initial", "int = 2, perplexity: float = 30.0, early_exaggeration: float = 12.0, learning_rate: float", "tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional", "after 250 initial iterations with early exaggeration. Note that progress is only checked", "If the learning rate is too low, most points may look compressed in", "under the MIT license. import numpy as np from typing import Union from", "The embedding in which PCA will be applied :param int num_components: Dimension of", "number of iterations for the optimization. Should be at least 250. Default 1000", "if not num_components: raise ValueError('num_components must be specified but was None') model =", "is not very sensitive to changes in this parameter in the range of", "outliers. If the cost function gets stuck in a bad local minimum increasing", "initialization cannot be used with precomputed distances and is usually more globally stable", "numpy.random.RandomState]] :param str method: By default the gradient calculation algorithm uses Barnes-Hut approximation", "in the original space are in the embedded space and how much space", "data) to reduce the number of dimensions to a reasonable amount (e.g. 50)", "method cannot scale to millions of examples. Default 'barnes_hut' :param float angle: Only", "the MIT license. 
import numpy as np from typing import Union from sklearn.manifold", "in the embedded space and how much space will be between them. For", "it is called on each pair of instances (rows) and the resulting value", "learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose, random_state=random_state, method=method, angle=angle ) return model.fit_transform(embedding)", "method='barnes_hut' This is the trade-off between speed and accuracy for Barnpcaes-Hut T-SNE. 'angle'", "time. The exact algorithm should be used when nearest-neighbor errors need to be", "metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is \"precomputed\", X is assumed to be", "minima of the cost function. :type random_state: Optional[Union[int, numpy.random.RandomState]] :param str method: By", ":param float learning_rate: The learning rate for t-SNE is usually in the range", ":param float perplexity: The perplexity is related to the number of nearest neighbors", "the random number generator is the RandomState instance used by `np.random`. Note that", "X is assumed to be a distance matrix. Alternatively, if metric is a", "\"euclidean\" which is interpreted as squared euclidean distance. Default 'euclidean' :type metric: Union[str,", "range [10.0, 1000.0]. If the learning rate is too high, the data may", "the computation of pairwise distances between samples. :param numpy.ndarray embedding: The embedding in", "on each pair of instances (rows) and the resulting value recorded. The callable", "of shape (n_samples, num_components). PCA initialization cannot be used with precomputed distances and", "0.5 :return: A np.ndarray of principal axes in feature space, representing the directions", "larger in the embedded space. 
Again, the choice of this parameter is not", "Possible options are 'random', 'pca', and a numpy array of shape (n_samples, num_components).", "For larger values, the space between natural clusters will be larger in the", "low, most points may look compressed in a dense cloud with few outliers.", "algorithm should be used when nearest-neighbor errors need to be better than 3%.", "num_components: raise ValueError('num_components must be specified but was None') model = TSNE( n_components=num_components,", "200.0 :param int num_iterations: Maximum number of iterations for the optimization. Should be", "string, it must be one of the options allowed by scipy.spatial.distance.pdist for its", "X as input and return a value indicating the distance between them. The", "If int, random_state is the seed used by the random number generator; If", "be better than 3%. However, the exact method cannot scale to millions of", "as a summary node of all points contained within it. This method is", "but was None') if not num_components: raise ValueError('num_components must be specified but was", "and 50. The choice is not extremely critical since t-SNE is quite insensitive", "\"precomputed\", X is assumed to be a distance matrix. Alternatively, if metric is", "Optional[Union[int, numpy.random.RandomState]] :param str method: By default the gradient calculation algorithm uses Barnes-Hut", "node as measured from a point. If this size is below 'angle' then", "is interpreted as squared euclidean distance. Default 'euclidean' :type metric: Union[str, Callable] :param", "trade-off between speed and accuracy for Barnpcaes-Hut T-SNE. 'angle' is the angular size", "2 :param float perplexity: The perplexity is related to the number of nearest", "O(NlogN) time. method='exact' will run on the slower, but exact, algorithm in O(N^2)", "is the random number generator; If None, the random number generator is the", "'ball' with any point approximately equidistant from its nearest neighbours. 
If the learning", ":param float angle: Only used if method='barnes_hut' This is the trade-off between speed", "rate for t-SNE is usually in the range [10.0, 1000.0]. If the learning", "metric is a callable function, it is called on each pair of instances", "1 :param random_state: If int, random_state is the seed used by the random", "equidistant from its nearest neighbours. If the learning rate is too low, most", "principal axes in feature space, representing the directions of maximum variance in the", "str = \"euclidean\", init: str = \"random\", verbose: int = 1, random_state: Union[int,", "space are in the embedded space and how much space will be between", "high. Default 12.0 :param float learning_rate: The learning rate for t-SNE is usually", "and angle greater 0.8 has quickly increasing error. Default 0.5 :return: A np.ndarray", "high. This will suppress some noise and speed up the computation of pairwise", "The callable should take two arrays from X as input and return a", "high, the data may look like a 'ball' with any point approximately equidistant", "is not extremely critical since t-SNE is quite insensitive to this parameter. Default", "this parameter in the range of 0.2 - 0.8. Angle less than 0.2", "visualize high-dimensional data. It converts similarities between data points to joint probabilities and", "larger perplexity. Consider selecting a value between 5 and 50. The choice is", "variance` :rtype: numpy.ndarray \"\"\" if embedding is None: raise ValueError('embedding must be specified", "need to be better than 3%. However, the exact method cannot scale to", "num_components: int = 2, perplexity: float = 30.0, early_exaggeration: float = 12.0, learning_rate:", "dense cloud with few outliers. If the cost function gets stuck in a", "Neighbor Embedding. t-SNE is a tool to visualize high-dimensional data. It converts similarities", "the trade-off between speed and accuracy for Barnpcaes-Hut T-SNE. 
'angle' is the angular", "50) if the number of features is very high. This will suppress some", "iterations without progress before we abort the optimization, used after 250 initial iterations", "instances (rows) and the resulting value recorded. The callable should take two arrays", "as theta in [3]) of a distant node as measured from a point.", "the original space are in the embedded space and how much space will", "Default 12.0 :param float learning_rate: The learning rate for t-SNE is usually in", "which PCA will be applied :param int num_components: Dimension of the embedded space.", "of instances (rows) and the resulting value recorded. The callable should take two", "are 'random', 'pca', and a numpy array of shape (n_samples, num_components). PCA initialization", "distant node as measured from a point. If this size is below 'angle'", "reduction method (e.g. PCA for dense data or TruncatedSVD for sparse data) to", "this value is rounded to the next multiple of 50. Default 300 :param", "MIT license. import numpy as np from typing import Union from sklearn.manifold import", "50 iterations so this value is rounded to the next multiple of 50.", "it must be one of the options allowed by scipy.spatial.distance.pdist for its metric", "uses Barnes-Hut approximation running in O(NlogN) time. method='exact' will run on the slower,", "embedding in which PCA will be applied :param int num_components: Dimension of the", "any point approximately equidistant from its nearest neighbours. If the learning rate is", "# Licensed under the MIT license. import numpy as np from typing import", "300 :param float min_grad_norm: If the gradient norm is below this threshold, the", "extremely critical since t-SNE is quite insensitive to this parameter. Default 30.0 :param", "Microsoft Corporation. # Licensed under the MIT license. import numpy as np from", "has quickly increasing error. Default 0.5 :return: A np.ndarray of principal axes in", "a value between 5 and 50. 
The choice is not extremely critical since", "theta in [3]) of a distant node as measured from a point. If", "is only checked every 50 iterations so this value is rounded to the", "distance matrix. Alternatively, if metric is a callable function, it is called on", "distance. Default 'euclidean' :type metric: Union[str, Callable] :param init: Initialization of embedding. Possible", "initial optimization, the early exaggeration factor or the learning rate might be too", "error. Default 0.5 :return: A np.ndarray of principal axes in feature space, representing", "Initialization of embedding. Possible options are 'random', 'pca', and a numpy array of", "checked every 50 iterations so this value is rounded to the next multiple", "random_state: Union[int, np.random.RandomState, None] = None, method: str = 'barnes_hut', angle: float =", "dense data or TruncatedSVD for sparse data) to reduce the number of dimensions", "reasonable amount (e.g. 50) if the number of features is very high. This", "insensitive to this parameter. Default 30.0 :param float early_exaggeration: Controls how tight natural", "(e.g. 50) if the number of features is very high. This will suppress", "init: Union[string, numpy.ndarray] :param int verbose: Verbosity level. Default 1 :param random_state: If", "iterations so this value is rounded to the next multiple of 50. Default", "data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between", "the high-dimensional data. t-SNE has a cost function that is not convex, i.e.", "is None: raise ValueError('embedding must be specified but was None') if not num_components:", "function increases during initial optimization, the early exaggeration factor or the learning rate", "int = 300, min_grad_norm: float = 1e-7, metric: str = \"euclidean\", init: str", "array of shape (n_samples, num_components). PCA initialization cannot be used with precomputed distances", "rate might be too high. 
Default 12.0 :param float learning_rate: The learning rate", "int = 1000, num_iterations_without_progress: int = 300, min_grad_norm: float = 1e-7, metric: str", "arrays from X as input and return a value indicating the distance between", "summary node of all points contained within it. This method is not very", "not convex, i.e. with different initializations we can get different results. It is", "the cost function. :type random_state: Optional[Union[int, numpy.random.RandomState]] :param str method: By default the", "Default 0.5 :return: A np.ndarray of principal axes in feature space, representing the", "Stochastic Neighbor Embedding. t-SNE is a tool to visualize high-dimensional data. It converts", "minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and", "a larger perplexity. Consider selecting a value between 5 and 50. The choice", "random number generator is the RandomState instance used by `np.random`. Note that different", "3%. However, the exact method cannot scale to millions of examples. Default 'barnes_hut'", "ValueError('num_components must be specified but was None') model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration,", "space. Again, the choice of this parameter is not very critical. If the", "it. This method is not very sensitive to changes in this parameter in", "too high. Default 12.0 :param float learning_rate: The learning rate for t-SNE is", "num_iterations_without_progress: int = 300, min_grad_norm: float = 1e-7, metric: str = \"euclidean\", init:", "parameter in the range of 0.2 - 0.8. 
Angle less than 0.2 has", "= TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose, random_state=random_state,", "in the range [10.0, 1000.0]. If the learning rate is too high, the", "very critical. If the cost function increases during initial optimization, the early exaggeration", "value between 5 and 50. The choice is not extremely critical since t-SNE", "numpy.ndarray embedding: The embedding in which PCA will be applied :param int num_components:", "as input and return a value indicating the distance between them. The default", "high-dimensional data. It converts similarities between data points to joint probabilities and tries", "range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation", "for t-SNE is usually in the range [10.0, 1000.0]. If the learning rate", "the next multiple of 50. Default 300 :param float min_grad_norm: If the gradient", "num_components). PCA initialization cannot be used with precomputed distances and is usually more", "early exaggeration factor or the learning rate might be too high. Default 12.0", "result in different local minima of the cost function. :type random_state: Optional[Union[int, numpy.random.RandomState]]", "size (referred to as theta in [3]) of a distant node as measured", "of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time", "will be stopped. Default 1e-7 :param metric: The metric to use when calculating", "rate may help. Default 200.0 :param int num_iterations: Maximum number of iterations for", "of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric", "level. Default 1 :param random_state: If int, random_state is the seed used by", "the optimization will be stopped. 
Default 1e-7 :param metric: The metric to use", "'pca', and a numpy array of shape (n_samples, num_components). PCA initialization cannot be", "as measured from a point. If this size is below 'angle' then it", "= \"euclidean\", init: str = \"random\", verbose: int = 1, random_state: Union[int, np.random.RandomState,", "samples. :param numpy.ndarray embedding: The embedding in which PCA will be applied :param", "number of dimensions to a reasonable amount (e.g. 50) if the number of", "usually more globally stable than random initialization. Default 'random' :type init: Union[string, numpy.ndarray]", "points may look compressed in a dense cloud with few outliers. If the", "for Barnpcaes-Hut T-SNE. 'angle' is the angular size (referred to as theta in", "progress is only checked every 50 iterations so this value is rounded to", "the distance between them. The default is \"euclidean\" which is interpreted as squared", "numpy.ndarray] :param int verbose: Verbosity level. Default 1 :param random_state: If int, random_state", "exact, algorithm in O(N^2) time. The exact algorithm should be used when nearest-neighbor", "better than 3%. However, the exact method cannot scale to millions of examples.", "0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and", "be too high. Default 12.0 :param float learning_rate: The learning rate for t-SNE", "and return a value indicating the distance between them. The default is \"euclidean\"", "options are 'random', 'pca', and a numpy array of shape (n_samples, num_components). PCA", "and the high-dimensional data. t-SNE has a cost function that is not convex,", "Corporation. # Licensed under the MIT license. 
import numpy as np from typing", "model = TSNE( n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose,", "nearest neighbours. If the learning rate is too low, most points may look", "with early exaggeration. Note that progress is only checked every 50 iterations so", "the joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has", ":type metric: Union[str, Callable] :param init: Initialization of embedding. Possible options are 'random',", "\"\"\" if embedding is None: raise ValueError('embedding must be specified but was None')", "1000.0]. If the learning rate is too high, the data may look like", "is a tool to visualize high-dimensional data. It converts similarities between data points", "and is usually more globally stable than random initialization. Default 'random' :type init:", "be larger in the embedded space. Again, the choice of this parameter is", "initializations we can get different results. It is highly recommended to use another", "of the cost function. :type random_state: Optional[Union[int, numpy.random.RandomState]] :param str method: By default", "most points may look compressed in a dense cloud with few outliers. If", "all points contained within it. 
This method is not very sensitive to changes", "not very sensitive to changes in this parameter in the range of 0.2", "12.0, learning_rate: float = 200.0, num_iterations: int = 1000, num_iterations_without_progress: int = 300,", "numpy.ndarray \"\"\" if embedding is None: raise ValueError('embedding must be specified but was", "probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of", "float = 200.0, num_iterations: int = 1000, num_iterations_without_progress: int = 300, min_grad_norm: float", "generator; If RandomState instance, random_state is the random number generator; If None, the", "is too high, the data may look like a 'ball' with any point", "= 300, min_grad_norm: float = 1e-7, metric: str = \"euclidean\", init: str =", "angle: Only used if method='barnes_hut' This is the trade-off between speed and accuracy", "initializations might result in different local minima of the cost function. :type random_state:", "tsne( embedding: np.ndarray, num_components: int = 2, perplexity: float = 30.0, early_exaggeration: float", "= 200.0, num_iterations: int = 1000, num_iterations_without_progress: int = 300, min_grad_norm: float =", "function that is not convex, i.e. with different initializations we can get different", "Default 2 :param float perplexity: The perplexity is related to the number of", "should take two arrays from X as input and return a value indicating", "exaggeration. Note that progress is only checked every 50 iterations so this value", "only checked every 50 iterations so this value is rounded to the next", "be used with precomputed distances and is usually more globally stable than random", "import Union from sklearn.manifold import TSNE def tsne( embedding: np.ndarray, num_components: int =", "pairwise distances between samples. :param numpy.ndarray embedding: The embedding in which PCA will", "original space are in the embedded space and how much space will be", "to this parameter. 
Default 30.0 :param float early_exaggeration: Controls how tight natural clusters", "may look like a 'ball' with any point approximately equidistant from its nearest", "of this parameter is not very critical. If the cost function increases during", "If None, the random number generator is the RandomState instance used by `np.random`.", "Default 1000 :param int num_iterations_without_progress: Maximum number of iterations without progress before we", "n_components=num_components, perplexity=perplexity, early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=num_iterations, n_iter_without_progress=num_iterations_without_progress, min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose, random_state=random_state, method=method, angle=angle", "2, perplexity: float = 30.0, early_exaggeration: float = 12.0, learning_rate: float = 200.0,", "will be larger in the embedded space. Again, the choice of this parameter", "Embedding. t-SNE is a tool to visualize high-dimensional data. It converts similarities between", "that is used in other manifold learning algorithms. Larger datasets usually require a", "before we abort the optimization, used after 250 initial iterations with early exaggeration.", "in a feature array. If metric is a string, it must be one", "might result in different local minima of the cost function. :type random_state: Optional[Union[int,", "If RandomState instance, random_state is the random number generator; If None, the random", "the optimization, used after 250 initial iterations with early exaggeration. Note that progress", "run on the slower, but exact, algorithm in O(N^2) time. The exact algorithm", "is the angular size (referred to as theta in [3]) of a distant", "is usually more globally stable than random initialization. 
Default 'random' :type init: Union[string,", "without progress before we abort the optimization, used after 250 initial iterations with", "1e-7, metric: str = \"euclidean\", init: str = \"random\", verbose: int = 1,", "is not convex, i.e. with different initializations we can get different results. It", "data. t-SNE has a cost function that is not convex, i.e. with different", "a distant node as measured from a point. If this size is below", "num_components: Dimension of the embedded space. Default 2 :param float perplexity: The perplexity", "which is interpreted as squared euclidean distance. Default 'euclidean' :type metric: Union[str, Callable]", "is below this threshold, the optimization will be stopped. Default 1e-7 :param metric:", "increases during initial optimization, the early exaggeration factor or the learning rate might", "If metric is a string, it must be one of the options allowed", "1, random_state: Union[int, np.random.RandomState, None] = None, method: str = 'barnes_hut', angle: float", "200.0, num_iterations: int = 1000, num_iterations_without_progress: int = 300, min_grad_norm: float = 1e-7,", "converts similarities between data points to joint probabilities and tries to minimize the", "the embedded space and how much space will be between them. For larger", "the cost function gets stuck in a bad local minimum increasing the learning", "= None, method: str = 'barnes_hut', angle: float = 0.5 ) -> np.ndarray:", "this parameter. Default 30.0 :param float early_exaggeration: Controls how tight natural clusters in", "cloud with few outliers. If the cost function gets stuck in a bad", "Dimension of the embedded space. Default 2 :param float perplexity: The perplexity is", "the resulting value recorded. The callable should take two arrays from X as", "computation time and angle greater 0.8 has quickly increasing error. Default 0.5 :return:", "critical. 
If the cost function increases during initial optimization, the early exaggeration factor", "must be specified but was None') if not num_components: raise ValueError('num_components must be", "scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric", "the seed used by the random number generator; If RandomState instance, random_state is", "between the joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE", "Union from sklearn.manifold import TSNE def tsne( embedding: np.ndarray, num_components: int = 2,", "distances and is usually more globally stable than random initialization. Default 'random' :type", "will suppress some noise and speed up the computation of pairwise distances between", "are in the embedded space and how much space will be between them.", "nearest-neighbor errors need to be better than 3%. However, the exact method cannot", "float min_grad_norm: If the gradient norm is below this threshold, the optimization will", "min_grad_norm: If the gradient norm is below this threshold, the optimization will be", "import numpy as np from typing import Union from sklearn.manifold import TSNE def", "250 initial iterations with early exaggeration. Note that progress is only checked every", "sparse data) to reduce the number of dimensions to a reasonable amount (e.g.", "bad local minimum increasing the learning rate may help. Default 200.0 :param int", "raise ValueError('num_components must be specified but was None') model = TSNE( n_components=num_components, perplexity=perplexity,", "highly recommended to use another dimensionality reduction method (e.g. 
PCA for dense data", "callable should take two arrays from X as input and return a value", "from X as input and return a value indicating the distance between them.", "perplexity is related to the number of nearest neighbors that is used in", "abort the optimization, used after 250 initial iterations with early exaggeration. Note that", "less than 0.2 has quickly increasing computation time and angle greater 0.8 has", "this threshold, the optimization will be stopped. Default 1e-7 :param metric: The metric", "must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter,", "is usually in the range [10.0, 1000.0]. If the learning rate is too", "= 30.0, early_exaggeration: float = 12.0, learning_rate: float = 200.0, num_iterations: int =", "points contained within it. This method is not very sensitive to changes in", "(e.g. PCA for dense data or TruncatedSVD for sparse data) to reduce the", "is a callable function, it is called on each pair of instances (rows)", "(rows) and the resulting value recorded. The callable should take two arrays from", "and a numpy array of shape (n_samples, num_components). PCA initialization cannot be used", "below 'angle' then it is used as a summary node of all points", "0.8. Angle less than 0.2 has quickly increasing computation time and angle greater", "the data may look like a 'ball' with any point approximately equidistant from", "\"euclidean\", init: str = \"random\", verbose: int = 1, random_state: Union[int, np.random.RandomState, None]", "number generator; If None, the random number generator is the RandomState instance used", "int verbose: Verbosity level. Default 1 :param random_state: If int, random_state is the", "metric: Union[str, Callable] :param init: Initialization of embedding. Possible options are 'random', 'pca',", "approximation running in O(NlogN) time. method='exact' will run on the slower, but exact,", "iterations with early exaggeration. 
Note that progress is only checked every 50 iterations", "However, the exact method cannot scale to millions of examples. Default 'barnes_hut' :param", "from sklearn.manifold import TSNE def tsne( embedding: np.ndarray, num_components: int = 2, perplexity:", "num_iterations: int = 1000, num_iterations_without_progress: int = 300, min_grad_norm: float = 1e-7, metric:", "used by the random number generator; If RandomState instance, random_state is the random", "of principal axes in feature space, representing the directions of maximum variance in", "embedding and the high-dimensional data. t-SNE has a cost function that is not", "embedding: The embedding in which PCA will be applied :param int num_components: Dimension", "computation of pairwise distances between samples. :param numpy.ndarray embedding: The embedding in which", "Should be at least 250. Default 1000 :param int num_iterations_without_progress: Maximum number of", "will run on the slower, but exact, algorithm in O(N^2) time. The exact", "# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import numpy", "by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If", "min_grad_norm: float = 1e-7, metric: str = \"euclidean\", init: str = \"random\", verbose:", "a summary node of all points contained within it. This method is not", "default is \"euclidean\" which is interpreted as squared euclidean distance. Default 'euclidean' :type" ]
[ "# 'std': lambda x: x.std() # } # result = f.apply(past=past_aggr, target=lambda x:", "test_fragmentation(self): # demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1, 3, 4]}", "# demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1, 3, 4]} #", "seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self): # demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'],", "# result = f.apply(past=past_aggr, target=lambda x: x[0]) # result = if __name__ ==", "= seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self): # demo = {'date': ['01-01-2019', '01-03-2019',", "df def test_demo_missed(self): # 2nd january is lacked h = seq.Historical(self.df) daily =", "'01-03-2019', '01-04-2019'], # 'temperature': [1, 3, 4]} # h = tnb.Historical(df) # f", "= df.set_index('date') self.df = df def test_demo_missed(self): # 2nd january is lacked h", "df.set_index('date') self.df = df def test_demo_missed(self): # 2nd january is lacked h =", "2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head()) # def", "x: x.std() # } # result = f.apply(past=past_aggr, target=lambda x: x[0]) # result", "def test_fragmentation(self): # demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1, 3,", "{ # 'weekly': lambda x: x[::7], # 'max': lambda x: x.max(), # 'std':", "pandas as pd class TestProjector(unittest.TestCase): def setUp(self): demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'],", "unittest import os import sequentia as seq import pandas as pd class TestProjector(unittest.TestCase):", "'weekly': lambda x: x[::7], # 'max': lambda x: x.max(), # 'std': lambda x:", "['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]} df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date'])", "as pd 
class TestProjector(unittest.TestCase): def setUp(self): demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature':", "= 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head()) #", "import unittest import os import sequentia as seq import pandas as pd class", "= pd.to_datetime(df['date']) df = df.set_index('date') self.df = df def test_demo_missed(self): # 2nd january", "= seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self):", "= {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]} df = pd.DataFrame(demo) df['date']", "'std': lambda x: x.std() # } # result = f.apply(past=past_aggr, target=lambda x: x[0])", "TestProjector(unittest.TestCase): def setUp(self): demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]}", "h = tnb.Historical(df) # f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr = {", "h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def", "print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self):", "def setUp(self): demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]} df", "[1, 3, 4]} df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df = df.set_index('date') self.df", "= tnb.Historical(df) # f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr = { #", "# h = tnb.Historical(df) # f = 
h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr =", "h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr = { # 'weekly': lambda x: x[::7], #", "missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head())", "# } # result = f.apply(past=past_aggr, target=lambda x: x[0]) # result = if", "'01-04-2019'], # 'temperature': [1, 3, 4]} # h = tnb.Historical(df) # f =", "'temperature': [1, 3, 4]} # h = tnb.Historical(df) # f = h.fragmentate(['28days','1day'], names=['past',", "import os import sequentia as seq import pandas as pd class TestProjector(unittest.TestCase): def", "tnb.Historical(df) # f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr = { # 'weekly':", "# 2nd january is lacked h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be =", "import pandas as pd class TestProjector(unittest.TestCase): def setUp(self): demo = {'date': ['01-01-2019', '01-03-2019',", "= { # 'weekly': lambda x: x[::7], # 'max': lambda x: x.max(), #", "# f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr = { # 'weekly': lambda", "3, 4]} # h = tnb.Historical(df) # f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] #", "{'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]} df = pd.DataFrame(demo) df['date'] =", "'target'])[::'1day'] # past_aggr = { # 'weekly': lambda x: x[::7], # 'max': lambda", "'01-04-2019'], 'temperature': [1, 3, 4]} df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df =", "as seq import pandas as pd class TestProjector(unittest.TestCase): def setUp(self): demo = {'date':", "'temperature': [1, 3, 4]} df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df = df.set_index('date')", "'max': 
lambda x: x.max(), # 'std': lambda x: x.std() # } # result", "def test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self): # demo =", "'01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]} df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df", "4]} # h = tnb.Historical(df) # f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr", "'year') print(h.head()) # def test_fragmentation(self): # demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], #", "= df def test_demo_missed(self): # 2nd january is lacked h = seq.Historical(self.df) daily", "x.std() # } # result = f.apply(past=past_aggr, target=lambda x: x[0]) # result =", "pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df = df.set_index('date') self.df = df def test_demo_missed(self): #", "<reponame>sokolegg/titanoboa<gh_stars>0 import unittest import os import sequentia as seq import pandas as pd", "class TestProjector(unittest.TestCase): def setUp(self): demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3,", "} # result = f.apply(past=past_aggr, target=lambda x: x[0]) # result = if __name__", "x: x[::7], # 'max': lambda x: x.max(), # 'std': lambda x: x.std() #", "{'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1, 3, 4]} # h = tnb.Historical(df)", "['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1, 3, 4]} # h = tnb.Historical(df) #", "import sequentia as seq import pandas as pd class TestProjector(unittest.TestCase): def setUp(self): demo", "past_aggr = { # 'weekly': lambda x: x[::7], # 'max': lambda x: x.max(),", "# past_aggr = { # 'weekly': lambda x: x[::7], # 'max': lambda x:", "f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr = { # 'weekly': lambda x:", "df['date'] = pd.to_datetime(df['date']) df = df.set_index('date') self.df = df def test_demo_missed(self): # 2nd", "print(h.head()) # def 
test_fragmentation(self): # demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature':", "lambda x: x.std() # } # result = f.apply(past=past_aggr, target=lambda x: x[0]) #", "pd.to_datetime(df['date']) df = df.set_index('date') self.df = df def test_demo_missed(self): # 2nd january is", "# def test_fragmentation(self): # demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1,", "demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1, 3, 4]} # h", "result = f.apply(past=past_aggr, target=lambda x: x[0]) # result = if __name__ == '__main__':", "test_demo_missed(self): # 2nd january is lacked h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be", "[1, 3, 4]} # h = tnb.Historical(df) # f = h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day']", "df = df.set_index('date') self.df = df def test_demo_missed(self): # 2nd january is lacked", "seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h", "lambda x: x[::7], # 'max': lambda x: x.max(), # 'std': lambda x: x.std()", "= {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], # 'temperature': [1, 3, 4]} # h =", "pd class TestProjector(unittest.TestCase): def setUp(self): demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1,", "self.df = df def test_demo_missed(self): # 2nd january is lacked h = seq.Historical(self.df)", "= h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month',", "lambda x: x.max(), # 'std': lambda x: x.std() # } # result =", "4]} df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df = df.set_index('date') 
self.df = df", "df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df = df.set_index('date') self.df = df def", "# 'temperature': [1, 3, 4]} # h = tnb.Historical(df) # f = h.fragmentate(['28days','1day'],", "demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]} df = pd.DataFrame(demo)", "missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self): # demo", "is lacked h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'],", "self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self): #", "names=['past', 'target'])[::'1day'] # past_aggr = { # 'weekly': lambda x: x[::7], # 'max':", "os import sequentia as seq import pandas as pd class TestProjector(unittest.TestCase): def setUp(self):", "sequentia as seq import pandas as pd class TestProjector(unittest.TestCase): def setUp(self): demo =", "= f.apply(past=past_aggr, target=lambda x: x[0]) # result = if __name__ == '__main__': unittest.main()", "x[::7], # 'max': lambda x: x.max(), # 'std': lambda x: x.std() # }", "3, 4]} df = pd.DataFrame(demo) df['date'] = pd.to_datetime(df['date']) df = df.set_index('date') self.df =", "2nd january is lacked h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2", "= h.fragmentate(['28days','1day'], names=['past', 'target'])[::'1day'] # past_aggr = { # 'weekly': lambda x: x[::7],", "x: x.max(), # 'std': lambda x: x.std() # } # result = f.apply(past=past_aggr,", "setUp(self): demo = {'date': ['01-01-2019', '01-03-2019', '01-04-2019'], 'temperature': [1, 3, 4]} df =", "# 'max': lambda x: x.max(), # 'std': lambda x: x.std() # } #", "= pd.DataFrame(demo) 
df['date'] = pd.to_datetime(df['date']) df = df.set_index('date') self.df = df def test_demo_missed(self):", "# 'weekly': lambda x: x[::7], # 'max': lambda x: x.max(), # 'std': lambda", "x.max(), # 'std': lambda x: x.std() # } # result = f.apply(past=past_aggr, target=lambda", "def test_demo_missed(self): # 2nd january is lacked h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1]", "h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h = seq.Historical(self.df).expand('month', 'year')", "test_expand(self): h = seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self): # demo = {'date':", "january is lacked h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head())", "h = seq.Historical(self.df).expand('month', 'year') print(h.head()) # def test_fragmentation(self): # demo = {'date': ['01-01-2019',", "seq import pandas as pd class TestProjector(unittest.TestCase): def setUp(self): demo = {'date': ['01-01-2019',", "lacked h = seq.Historical(self.df) daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be)", "daily = h.interpolate('linear')['12-31-2018','01-06-2019',1] missed_must_be = 2 print(daily.head()) self.assertEqual(daily['temperature']['01-02-2019'], missed_must_be) def test_expand(self): h =" ]
[ "bubble_sort(lista): for passnum in range(len(lista)-1,0,-1): for i in range(passnum): if lista[i]>lista[i+1]: temp =", "for passnum in range(len(lista)-1,0,-1): for i in range(passnum): if lista[i]>lista[i+1]: temp = lista[i]", "lista[i]>lista[i+1]: temp = lista[i] lista[i] = lista[i+1] lista[i+1] = temp print(lista) return lista", "for i in range(passnum): if lista[i]>lista[i+1]: temp = lista[i] lista[i] = lista[i+1] lista[i+1]", "def bubble_sort(lista): for passnum in range(len(lista)-1,0,-1): for i in range(passnum): if lista[i]>lista[i+1]: temp", "<filename>part_2/week_14/bubbleSort.py def bubble_sort(lista): for passnum in range(len(lista)-1,0,-1): for i in range(passnum): if lista[i]>lista[i+1]:", "passnum in range(len(lista)-1,0,-1): for i in range(passnum): if lista[i]>lista[i+1]: temp = lista[i] lista[i]", "if lista[i]>lista[i+1]: temp = lista[i] lista[i] = lista[i+1] lista[i+1] = temp print(lista) return", "in range(len(lista)-1,0,-1): for i in range(passnum): if lista[i]>lista[i+1]: temp = lista[i] lista[i] =", "range(len(lista)-1,0,-1): for i in range(passnum): if lista[i]>lista[i+1]: temp = lista[i] lista[i] = lista[i+1]", "i in range(passnum): if lista[i]>lista[i+1]: temp = lista[i] lista[i] = lista[i+1] lista[i+1] =", "in range(passnum): if lista[i]>lista[i+1]: temp = lista[i] lista[i] = lista[i+1] lista[i+1] = temp", "range(passnum): if lista[i]>lista[i+1]: temp = lista[i] lista[i] = lista[i+1] lista[i+1] = temp print(lista)" ]
[ "kwargs = {'arch': arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained':", "# assert args.eval_only model = boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting not found:", "_ = model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith DataAugmentedModel even if there are", "= args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata in", "args.training_mode in ['joint', 'model']: parameters = model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp, target):", "found: {args.boosting}') return model.cuda() def main_trainer(args, store): ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if", "default=1, choices=[0, 1], help='Apply random transforms to the booster.') parser.add_argument('--debug', action='store_true', help='Print debug", "'(Overrides the one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to a checkpoint to", "--patch-size 10 --patch-lr 0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args): if args.dataset == 'solids':", "args.training_mode is not None, \"training_mode is required\" # Important for automatic job retries", "is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap the model", "ds = datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset ==", "1 if args.single_class else ds.num_classes if arch == 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset])", "schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata in store. 
Skipping", "os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir) if i == 0: print(f'Saved in {store.save_dir}')", "'none': # assert args.eval_only model = boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting not", "a fixed \" \"target class (only optimize ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers',", "args.min_light, 'max_light': args.max_light, 'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None", "import models from torchvision.datasets import CIFAR10 from . import boosters, constants from .utils", "i % args.save_freq == 0: save_dir = Path(store.save_dir) #TODO: Move this part inside", "= loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds) # Resume", "elif args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) else: raise", "help='If given, use object from file instead of Cube') # Zoom (bigger =", "= boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting not found: {args.boosting}') return model.cuda() def", "# Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the one in", "--arch resnet50 --dataset cifar --batch-size 64 --out-dir outdir --exp-name tmp --patch-size 10 --patch-lr", "to optimizer when optimizing the booster alone if args.training_mode in ['joint', 'model']: parameters", "from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations else []) # don't", "find (or download) info files for breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster'])", 
"{args.single_class}\") # Transform everything to have the same label class_tx = lambda x,", "label class_tx = lambda x, y: (x, ch.ones_like(y) * args.single_class) train_loader = loaders.LambdaLoader(train_loader,", "premptions. Avoid uuids. if args.exp_name == 'random': args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\")", "apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == '3d': boosting_path = Path(args.out_dir)", "2D boosters. It is # a bit tricky cause if we do that,", "boosters. It is # a bit tricky cause if we do that, we", "on forward pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the loop", "pickle_module=dill) sd = checkpoint['model'] sd = {k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd) print(\"=>", "import boosters, constants from .utils import custom_datasets, LinearModel from uuid import uuid4 #", "action='store_true', help='Print debug stuff') parser.add_argument('--json-config', help='Path to a JSON config file **that will", "= model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4],", "split) elif args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) else:", "outdir --exp-name tmp --patch-size 10 --patch-lr 0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args): if", "(bigger = more zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e., most zoomed", "most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e., most zoomed out)\") #", "dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, 
apply_transforms=args.apply_booster_transforms) model =", "using a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply random", "object from file instead of Cube') # Zoom (bigger = more zoomed out)", "Path import cox.store import cox.utils import dill import json import numpy as np", "== '3d': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else:", "with ch.no_grad(): model(inp, target, save_dir=save_dir) if i == 0: print(f'Saved in {store.save_dir}') args.iteration_hook", "assert args.training_mode is not None, \"training_mode is required\" # Important for automatic job", "not in store.keys: args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else:", "(or download) info files for breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch',", "JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args, k) for k in new_args), set(new_args.keys()) -", "help='Do not use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. fog,gaussian_noise') parser.add_argument('--dataset',", "import CIFAR10 from . 
import boosters, constants from .utils import custom_datasets, LinearModel from", "model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply random transforms to the", "example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg'", "if args.single_class else ds.num_classes if arch == 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs", "part inside the 2D boosters. It is # a bit tricky cause if", "args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train or args.adv_eval):", "of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if args.eval_only: print('==>[Evaluating", "'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find (or download) info files for", "model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\": args = parser.parse_args()", "new_args: setattr(args, k, new_args[k]) assert not args.adv_train, 'not supported yet slatta dog' assert", "class (only optimize ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true',", "if i % args.save_freq == 0: save_dir = Path(store.save_dir) #TODO: Move this part", "model(inp, target, save_dir=save_dir) if i == 0: print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook", "the cluster in case of premptions. Avoid uuids. 
if args.exp_name == 'random': args.exp_name", "else \"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only: args =", "args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only: args = defaults.check_and_fill_args( args,", "def get_dataset_and_loaders(args): if args.dataset == 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif", "# Wrap the model wtith DataAugmentedModel even if there are not corruptions. #", "\"in single-class mode. If given, will be used as a fixed \" \"target", "use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet',", "help='Whether to resume training the DataAugmentedModel or not.' 
'Useful to continue training if", "boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting not found: {args.boosting}') return model.cuda() def main_trainer(args,", "setattr(args, k, new_args[k]) assert not args.adv_train, 'not supported yet slatta dog' assert args.training_mode", "loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if", "class_tx) model = get_boosted_model(args, ds) # Resume traing the boosted model from a", "argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser)", "sd = {k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}'", "avoid resuming for epoch, optimizers etc. if args.boosting == 'class_consistent': boosting_path = Path(args.out_dir)", "= datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset == 'imagenet':", "'random': args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name != None # Preprocess", "texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on", "custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds = datasets.ImageNet(args.data) elif args.dataset == 'cifar':", "class_tx = lambda x, y: (x, ch.ones_like(y) * args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx)", "target): if loop_type == 'val' or model.module.booster is None: return if args.training_mode in", "image_size=dim, batch_size=args.batch_size, render_options=render_options, 
num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster,", "if args.augmentations else []) # don't pass checkpoint to train_model do avoid resuming", "model.module.booster(inp, target) bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path", "'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds =", "return model.cuda() def main_trainer(args, store): ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if args.single_class is", "type=str, help='Where to find (or download) info files for breeds') parser.add_argument('--patch-size', type=int, default=70)", "help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example usage: python", "'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds = datasets.ImageNet(args.data) if args.zipped:", "instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the loop (see constants.py for", "store): ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if args.single_class is not None: print(f\"Boosting towards", "checkpoint = ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd = {k[len('module.'):]:v for k,v in", "print(f\"Experiment name: {args.exp_name}\") assert args.exp_name != None # Preprocess args default_ds = args.dataset", "args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store", 
"args**') ## Arguments for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act \" \"in", "parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int,", "= save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last: save_dir", "get_dataset_and_loaders(args): if args.dataset == 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset", "retries on the cluster in case of premptions. Avoid uuids. if args.exp_name ==", "lambda x, y: (x, ch.ones_like(y) * args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader =", "BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the", "args.exp_name) if 'metadata' not in store.keys: args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata',", "cannot save the \"corrupted\" # boosted images, but only the boosted images if", "== 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes = 1 if", "args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith DataAugmentedModel", "in case of premptions. Avoid uuids. 
if args.exp_name == 'random': args.exp_name = str(uuid4())", "= defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom", "parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the loop (see constants.py for details)\") # Render", "else ds.num_classes if arch == 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch':", "model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == '3d': boosting_path = Path(args.out_dir) /", "if args.dataset in datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if", "frequently we should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last visualizations instead", "args.boosting == 'none': # assert args.eval_only model = boosters.BoostedModel(model, None, args.training_mode) else: raise", "return train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\": args", "Preprocess args default_ds = args.dataset if args.dataset in datasets.DATASETS else \"imagenet\" args =", "help='Path to a checkpoint to load (useful for training a patch using a", "args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug,", "ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms)", "usage: python 
main.py --arch resnet50 --dataset cifar --batch-size 64 --out-dir outdir --exp-name tmp", "store=store, checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\": args = parser.parse_args() if args.json_config is", "For consistenct when loading from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations", "3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act \" \"in single-class mode. If given,", "from torch import nn from torchvision import models from torchvision.datasets import CIFAR10 from", "get_boosted_model(args, ds): is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet' arch =", "save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last: save_dir =", "cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata in store. 
Skipping this part.]')", "help='Path to a JSON config file **that will override argparse args**') ## Arguments", "model = get_boosted_model(args, ds) # Resume traing the boosted model from a checkpoint", "ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model =", "print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args, model,", "ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light':", "optimizer when optimizing the booster alone if args.training_mode in ['joint', 'model']: parameters =", "cifar --batch-size 64 --out-dir outdir --exp-name tmp --patch-size 10 --patch-lr 0.01 --training-mode joint", "'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes = 1 if args.single_class", "random transforms to the booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config', help='Path to", "= datasets.CustomImageNet(args.data, split) else: raise NotImplementedError # TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size,", "choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq',", "zoomed out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, default=0.5,", "Important for automatic job retries on the cluster in case of premptions. 
Avoid", "Avoid uuids. if args.exp_name == 'random': args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert", "model]') return train.eval_model(args, model, val_loader, store=store) parameters = [model.dummy] # avoids empty param", "as ch from robustness import datasets, defaults, loaders, model_utils, train from robustness.tools import", "model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq == 0: save_dir = Path(store.save_dir) #TODO: Move this", "/ 'better_corruptions')) import argparse import os from pathlib import Path import cox.store import", "model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path)", "num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting ==", "parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given, use object from file instead of Cube')", "return ds, (train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model = args.arch in constants.NAME_TO_ARCH and", "/ f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last: save_dir = save_dir", "{ 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples } corruptions", "the boosted model from a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint =", "= os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None if args.resume and os.path.isfile(resume_path): print('[Resuming training", "download) info files for breeds') parser.add_argument('--patch-size', type=int, default=70) 
parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str,", "model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations else []) # don't pass checkpoint", "args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds])", "'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith DataAugmentedModel even", "for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act \" \"in single-class mode. If", "[model.dummy] # avoids empty param list to optimizer when optimizing the booster alone", "not None: print(f\"Boosting towards a single class {args.single_class}\") # Transform everything to have", "breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005)", "args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes = 1", "to find (or download) info files for breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str,", "k) for k in new_args), set(new_args.keys()) - set(vars(args).keys()) for k in new_args: setattr(args,", "defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if", "if args.training_mode in ['joint', 'model']: parameters = model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp,", 
"boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS,", "the DataAugmentedModel or not.' 'Useful to continue training if job is pre-empted.' '(Overrides", "parser.add_argument('--json-config', help='Path to a JSON config file **that will override argparse args**') ##", "--patch-lr 0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args): if args.dataset == 'solids': ds =", "in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset}", "update_params=parameters) if __name__ == \"__main__\": args = parser.parse_args() if args.json_config is not None:", "elif args.boosting == 'none': # assert args.eval_only model = boosters.BoostedModel(model, None, args.training_mode) else:", "return train.eval_model(args, model, val_loader, store=store) parameters = [model.dummy] # avoids empty param list", "help='Where to find (or download) info files for breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode',", "is pre-empted.' 
'(Overrides the one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to a", "val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model = args.arch", "Example usage: python main.py --arch resnet50 --dataset cifar --batch-size 64 --out-dir outdir --exp-name", "boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom,", "not args.adv_train, 'not supported yet slatta dog' assert args.training_mode is not None, \"training_mode", "constants.py for details)\") # Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given, use", "torchvision.datasets import CIFAR10 from . import boosters, constants from .utils import custom_datasets, LinearModel", "if args.boosting != '3d': inp, target = inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target)", "f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last: save_dir = save_dir /", "torchvision import models from torchvision.datasets import CIFAR10 from . 
import boosters, constants from", "args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name != None # Preprocess args", "args.training_mode) elif args.boosting == '3d': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster", "BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options = {", "= ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model", "zoom (i.e., most zoomed out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\")", "datasets, defaults, loaders, model_utils, train from robustness.tools import breeds_helpers from torch import nn", "the boosted images if args.boosting != '3d': inp, target = inp.cuda(), target.cuda() example_boosted", "if __name__ == \"__main__\": args = parser.parse_args() if args.json_config is not None: print(\"Overriding", "args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq == 0: save_dir =", "inp_path) save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last: save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir)", "traing the boosted model from a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint", "Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args, model, val_loader, store=store) parameters", "num_classes = 1 if args.single_class else ds.num_classes if arch == 'linear': arch =", "os from pathlib import Path import cox.store import cox.utils import dill import json", "booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config', 
help='Path to a JSON config file", "\" \"in single-class mode. If given, will be used as a fixed \"", "== 'imagenet': ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped' elif args.dataset ==", "corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == 'none': # assert args.eval_only", "resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None if args.resume and os.path.isfile(resume_path): print('[Resuming", "default_ds = args.dataset if args.dataset in datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args( args,", ". import boosters, constants from .utils import custom_datasets, LinearModel from uuid import uuid4", "for k in new_args), set(new_args.keys()) - set(vars(args).keys()) for k in new_args: setattr(args, k,", "parser) # Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the one", "'val' or model.module.booster is None: return if args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if", "a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply random transforms", "parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config', help='Path to a JSON config file **that", "constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == '3d':", "ds, (train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset", "supported yet slatta dog' assert args.training_mode is not None, \"training_mode is required\" #", "main_trainer(args, store): 
ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if args.single_class is not None: print(f\"Boosting", "to have the same label class_tx = lambda x, y: (x, ch.ones_like(y) *", "train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader, val_loader) def get_boosted_model(args,", "= 1 if args.single_class else ds.num_classes if arch == 'linear': arch = LinearModel(num_classes,", "if we do that, we cannot save the \"corrupted\" # boosted images, but", "parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting', choices=['none',", "<gh_stars>10-100 import pathlib import sys from torchvision.utils import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0,", "images, but only the boosted images if args.boosting != '3d': inp, target =", "args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith DataAugmentedModel even if", "type=int, default=1) parser.add_argument('--custom-file', help='If given, use object from file instead of Cube') #", "elif args.dataset == 'imagenet': ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped' elif", "= ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd = {k[len('module.'):]:v for k,v in sd.items()}", "breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds", "if loop_type == 'val' or model.module.booster is None: return if args.training_mode in ['booster',", "assert all(hasattr(args, k) for k in new_args), set(new_args.keys()) - set(vars(args).keys()) for k in", "(x, ch.ones_like(y) * args.single_class) train_loader = 
loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model", "stuff') parser.add_argument('--json-config', help='Path to a JSON config file **that will override argparse args**')", "**that will override argparse args**') ## Arguments for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether", "in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e., most zoomed out)\") # Lighting parser.add_argument('--min-light',", "target, save_dir=save_dir) if i == 0: print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook return", "(i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e., most zoomed out)\")", "pathlib import Path import cox.store import cox.utils import dill import json import numpy", "type=int, default=1, choices=[0, 1], help='Do not use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str,", "default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we should save images\") parser.add_argument('--save-only-last',", "default=1) parser.add_argument('--custom-file', help='If given, use object from file instead of Cube') # Zoom", "adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last:", "split) else: raise NotImplementedError # TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers,", "constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes = 1 if args.single_class else ds.num_classes if", "model.cuda() def main_trainer(args, store): ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if 
args.single_class is not", "save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse import os from", "= ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom,", "json.load(open(args.json_config)) assert all(hasattr(args, k) for k in new_args), set(new_args.keys()) - set(vars(args).keys()) for k", "== 'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else:", "if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim,", "None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file,", "default=50, help=\"How frequently we should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last", "type=str, default=None, help='e.g. 
fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir',", "info files for breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18')", "(useful for training a patch using a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int,", "corruptions in the loop (see constants.py for details)\") # Render configuration parser.add_argument('--render-samples', type=int,", "Transform everything to have the same label class_tx = lambda x, y: (x,", "default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How", "choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find (or", "def main_trainer(args, store): ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if args.single_class is not None:", "args.boosting == '3d': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path)", "k in new_args), set(new_args.keys()) - set(vars(args).keys()) for k in new_args: setattr(args, k, new_args[k])", "do that, we cannot save the \"corrupted\" # boosted images, but only the", "help='Apply random transforms to the booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config', help='Path", "instead of Cube') # Zoom (bigger = more zoomed out) parser.add_argument('--min-zoom', type=int, default=20,", 
"parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\"", "(epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the model]')", "import os from pathlib import Path import cox.store import cox.utils import dill import", "defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train or args.adv_eval): args = defaults.check_and_fill_args(", "file instead of Cube') # Zoom (bigger = more zoomed out) parser.add_argument('--min-zoom', type=int,", "= datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data,", "default=None, help='e.g. fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str,", "patch using a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply", "iteration_hook(model, i, loop_type, inp, target): if loop_type == 'val' or model.module.booster is None:", "split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17': split =", "is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if", "tricky cause if we do that, we cannot save the \"corrupted\" # boosted", "load (useful for training a patch using a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms',", "lighting 
(darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example usage: python main.py", "(train_loader, val_loader) = get_dataset_and_loaders(args) if args.single_class is not None: print(f\"Boosting towards a single", "import json import numpy as np import torch as ch from robustness import", "main.py --arch resnet50 --dataset cifar --batch-size 64 --out-dir outdir --exp-name tmp --patch-size 10", "dill import json import numpy as np import torch as ch from robustness", "resnet50 --dataset cifar --batch-size 64 --out-dir outdir --exp-name tmp --patch-size 10 --patch-lr 0.01", "out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum", "--batch-size 64 --out-dir outdir --exp-name tmp --patch-size 10 --patch-lr 0.01 --training-mode joint \"\"\"", "Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given, use object from file instead", "datasets.DATASETS[default_ds]) if not args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and", "# Preprocess args default_ds = args.dataset if args.dataset in datasets.DATASETS else \"imagenet\" args", "inside the 2D boosters. It is # a bit tricky cause if we", "#TODO: Move this part inside the 2D boosters. 
It is # a bit", "Resume traing the boosted model from a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest')", "or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith", "zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int,", "datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds = datasets.ImageNet(args.data) elif args.dataset", "else: dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model", "(Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do not use", "in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do not use tqdm.') parser.add_argument('--exp-name', type=str,", "the \"corrupted\" # boosted images, but only the boosted images if args.boosting !=", "bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg' adv_path = save_dir /", "defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments", "= defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train or args.adv_eval): args =", "Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options", "training the DataAugmentedModel 
or not.' 'Useful to continue training if job is pre-empted.'", "inp, target = inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target) bs_path = save_dir /", "help='e.g. fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where", "json import numpy as np import torch as ch from robustness import datasets,", "defaults, loaders, model_utils, train from robustness.tools import breeds_helpers from torch import nn from", "'3d': inp, target = inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target) bs_path = save_dir", "cox.utils import dill import json import numpy as np import torch as ch", "parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on forward pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true',", "blender rendering on forward pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in", "bit tricky cause if we do that, we cannot save the \"corrupted\" #", "parser.add_argument('--custom-file', help='If given, use object from file instead of Cube') # Zoom (bigger", "fixed \" \"target class (only optimize ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1,", "loop (see constants.py for details)\") # Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If", "booster alone if args.training_mode in ['joint', 'model']: parameters = model.boosted_model.parameters() def iteration_hook(model, i,", "empty param list to optimizer when optimizing the booster alone if args.training_mode in", "loaders, model_utils, train from robustness.tools import breeds_helpers from torch import nn from torchvision", "= 
argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS,", "checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd = {k[len('module.'):]:v for k,v", "with JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args, k) for k in new_args), set(new_args.keys())", "} corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim,", "= datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class =", "(lightest)\") \"\"\" Example usage: python main.py --arch resnet50 --dataset cifar --batch-size 64 --out-dir", "'3d'], default='class_consistent', help='Dataset (Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1],", "help=\"Whether to act \" \"in single-class mode. If given, will be used as", "type=int, default=50, help=\"How frequently we should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the", "{})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the model]') return", "training if job is pre-empted.' 
'(Overrides the one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None,", "'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap the", "'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find (or download) info files for breeds')", "'not supported yet slatta dog' assert args.training_mode is not None, \"training_mode is required\"", "0: print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args, model, (train_loader, val_loader), store=store,", "args.json_config is not None: print(\"Overriding args with JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args,", "args.dataset == 'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds = datasets.ImageNet(args.data)", "if args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(),", "defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS,", "should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last visualizations instead of all\")", "if args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq == 0: save_dir", "boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations else []) # don't pass checkpoint to train_model", "numpy as np import torch as ch from robustness import datasets, defaults, loaders,", "not 
corruptions. # For consistenct when loading from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name,", "# avoids empty param list to optimizer when optimizing the booster alone if", "parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum", "= json.load(open(args.json_config)) assert all(hasattr(args, k) for k in new_args), set(new_args.keys()) - set(vars(args).keys()) for", "= LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model", "boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act \" \"in single-class mode. If given, will", "args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds)", "is None: return if args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq", "args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name)", "'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) else: raise NotImplementedError # TODO:", "required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. 
fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'],", "transforms to the booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config', help='Path to a", "we should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last visualizations instead of", "parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently", "type=int, default=20, help=\"Minimum zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom", "model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith DataAugmentedModel even if there", "args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == '3d': boosting_path", "/ f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg' adv_path", "from file instead of Cube') # Zoom (bigger = more zoomed out) parser.add_argument('--min-zoom',", "= defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store =", "more zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom',", "'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find (or download) info files", "import nn from torchvision import models from torchvision.datasets import 
CIFAR10 from . import", "'better_corruptions')) import argparse import os from pathlib import Path import cox.store import cox.utils", "optimizing the booster alone if args.training_mode in ['joint', 'model']: parameters = model.boosted_model.parameters() def", "if 'metadata' not in store.keys: args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema)", "checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if args.eval_only:", "model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp, target): if loop_type == 'val' or model.module.booster", "'model']: parameters = model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp, target): if loop_type ==", "else: raise NotImplementedError # TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True)", "save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last: save_dir = save_dir / f'iteration_{i}'", "from robustness.tools import breeds_helpers from torch import nn from torchvision import models from", "ds.custom_class = 'Zipped' elif args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data,", "arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes = 1 if args.single_class else", "val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader, val_loader) def get_boosted_model(args, ds):", "if is_pt_model else args.arch num_classes = 1 if args.single_class else ds.num_classes if arch", "= [model.dummy] # avoids empty param list to optimizer when optimizing the booster", "resume training the 
DataAugmentedModel or not.' 'Useful to continue training if job is", "k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset:", "= cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in store.keys: args_dict = args.__dict__ schema =", "parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last visualizations instead of all\") parser.add_argument('--resume', action='store_true', help='Whether", "ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds = datasets.ImageNet(args.data)", "args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS", "'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq == 0: save_dir = Path(store.save_dir) #TODO: Move", "args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train or args.adv_eval): args", "Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the one in robustness.defaults)')", "= get_dataset_and_loaders(args) if args.single_class is not None: print(f\"Boosting towards a single class {args.single_class}\")", "parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) #", "custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds = datasets.ImageNet(args.data) elif args.dataset ==", "ch.no_grad(): model(inp, target, 
save_dir=save_dir) if i == 0: print(f'Saved in {store.save_dir}') args.iteration_hook =", "'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find (or download) info", "help=\"How frequently we should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last visualizations", "ds = datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class", "= model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp, target): if loop_type == 'val' or", "= datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds = datasets.ImageNet(args.data) elif", "/ BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster =", "target) bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path =", "False and (args.adv_train or args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args =", "the same label class_tx = lambda x, y: (x, ch.ones_like(y) * args.single_class) train_loader", "= get_boosted_model(args, ds) # Resume traing the boosted model from a checkpoint resume_path", "model from a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None if", "# ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser", "Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, 
default=0.5, help=\"Maximum lighting (lightest)\")", "# boosted images, but only the boosted images if args.boosting != '3d': inp,", "import datasets, defaults, loaders, model_utils, train from robustness.tools import breeds_helpers from torch import", "print(\"=> loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\")", "breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) else: raise NotImplementedError # TODO: with_index train_loader, val_loader", "train.eval_model(args, model, val_loader, store=store) parameters = [model.dummy] # avoids empty param list to", "arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm',", "defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds])", "= constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model,", "raise ValueError(f'boosting not found: {args.boosting}') return model.cuda() def main_trainer(args, store): ds, (train_loader, val_loader)", "help=\"Maximum zoom (i.e., most zoomed out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting", "is # a bit tricky cause if we do that, we cannot save", "if not args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train", "to a JSON config file **that will override argparse args**') ## Arguments for", "mode. 
If given, will be used as a fixed \" \"target class (only", "example_boosted = model.module.booster(inp, target) bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried =", "train from robustness.tools import breeds_helpers from torch import nn from torchvision import models", "config file **that will override argparse args**') ## Arguments for 3D boosters: parser.add_argument('--single-class',", "split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) else: raise NotImplementedError # TODO: with_index", "= loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds) # Resume traing the boosted model", "parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset", "help=\"Add corruptions in the loop (see constants.py for details)\") # Render configuration parser.add_argument('--render-samples',", "in ['joint', 'model']: parameters = model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp, target): if", "custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == 'none': # assert", "# Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting", "the model]') return train.eval_model(args, model, val_loader, store=store) parameters = [model.dummy] # avoids empty", "forward pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the loop (see", "args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) # Wrap", 
"args.dataset == 'city': ds = datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds = datasets.CIFAR('/tmp')", "single-class mode. If given, will be used as a fixed \" \"target class", "that, we cannot save the \"corrupted\" # boosted images, but only the boosted", "store['metadata'].append_row(args_dict) else: print('[Found existing metadata in store. Skipping this part.]') print(args) main_trainer(args, store)", "defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'],", "if args.single_class is not None: print(f\"Boosting towards a single class {args.single_class}\") # Transform", "dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting ==", "\" \"target class (only optimize ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int)", "most zoomed out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float,", "choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1,", "with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader, val_loader) def", "from a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None if args.resume", "torch import nn from torchvision import models from torchvision.datasets import CIFAR10 from .", "\"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, 
datasets.DATASETS[default_ds]) if not args.eval_only: args = defaults.check_and_fill_args(", "defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in store.keys:", "target = inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target) bs_path = save_dir / f'boosted_{i}.jpg'", "def iteration_hook(model, i, loop_type, inp, target): if loop_type == 'val' or model.module.booster is", "= defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent',", "= parser.parse_args() if args.json_config is not None: print(\"Overriding args with JSON...\") new_args =", "inp_path = save_dir / f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4],", "on the cluster in case of premptions. Avoid uuids. 
if args.exp_name == 'random':", "args.exp_name, 'checkpoint.pt.latest') checkpoint = None if args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel from", "robustness.tools import breeds_helpers from torch import nn from torchvision import models from torchvision.datasets", "import pathlib import sys from torchvision.utils import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path", "args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) else: raise NotImplementedError", "parameters = [model.dummy] # avoids empty param list to optimizer when optimizing the", "None, \"training_mode is required\" # Important for automatic job retries on the cluster", "'city': ds = datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset", "will be used as a fixed \" \"target class (only optimize ONE texture", "else: raise ValueError(f'boosting not found: {args.boosting}') return model.cuda() def main_trainer(args, store): ds, (train_loader,", "this part inside the 2D boosters. It is # a bit tricky cause", "target.cuda() example_boosted = model.module.booster(inp, target) bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried", "= save_dir / f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path)", "override argparse args**') ## Arguments for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act", "sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse import os from pathlib import Path import", "parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. 
fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17',", "all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on forward pass", "defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False", "constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster,", "x, y: (x, ch.ones_like(y) * args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader,", "argparse args**') ## Arguments for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act \"", "parser.add_argument('--model-path', type=str, default=None, help='Path to a checkpoint to load (useful for training a", "= cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata in store. 
Skipping this", "checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations else []) # don't pass", "sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} |", "else []) # don't pass checkpoint to train_model do avoid resuming for epoch,", "rendering on forward pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the", "instead of all\") parser.add_argument('--resume', action='store_true', help='Whether to resume training the DataAugmentedModel or not.'", "from uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser", "type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50,", "parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep", "the last visualizations instead of all\") parser.add_argument('--resume', action='store_true', help='Whether to resume training the", "import breeds_helpers from torch import nn from torchvision import models from torchvision.datasets import", "datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped'", "args.iteration_hook = iteration_hook return train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__", "save_image(example_boosted[:4], bs_path) example_adversaried = 
model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg' adv_path = save_dir", "parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting',", "BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes,", "parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser)", "= defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in", "wtith DataAugmentedModel even if there are not corruptions. # For consistenct when loading", "[]) # don't pass checkpoint to train_model do avoid resuming for epoch, optimizers", "default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find (or download) info files for breeds') parser.add_argument('--patch-size',", "in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq == 0: save_dir = Path(store.save_dir)", "save the \"corrupted\" # boosted images, but only the boosted images if args.boosting", "args.exp_name != None # Preprocess args default_ds = args.dataset if args.dataset in datasets.DATASETS", "parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int,", "of all\") parser.add_argument('--resume', action='store_true', help='Whether to resume training the DataAugmentedModel or not.' 'Useful", "CIFAR10 from . 
import boosters, constants from .utils import custom_datasets, LinearModel from uuid", "datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds", "'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ =", "constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers,", "checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args,", "# a bit tricky cause if we do that, we cannot save the", "f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg' adv_path =", "val_loader) def get_boosted_model(args, ds): is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet'", "across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on forward", "have the same label class_tx = lambda x, y: (x, ch.ones_like(y) * args.single_class)", "get_boosted_model(args, ds) # Resume traing the boosted model from a checkpoint resume_path =", "import argparse import os from pathlib import Path import cox.store import cox.utils import", "* args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args,", "= defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = 
defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser =", "NotImplementedError # TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds,", "Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster", "robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do not use tqdm.') parser.add_argument('--exp-name', type=str, required=False)", "{store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if", "case of premptions. Avoid uuids. if args.exp_name == 'random': args.exp_name = str(uuid4()) print(f\"Experiment", "store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in store.keys: args_dict = args.__dict__ schema", "default=1, choices=[0, 1], help='Do not use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None,", "# For consistenct when loading from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if", "list to optimizer when optimizing the booster alone if args.training_mode in ['joint', 'model']:", "uuids. 
if args.exp_name == 'random': args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name", "args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions", "args.max_light, 'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster =", "the booster alone if args.training_mode in ['joint', 'model']: parameters = model.boosted_model.parameters() def iteration_hook(model,", "store.keys: args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing", "ds = datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds =", "{args.arch}\") if args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args, model, val_loader, store=store) parameters =", "is not None: print(\"Overriding args with JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args, k)", "save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last visualizations instead of all\") parser.add_argument('--resume',", "of premptions. Avoid uuids. if args.exp_name == 'random': args.exp_name = str(uuid4()) print(f\"Experiment name:", "epoch, optimizers etc. 
if args.boosting == 'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP if", "model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch'])) print(f\"Dataset: {args.dataset} | Model:", "a bit tricky cause if we do that, we cannot save the \"corrupted\"", "datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in store.keys: args_dict = args.__dict__", "= 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser)", "act \" \"in single-class mode. If given, will be used as a fixed", "we cannot save the \"corrupted\" # boosted images, but only the boosted images", "ds = datasets.CustomImageNet(args.data, split) else: raise NotImplementedError # TODO: with_index train_loader, val_loader =", "of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the loop (see constants.py for details)\")", "= model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith DataAugmentedModel even if there are not", "given, use object from file instead of Cube') # Zoom (bigger = more", "save_image(example_adversaried[:4], adv_path) else: if not args.save_only_last: save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir) with", "is not None, \"training_mode is required\" # Important for automatic job retries on", "param list to optimizer when optimizing the booster alone if args.training_mode in ['joint',", "k in new_args: setattr(args, k, new_args[k]) assert not args.adv_train, 'not supported yet slatta", "!= None # Preprocess args default_ds = args.dataset if args.dataset in datasets.DATASETS else", "files for breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str, 
choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr',", "val_loader) = get_dataset_and_loaders(args) if args.single_class is not None: print(f\"Boosting towards a single class", "model_utils, train from robustness.tools import breeds_helpers from torch import nn from torchvision import", "pre-empted.' '(Overrides the one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to a checkpoint", "type=int, default=1, choices=[0, 1], help='Apply random transforms to the booster.') parser.add_argument('--debug', action='store_true', help='Print", "booster, args.training_mode) elif args.boosting == 'none': # assert args.eval_only model = boosters.BoostedModel(model, None,", "\"training_mode is required\" # Important for automatic job retries on the cluster in", "'max_light': args.max_light, 'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster", "constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear',", "checkpoint = None if args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel from a checkpoint...]')", "elif args.boosting == '3d': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster =", "iteration_hook return train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\":", "str(curr_path / 'better_corruptions')) import argparse import os from pathlib import Path import cox.store", "parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we should save images\") parser.add_argument('--save-only-last', action='store_true',", 
"pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse import os from pathlib import Path", "there are not corruptions. # For consistenct when loading from checkpoints model =", "np import torch as ch from robustness import datasets, defaults, loaders, model_utils, train", "parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we should save", "args.dataset == 'imagenet': ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped' elif args.dataset", "Move this part inside the 2D boosters. It is # a bit tricky", "store=store) parameters = [model.dummy] # avoids empty param list to optimizer when optimizing", "booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions)", "args.save_freq == 0: save_dir = Path(store.save_dir) #TODO: Move this part inside the 2D", "args.exp_name == 'random': args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name != None", "- set(vars(args).keys()) for k in new_args: setattr(args, k, new_args[k]) assert not args.adv_train, 'not", "yet slatta dog' assert args.training_mode is not None, \"training_mode is required\" # Important", "everything to have the same label class_tx = lambda x, y: (x, ch.ones_like(y)", "JSON config file **that will override argparse args**') ## Arguments for 3D boosters:", "cox.store import cox.utils import dill import json import numpy as np import torch", "even if there are not corruptions. 
# For consistenct when loading from checkpoints", "loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds) # Resume traing the boosted model from", "for k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path, checkpoint['epoch']))", "all(hasattr(args, k) for k in new_args), set(new_args.keys()) - set(vars(args).keys()) for k in new_args:", "if i == 0: print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args, model,", "0: save_dir = Path(store.save_dir) #TODO: Move this part inside the 2D boosters. It", "type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we should save images\")", "or args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS,", "don't pass checkpoint to train_model do avoid resuming for epoch, optimizers etc. if", "args.training_mode) else: raise ValueError(f'boosting not found: {args.boosting}') return model.cuda() def main_trainer(args, store): ds,", "f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if not", "defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in store.keys: args_dict =", "datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split)", "store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata in store. 
Skipping this part.]') print(args)", "keep the last visualizations instead of all\") parser.add_argument('--resume', action='store_true', help='Whether to resume training", "None if args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel from a checkpoint...]') checkpoint =", "help=\"Only keep the last visualizations instead of all\") parser.add_argument('--resume', action='store_true', help='Whether to resume", "'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset': ds, 'resume_path': args.model_path,", "if args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel from a checkpoint...]') checkpoint = ch.load(resume_path,", "else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render,", "robustness import datasets, defaults, loaders, model_utils, train from robustness.tools import breeds_helpers from torch", "__name__ == \"__main__\": args = parser.parse_args() if args.json_config is not None: print(\"Overriding args", "import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse import os", "'3d': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim", "corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size,", "(train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\": args = parser.parse_args() if", "import custom_datasets, LinearModel from uuid import uuid4 # 
ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser", "== 'random': args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name != None #", "import torch as ch from robustness import datasets, defaults, loaders, model_utils, train from", "type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true')", "a checkpoint to load (useful for training a patch using a pretrained model).')", "args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in store.keys: args_dict", "ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd = {k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd)", "1], help='Apply random transforms to the booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config',", "configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given, use object from file instead of", "type=str, default=None, help='Path to a checkpoint to load (useful for training a patch", "argparse import os from pathlib import Path import cox.store import cox.utils import dill", "k, new_args[k]) assert not args.adv_train, 'not supported yet slatta dog' assert args.training_mode is", "help=\"Use blender rendering on forward pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions", "not args.save_only_last: save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir)", "to continue training if job is pre-empted.' 
'(Overrides the one in robustness.defaults)') parser.add_argument('--model-path',", "'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0,", "python main.py --arch resnet50 --dataset cifar --batch-size 64 --out-dir outdir --exp-name tmp --patch-size", "= constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light,", "i, loop_type, inp, target): if loop_type == 'val' or model.module.booster is None: return", "for automatic job retries on the cluster in case of premptions. Avoid uuids.", "not.' 'Useful to continue training if job is pre-empted.' '(Overrides the one in", "= more zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e., most zoomed in)\")", "alone if args.training_mode in ['joint', 'model']: parameters = model.boosted_model.parameters() def iteration_hook(model, i, loop_type,", "only the boosted images if args.boosting != '3d': inp, target = inp.cuda(), target.cuda()", "| Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args, model, val_loader, store=store)", "workers=args.workers, data_aug=True) return ds, (train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model = args.arch in", "None: print(\"Overriding args with JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args, k) for k", "10 --patch-lr 0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args): if args.dataset == 'solids': ds", "and os.path.isfile(resume_path): print('[Resuming training BoostedModel from a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd", "and (args.adv_train or args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = 
defaults.check_and_fill_args(", "parser) parser = defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent',", "TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader, val_loader)", "os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None if args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel", "forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == 'none': #", "lighting (lightest)\") \"\"\" Example usage: python main.py --arch resnet50 --dataset cifar --batch-size 64", ".utils import custom_datasets, LinearModel from uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch'", "\"__main__\": args = parser.parse_args() if args.json_config is not None: print(\"Overriding args with JSON...\")", "if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom':", "action='store_true', help='Whether to resume training the DataAugmentedModel or not.' 
'Useful to continue training", "parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply random transforms to the booster.')", "the one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to a checkpoint to load", "'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find (or download)", "(darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example usage: python main.py --arch", "if args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args, model, val_loader, store=store) parameters = [model.dummy]", "type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on forward pass instead of matmul\") parser.add_argument('--add-corruptions',", "model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == 'none': # assert args.eval_only model", "will override argparse args**') ## Arguments for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to", "(see constants.py for details)\") # Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given,", "loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds) # Resume traing", "(args.adv_train or args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args,", "['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq == 0: save_dir = Path(store.save_dir) #TODO:", "breeds_helpers from torch import nn from torchvision import models from torchvision.datasets 
import CIFAR10", "default=40, help=\"Maximum zoom (i.e., most zoomed out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum", "Zoom (bigger = more zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e., most", "Wrap the model wtith DataAugmentedModel even if there are not corruptions. # For", "= Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset]", "ds.ds_name, args.augmentations.split(',') if args.augmentations else []) # don't pass checkpoint to train_model do", "if args.boosting == 'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster =", "= lambda x, y: (x, ch.ones_like(y) * args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader", "parser.add_argument('--augmentations', type=str, default=None, help='e.g. fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet')", "parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained',", "train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds) #", "from a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd = {k[len('module.'):]:v", "images if args.boosting != '3d': inp, target = inp.cuda(), target.cuda() example_boosted = model.module.booster(inp,", "parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on 
forward pass instead of", "for details)\") # Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given, use object", "datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped' elif args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0]", "default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005)", "tmp --patch-size 10 --patch-lr 0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args): if args.dataset ==", "# Important for automatic job retries on the cluster in case of premptions.", "class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds) # Resume traing the", "the 2D boosters. It is # a bit tricky cause if we do", "schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata in store. 
Skipping this part.]') print(args) main_trainer(args,", "{args.boosting}') return model.cuda() def main_trainer(args, store): ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if args.single_class", "out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40,", "= inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target) bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4],", "else: dim = constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light,", "args.boosting != '3d': inp, target = inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target) bs_path", "is_pt_model else args.arch num_classes = 1 if args.single_class else ds.num_classes if arch ==", "for breeds') parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float,", "= boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations else []) # don't pass checkpoint to", "checkpoint['model'] sd = {k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of", "args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else", "a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None if args.resume and", "== \"__main__\": args = parser.parse_args() if args.json_config is not None: print(\"Overriding args with", "name: {args.exp_name}\") assert args.exp_name != None # Preprocess args default_ds = args.dataset if", "parser.add_argument('--info-dir', type=str, help='Where 
to find (or download) info files for breeds') parser.add_argument('--patch-size', type=int,", "details)\") # Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given, use object from", "booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode)", "## Arguments for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act \" \"in single-class", "type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example", "ds, (train_loader, val_loader) = get_dataset_and_loaders(args) if args.single_class is not None: print(f\"Boosting towards a", "models from torchvision.datasets import CIFAR10 from . import boosters, constants from .utils import", "= 'Zipped' elif args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split)", "import numpy as np import torch as ch from robustness import datasets, defaults,", "job retries on the cluster in case of premptions. Avoid uuids. if args.exp_name", "do avoid resuming for epoch, optimizers etc. 
if args.boosting == 'class_consistent': boosting_path =", "\"\"\" def get_dataset_and_loaders(args): if args.dataset == 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]})", "datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only: args", "i == 0: print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args, model, (train_loader,", "datasets.DATASETS[default_ds]) if False and (args.adv_train or args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds])", "import dill import json import numpy as np import torch as ch from", "== 0: print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args, model, (train_loader, val_loader),", "not found: {args.boosting}') return model.cuda() def main_trainer(args, store): ds, (train_loader, val_loader) = get_dataset_and_loaders(args)", "the loop (see constants.py for details)\") # Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file',", "boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim =", "required\" # Important for automatic job retries on the cluster in case of", "images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on forward pass instead", "action='store_true', help=\"Use blender rendering on forward pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add", "train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\": 
args =", "ch from robustness import datasets, defaults, loaders, model_utils, train from robustness.tools import breeds_helpers", "boosted images if args.boosting != '3d': inp, target = inp.cuda(), target.cuda() example_boosted =", "import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS,", "elif args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif args.dataset", "arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _", "args with JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args, k) for k in new_args),", "set(new_args.keys()) - set(vars(args).keys()) for k in new_args: setattr(args, k, new_args[k]) assert not args.adv_train,", "sd = checkpoint['model'] sd = {k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded", "parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser = defaults.add_args_to_parser(defaults.TRAINING_ARGS, parser) parser", "args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train or args.adv_eval): args = defaults.check_and_fill_args( args,", "zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e., most zoomed", "= defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS,", "save_dir = Path(store.save_dir) #TODO: Move this part inside the 2D boosters. 
It is", "help='Print debug stuff') parser.add_argument('--json-config', help='Path to a JSON config file **that will override", "'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if", "towards a single class {args.single_class}\") # Transform everything to have the same label", "= args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model", "y: (x, ch.ones_like(y) * args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx)", "etc. if args.boosting == 'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster", "inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target) bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path)", "import cox.utils import dill import json import numpy as np import torch as", "boosted model from a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None", "'checkpoint.pt.latest') checkpoint = None if args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel from a", "os.path.isfile(resume_path): print('[Resuming training BoostedModel from a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd =", "= datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped' elif args.dataset == 'entity13': split =", "pass checkpoint to train_model do avoid resuming for epoch, optimizers etc. 
if args.boosting", "= defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides", "tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13',", "parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do not use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations',", "as a fixed \" \"target class (only optimize ONE texture across all images)\")", "ValueError(f'boosting not found: {args.boosting}') return model.cuda() def main_trainer(args, store): ds, (train_loader, val_loader) =", "checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\": args = parser.parse_args() if args.json_config is not", "custom_datasets, LinearModel from uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser =", "(train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset ==", "or model.module.booster is None: return if args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i", "['joint', 'model']: parameters = model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp, target): if loop_type", "not None: print(\"Overriding args with JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args, k) for", "continue training if job is pre-empted.' 
'(Overrides the one in robustness.defaults)') parser.add_argument('--model-path', type=str,", "args.training_mode) elif args.boosting == 'none': # assert args.eval_only model = boosters.BoostedModel(model, None, args.training_mode)", "to load (useful for training a patch using a pretrained model).') parser.add_argument('--zipped', action='store_true')", "print('[Resuming training BoostedModel from a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd = checkpoint['model']", "constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds = datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds", "(only optimize ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use", "model wtith DataAugmentedModel even if there are not corruptions. # For consistenct when", "model, val_loader, store=store) parameters = [model.dummy] # avoids empty param list to optimizer", "constants from .utils import custom_datasets, LinearModel from uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP", "print(f\"Boosting towards a single class {args.single_class}\") # Transform everything to have the same", "str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name != None # Preprocess args default_ds =", "'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples } corruptions =", "== 'city': ds = datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds = datasets.CIFAR('/tmp') elif", "ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser =", "== 'val' or model.module.booster is None: return if args.training_mode in ['booster', 'joint']: 
model.module.booster.step_booster(lr=args.patch_lr)", "BoostedModel from a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd =", "for k in new_args: setattr(args, k, new_args[k]) assert not args.adv_train, 'not supported yet", "not args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train or", "= ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model,", "choices=[0, 1], help='Do not use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g.", "if args.json_config is not None: print(\"Overriding args with JSON...\") new_args = json.load(open(args.json_config)) assert", "joint \"\"\" def get_dataset_and_loaders(args): if args.dataset == 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size':", "bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path = save_dir", "Arguments for 3D boosters: parser.add_argument('--single-class', type=int, help=\"Whether to act \" \"in single-class mode.", "the model wtith DataAugmentedModel even if there are not corruptions. # For consistenct", "boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == 'none': # assert args.eval_only model = boosters.BoostedModel(model,", "/ f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir) if i == 0: print(f'Saved", "parser.add_argument('--single-class', type=int, help=\"Whether to act \" \"in single-class mode. 
If given, will be", "None # Preprocess args default_ds = args.dataset if args.dataset in datasets.DATASETS else \"imagenet\"", "from torchvision.datasets import CIFAR10 from . import boosters, constants from .utils import custom_datasets,", "# Zoom (bigger = more zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom (i.e.,", "/ BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options =", "single class {args.single_class}\") # Transform everything to have the same label class_tx =", "in {store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters)", "be used as a fixed \" \"target class (only optimize ONE texture across", "parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example usage: python main.py --arch resnet50", "args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size,", "automatic job retries on the cluster in case of premptions. Avoid uuids. 
if", "from .utils import custom_datasets, LinearModel from uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP =", "val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__ == \"__main__\": args = parser.parse_args() if args.json_config", "new_args = json.load(open(args.json_config)) assert all(hasattr(args, k) for k in new_args), set(new_args.keys()) - set(vars(args).keys())", "file **that will override argparse args**') ## Arguments for 3D boosters: parser.add_argument('--single-class', type=int,", "else: if not args.save_only_last: save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp,", "'Zipped' elif args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif", "parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply random transforms to the booster.') parser.add_argument('--debug', action='store_true',", "class {args.single_class}\") # Transform everything to have the same label class_tx = lambda", "not None, \"training_mode is required\" # Important for automatic job retries on the", "assert args.eval_only model = boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting not found: {args.boosting}')", "It is # a bit tricky cause if we do that, we cannot", "If given, will be used as a fixed \" \"target class (only optimize", "args.arch num_classes = 1 if args.single_class else ds.num_classes if arch == 'linear': arch", "a single class {args.single_class}\") # Transform everything to have the same label class_tx", "dim = constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light':", "avoids empty param list to optimizer when optimizing the booster alone if args.training_mode", "= breeds_helpers.make_living17(args.info_dir)[1][0] ds 
= datasets.CustomImageNet(args.data, split) else: raise NotImplementedError # TODO: with_index train_loader,", "# don't pass checkpoint to train_model do avoid resuming for epoch, optimizers etc.", "args.dataset in datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not", "optimizers etc. if args.boosting == 'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists():", "help=\"Maximum lighting (lightest)\") \"\"\" Example usage: python main.py --arch resnet50 --dataset cifar --batch-size", "= { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples }", "'metadata' not in store.keys: args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict)", "type=int, default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr', type=float,", "torchvision.utils import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse import", "from robustness import datasets, defaults, loaders, model_utils, train from robustness.tools import breeds_helpers from", "args.dataset == 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city':", "type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. 
fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids',", "boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == '3d': boosting_path = Path(args.out_dir) / BOOSTING_FP if", "visualizations instead of all\") parser.add_argument('--resume', action='store_true', help='Whether to resume training the DataAugmentedModel or", "used as a fixed \" \"target class (only optimize ONE texture across all", "ds) # Resume traing the boosted model from a checkpoint resume_path = os.path.join(args.out_dir,", "= model.module.booster(inp, target) bs_path = save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted)", "{args.exp_name}\") assert args.exp_name != None # Preprocess args default_ds = args.dataset if args.dataset", "in datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only:", "train_model do avoid resuming for epoch, optimizers etc. 
if args.boosting == 'class_consistent': boosting_path", "a patch using a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1],", "a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd = {k[len('module.'):]:v for", "defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if False and (args.adv_train or args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS,", "if False and (args.adv_train or args.adv_eval): args = defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args", "import sys from torchvision.utils import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions'))", "help='Dataset (Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do not", "all\") parser.add_argument('--resume', action='store_true', help='Whether to resume training the DataAugmentedModel or not.' 'Useful to", "{'arch': arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model,", "of Cube') # Zoom (bigger = more zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum", "in constants.NAME_TO_ARCH and args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch", "as np import torch as ch from robustness import datasets, defaults, loaders, model_utils,", "checkpoint to train_model do avoid resuming for epoch, optimizers etc. 
if args.boosting ==", "'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path) else: dim", "dog' assert args.training_mode is not None, \"training_mode is required\" # Important for automatic", "args.eval_only model = boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting not found: {args.boosting}') return", "action='store_true', help=\"Only keep the last visualizations instead of all\") parser.add_argument('--resume', action='store_true', help='Whether to", "# Render configuration parser.add_argument('--render-samples', type=int, default=1) parser.add_argument('--custom-file', help='If given, use object from file", "None: print(f\"Boosting towards a single class {args.single_class}\") # Transform everything to have the", "== 'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds = datasets.ImageNet(args.data) if", "= save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path = save_dir /", "uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser =", "== 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset == 'city': ds", "robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to a checkpoint to load (useful for training", "ds.num_classes if arch == 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch,", "args.dataset if args.dataset in datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds])", "tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, 
render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model,", "if not args.save_only_last: save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target,", "and args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes =", "in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to a checkpoint to load (useful for", "loading from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations else []) #", "\"corrupted\" # boosted images, but only the boosted images if args.boosting != '3d':", "booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] booster = boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size,", "one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to a checkpoint to load (useful", "last visualizations instead of all\") parser.add_argument('--resume', action='store_true', help='Whether to resume training the DataAugmentedModel", "assert args.exp_name != None # Preprocess args default_ds = args.dataset if args.dataset in", "--exp-name tmp --patch-size 10 --patch-lr 0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args): if args.dataset", "are not corruptions. 
# For consistenct when loading from checkpoints model = boosters.DataAugmentedModel(model,", "save_dir=save_dir) if i == 0: print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args,", "in new_args: setattr(args, k, new_args[k]) assert not args.adv_train, 'not supported yet slatta dog'", "--training-mode joint \"\"\" def get_dataset_and_loaders(args): if args.dataset == 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors,", "= {k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}' (epoch", "elif args.dataset == 'city': ds = datasets.ImageNet(args.data) elif args.dataset == 'cifar': ds =", "constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples':", "'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes,", "pathlib import sys from torchvision.utils import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path /", "(i.e., most zoomed out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light',", "args.zipped: ds.custom_class = 'Zipped' elif args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds =", "job is pre-empted.' '(Overrides the one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path to", "type=float, default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we", "to resume training the DataAugmentedModel or not.' 
'Useful to continue training if job", "pass instead of matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the loop (see constants.py", "type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example usage: python main.py --arch resnet50 --dataset", "args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif args.dataset ==", "elif args.dataset == 'cifar': ds = datasets.CIFAR('/tmp') elif args.dataset == 'imagenet': ds =", "# TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return ds, (train_loader,", "args = parser.parse_args() if args.json_config is not None: print(\"Overriding args with JSON...\") new_args", "= boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == 'none': # assert args.eval_only model =", "default=0.005) parser.add_argument('--patch-lr', type=float, default=0.005) parser.add_argument('--pytorch-pretrained', action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we should", "model.module.booster is None: return if args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i %", "= {'arch': arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained}", "training a patch using a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0,", "print(f'Saved in {store.save_dir}') args.iteration_hook = iteration_hook return train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint,", "when loading from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, 
args.augmentations.split(',') if args.augmentations else [])", "0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args): if args.dataset == 'solids': ds = datasets.ImageNet(args.data,", "DataAugmentedModel even if there are not corruptions. # For consistenct when loading from", "'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples } corruptions = constants.THREE_D_CORRUPTIONS if args.add_corruptions else", "Path(store.save_dir) #TODO: Move this part inside the 2D boosters. It is # a", "or not.' 'Useful to continue training if job is pre-empted.' '(Overrides the one", "boosted images, but only the boosted images if args.boosting != '3d': inp, target", "when optimizing the booster alone if args.training_mode in ['joint', 'model']: parameters = model.boosted_model.parameters()", "new_args[k]) assert not args.adv_train, 'not supported yet slatta dog' assert args.training_mode is not", "None, args.training_mode) else: raise ValueError(f'boosting not found: {args.boosting}') return model.cuda() def main_trainer(args, store):", "= pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse import os from pathlib import", "for training a patch using a pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1,", "defaults.check_and_fill_args( args, defaults.PGD_ARGS, datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir,", "action='store_true') parser.add_argument('--save-freq', type=int, default=50, help=\"How frequently we should save images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only", "is required\" # Important for automatic job retries on the cluster in case", "parser = argparse.ArgumentParser(conflict_handler='resolve') parser = 
defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser =", "given, will be used as a fixed \" \"target class (only optimize ONE", "to train_model do avoid resuming for epoch, optimizers etc. if args.boosting == 'class_consistent':", "one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do not use tqdm.') parser.add_argument('--exp-name',", "parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to find", "# Transform everything to have the same label class_tx = lambda x, y:", "BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS,", "pretrained model).') parser.add_argument('--zipped', action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply random transforms to", "== 0: save_dir = Path(store.save_dir) #TODO: Move this part inside the 2D boosters.", "booster, args.training_mode) elif args.boosting == '3d': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists():", "64 --out-dir outdir --exp-name tmp --patch-size 10 --patch-lr 0.01 --training-mode joint \"\"\" def", "'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser) parser = defaults.add_args_to_parser(defaults.MODEL_LOADER_ARGS, parser) parser", "the booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config', help='Path to a JSON config", "sys from torchvision.utils import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, 
str(curr_path / 'better_corruptions')) import", "from torchvision.utils import save_image curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse", "to act \" \"in single-class mode. If given, will be used as a", "type=int, default=40, help=\"Maximum zoom (i.e., most zoomed out)\") # Lighting parser.add_argument('--min-light', type=float, default=0.5,", "save_dir / f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else:", "loop_type == 'val' or model.module.booster is None: return if args.training_mode in ['booster', 'joint']:", "save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir) if i == 0:", "torch as ch from robustness import datasets, defaults, loaders, model_utils, train from robustness.tools", "set(vars(args).keys()) for k in new_args: setattr(args, k, new_args[k]) assert not args.adv_train, 'not supported", "f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir) if i == 0: print(f'Saved in", "if args.zipped: ds.custom_class = 'Zipped' elif args.dataset == 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds", "if arch == 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset':", "for epoch, optimizers etc. 
if args.boosting == 'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP", "args.single_class is not None: print(f\"Boosting towards a single class {args.single_class}\") # Transform everything", "parser.add_argument('--patch-size', type=int, default=70) parser.add_argument('--training-mode', type=str, choices=['joint','model','booster']) parser.add_argument('--arch', type=str, default='resnet18') parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--patch-lr',", "parser.add_argument('--resume', action='store_true', help='Whether to resume training the DataAugmentedModel or not.' 'Useful to continue", "same label class_tx = lambda x, y: (x, ch.ones_like(y) * args.single_class) train_loader =", "save_dir / f'boosted_{i}.jpg' save_image(example_boosted[:4], bs_path) example_adversaried = model.module.boosted_model.apply(example_boosted) inp_path = save_dir / f'inp_{i}.jpg'", "debug stuff') parser.add_argument('--json-config', help='Path to a JSON config file **that will override argparse", "= breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0]", "in new_args), set(new_args.keys()) - set(vars(args).keys()) for k in new_args: setattr(args, k, new_args[k]) assert", "nn from torchvision import models from torchvision.datasets import CIFAR10 from . import boosters,", "help=\"Minimum zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e., most", "DataAugmentedModel or not.' 'Useful to continue training if job is pre-empted.' 
'(Overrides the", "None: return if args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq ==", "if args.exp_name == 'random': args.exp_name = str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name !=", "LinearModel from uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve')", "= save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir) if i ==", "= boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == '3d': boosting_path = Path(args.out_dir) / BOOSTING_FP", "= None if args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel from a checkpoint...]') checkpoint", "in store.keys: args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found", "is not None: print(f\"Boosting towards a single class {args.single_class}\") # Transform everything to", "{args.dataset} | Model: {args.arch}\") if args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args, model, val_loader,", "== 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset': ds, 'resume_path':", "debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == 'none':", "args.boosting == 'class_consistent': boosting_path = Path(args.out_dir) / BOOSTING_FP if boosting_path.exists(): booster = ch.load(boosting_path)", "== 'living17': split = breeds_helpers.make_living17(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) else: raise NotImplementedError #", "val_loader, store=store) parameters = [model.dummy] 
# avoids empty param list to optimizer when", "default=0.5, help=\"Minimum lighting (darkest)\") parser.add_argument('--max-light', type=float, default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example usage:", "training BoostedModel from a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill) sd = checkpoint['model'] sd", "args.resume and os.path.isfile(resume_path): print('[Resuming training BoostedModel from a checkpoint...]') checkpoint = ch.load(resume_path, pickle_module=dill)", "LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or", "model = boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting not found: {args.boosting}') return model.cuda()", "datasets.CustomImageNet(args.data, split) else: raise NotImplementedError # TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size,", "boosters, constants from .utils import custom_datasets, LinearModel from uuid import uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor)", "args.save_only_last: save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir) if", "= str(uuid4()) print(f\"Experiment name: {args.exp_name}\") assert args.exp_name != None # Preprocess args default_ds", "data_aug=True) return ds, (train_loader, val_loader) def get_boosted_model(args, ds): is_pt_model = args.arch in constants.NAME_TO_ARCH", "/ f'inp_{i}.jpg' adv_path = save_dir / f'adv_{i}.jpg' save_image(inp[:4], inp_path) save_image(example_adversaried[:4], adv_path) else: if", "ch.ones_like(y) * args.single_class) train_loader = loaders.LambdaLoader(train_loader, class_tx) val_loader = loaders.LambdaLoader(val_loader, class_tx) model =", "--dataset cifar --batch-size 64 --out-dir outdir --exp-name tmp --patch-size 10 --patch-lr 0.01 
--training-mode", "1], help='Do not use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. fog,gaussian_noise')", "import Path import cox.store import cox.utils import dill import json import numpy as", "the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do not use tqdm.')", "consistenct when loading from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',') if args.augmentations else", "--out-dir outdir --exp-name tmp --patch-size 10 --patch-lr 0.01 --training-mode joint \"\"\" def get_dataset_and_loaders(args):", "checkpoint resume_path = os.path.join(args.out_dir, args.exp_name, 'checkpoint.pt.latest') checkpoint = None if args.resume and os.path.isfile(resume_path):", "args default_ds = args.dataset if args.dataset in datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args(", "args.single_class else ds.num_classes if arch == 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs =", "raise NotImplementedError # TODO: with_index train_loader, val_loader = ds.make_loaders(batch_size=args.batch_size, val_batch_size=args.batch_size, workers=args.workers, data_aug=True) return", "action='store_true') parser.add_argument('--apply-booster-transforms', type=int, default=1, choices=[0, 1], help='Apply random transforms to the booster.') parser.add_argument('--debug',", "images\") parser.add_argument('--save-only-last', action='store_true', help=\"Only keep the last visualizations instead of all\") parser.add_argument('--resume', action='store_true',", "print(\"Overriding args with JSON...\") new_args = json.load(open(args.json_config)) assert all(hasattr(args, k) for k in", "type=int, help=\"Whether to act \" \"in single-class mode. 
If given, will be used", "batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode)", "'Useful to continue training if job is pre-empted.' '(Overrides the one in robustness.defaults)')", "corruptions. # For consistenct when loading from checkpoints model = boosters.DataAugmentedModel(model, ds.ds_name, args.augmentations.split(',')", "from pathlib import Path import cox.store import cox.utils import dill import json import", "optimize ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender", "ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering", "default=1, type=int) parser.add_argument('--forward-render', action='store_true', help=\"Use blender rendering on forward pass instead of matmul\")", "in the loop (see constants.py for details)\") # Render configuration parser.add_argument('--render-samples', type=int, default=1)", "args.adv_train, 'not supported yet slatta dog' assert args.training_mode is not None, \"training_mode is", "ds, 'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs)", "to a checkpoint to load (useful for training a patch using a pretrained", "use object from file instead of Cube') # Zoom (bigger = more zoomed", "if there are not corruptions. 
# For consistenct when loading from checkpoints model", "import cox.store import cox.utils import dill import json import numpy as np import", "we do that, we cannot save the \"corrupted\" # boosted images, but only", "cause if we do that, we cannot save the \"corrupted\" # boosted images,", "= boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model", "!= '3d': inp, target = inp.cuda(), target.cuda() example_boosted = model.module.booster(inp, target) bs_path =", "= iteration_hook return train.train_model(args, model, (train_loader, val_loader), store=store, checkpoint=checkpoint, update_params=parameters) if __name__ ==", "parser.parse_args() if args.json_config is not None: print(\"Overriding args with JSON...\") new_args = json.load(open(args.json_config))", "default=0.5, help=\"Maximum lighting (lightest)\") \"\"\" Example usage: python main.py --arch resnet50 --dataset cifar", "def get_boosted_model(args, ds): is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet' arch", "render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode) elif", "ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped' elif args.dataset == 'entity13': split", "args.augmentations.split(',') if args.augmentations else []) # don't pass checkpoint to train_model do avoid", "args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not", 
"args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata", "args, defaults.CONFIG_ARGS, datasets.DATASETS[default_ds]) if not args.eval_only: args = defaults.check_and_fill_args( args, defaults.TRAINING_ARGS, datasets.DATASETS[default_ds]) if", "== 'entity13': split = breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17':", "Cube') # Zoom (bigger = more zoomed out) parser.add_argument('--min-zoom', type=int, default=20, help=\"Minimum zoom", "return if args.training_mode in ['booster', 'joint']: model.module.booster.step_booster(lr=args.patch_lr) if i % args.save_freq == 0:", "model_utils.make_and_restore_model(**kwargs) # Wrap the model wtith DataAugmentedModel even if there are not corruptions.", "arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset': ds, 'resume_path': args.model_path, 'add_custom_forward':", "# Resume traing the boosted model from a checkpoint resume_path = os.path.join(args.out_dir, args.exp_name,", "cox.store.Store(args.out_dir, args.exp_name) if 'metadata' not in store.keys: args_dict = args.__dict__ schema = cox.store.schema_from_dict(args_dict)", "defaults.add_args_to_parser(defaults.PGD_ARGS, parser) # Custom arguments parser.add_argument('--boosting', choices=['none', 'class_consistent', '3d'], default='class_consistent', help='Dataset (Overrides the", "resuming for epoch, optimizers etc. 
if args.boosting == 'class_consistent': boosting_path = Path(args.out_dir) /", "= boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode) elif", "render_options = { 'min_zoom': args.min_zoom, 'max_zoom': args.max_zoom, 'min_light': args.min_light, 'max_light': args.max_light, 'samples': args.render_samples", "save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad(): model(inp, target, save_dir=save_dir) if i", "val_loader = loaders.LambdaLoader(val_loader, class_tx) model = get_boosted_model(args, ds) # Resume traing the boosted", "curr_path = pathlib.Path(__file__).parent.absolute() sys.path.insert(0, str(curr_path / 'better_corruptions')) import argparse import os from pathlib", "args.eval_only: print('==>[Evaluating the model]') return train.eval_model(args, model, val_loader, store=store) parameters = [model.dummy] #", "{k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint of BoostedModel'{}' (epoch {})\".format(resume_path,", "action='store_true', help=\"Add corruptions in the loop (see constants.py for details)\") # Render configuration", "but only the boosted images if args.boosting != '3d': inp, target = inp.cuda(),", "from . 
import boosters, constants from .utils import custom_datasets, LinearModel from uuid import", "adv_path) else: if not args.save_only_last: save_dir = save_dir / f'iteration_{i}' os.makedirs(save_dir) with ch.no_grad():", "parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e., most zoomed out)\") # Lighting parser.add_argument('--min-light', type=float,", "default=None, help='Path to a checkpoint to load (useful for training a patch using", "boosters.ClassConsistentBooster(ds.num_classes, dim, constants.PATCH_TRANSFORMS, args.patch_size, model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting", "model, apply_transforms=args.apply_booster_transforms) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting == '3d': boosting_path =", "else args.arch num_classes = 1 if args.single_class else ds.num_classes if arch == 'linear':", "= constants.THREE_D_CORRUPTIONS if args.add_corruptions else None booster = boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options,", "if job is pre-empted.' '(Overrides the one in robustness.defaults)') parser.add_argument('--model-path', type=str, default=None, help='Path", "not use tqdm.') parser.add_argument('--exp-name', type=str, required=False) parser.add_argument('--augmentations', type=str, default=None, help='e.g. 
fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar',", "'imagenet': ds = datasets.ImageNet(args.data) if args.zipped: ds.custom_class = 'Zipped' elif args.dataset == 'entity13':", "get_dataset_and_loaders(args) if args.single_class is not None: print(f\"Boosting towards a single class {args.single_class}\") #", "= checkpoint['model'] sd = {k[len('module.'):]:v for k,v in sd.items()} model.load_state_dict(sd) print(\"=> loaded checkpoint", "parameters = model.boosted_model.parameters() def iteration_hook(model, i, loop_type, inp, target): if loop_type == 'val'", "= Path(store.save_dir) #TODO: Move this part inside the 2D boosters. It is #", "assert not args.adv_train, 'not supported yet slatta dog' assert args.training_mode is not None,", "fog,gaussian_noise') parser.add_argument('--dataset', choices=['cifar', 'imagenet', 'entity13', 'living17', 'solids', 'city'], default='imagenet') parser.add_argument('--info-dir', type=str, help='Where to", "zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e., most zoomed out)\") # Lighting", "checkpoint to load (useful for training a patch using a pretrained model).') parser.add_argument('--zipped',", "= constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes = 1 if args.single_class else ds.num_classes", "new_args), set(new_args.keys()) - set(vars(args).keys()) for k in new_args: setattr(args, k, new_args[k]) assert not", "\"\"\" Example usage: python main.py --arch resnet50 --dataset cifar --batch-size 64 --out-dir outdir", "default=20, help=\"Minimum zoom (i.e., most zoomed in)\") parser.add_argument('--max-zoom', type=int, default=40, help=\"Maximum zoom (i.e.,", "if args.dataset == 'solids': ds = datasets.ImageNet(args.data, custom_class=custom_datasets.SolidColors, custom_class_args={'image_size': constants.DS_TO_DIM[args.dataset]}) elif args.dataset ==", "'entity13': split = 
breeds_helpers.make_entity13(args.info_dir)[1][0] ds = datasets.CustomImageNet(args.data, split) elif args.dataset == 'living17': split", "a JSON config file **that will override argparse args**') ## Arguments for 3D", "cluster in case of premptions. Avoid uuids. if args.exp_name == 'random': args.exp_name =", "args.augmentations else []) # don't pass checkpoint to train_model do avoid resuming for", "== 'none': # assert args.eval_only model = boosters.BoostedModel(model, None, args.training_mode) else: raise ValueError(f'boosting", "default='class_consistent', help='Dataset (Overrides the one in robustness.defaults)') parser.add_argument('--no-tqdm', type=int, default=1, choices=[0, 1], help='Do", "matmul\") parser.add_argument('--add-corruptions', action='store_true', help=\"Add corruptions in the loop (see constants.py for details)\") #", "loop_type, inp, target): if loop_type == 'val' or model.module.booster is None: return if", "= args.dataset if args.dataset in datasets.DATASETS else \"imagenet\" args = defaults.check_and_fill_args( args, defaults.CONFIG_ARGS,", "from torchvision import models from torchvision.datasets import CIFAR10 from . 
import boosters, constants", "'resume_path': args.model_path, 'add_custom_forward': is_pt_model or args.arch=='linear', 'pytorch_pretrained': args.pytorch_pretrained} model, _ = model_utils.make_and_restore_model(**kwargs) #", "arch == 'linear': arch = LinearModel(num_classes, constants.DS_TO_DIM[args.dataset]) kwargs = {'arch': arch, 'dataset': ds,", "uuid4 # ch.set_default_tensor_type(ch.cuda.FloatTensor) BOOSTING_FP = 'boosting.ch' parser = argparse.ArgumentParser(conflict_handler='resolve') parser = defaults.add_args_to_parser(defaults.CONFIG_ARGS, parser)", "boosters.ThreeDBooster(num_classes=num_classes, tex_size=args.patch_size, image_size=dim, batch_size=args.batch_size, render_options=render_options, num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model =", "inp, target): if loop_type == 'val' or model.module.booster is None: return if args.training_mode", "slatta dog' assert args.training_mode is not None, \"training_mode is required\" # Important for", "print('==>[Evaluating the model]') return train.eval_model(args, model, val_loader, store=store) parameters = [model.dummy] # avoids", "\"target class (only optimize ONE texture across all images)\") parser.add_argument('--num-texcoord-renderers', default=1, type=int) parser.add_argument('--forward-render',", "% args.save_freq == 0: save_dir = Path(store.save_dir) #TODO: Move this part inside the", "to the booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff') parser.add_argument('--json-config', help='Path to a JSON", "ds): is_pt_model = args.arch in constants.NAME_TO_ARCH and args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained)", "choices=[0, 1], help='Apply random transforms to the booster.') parser.add_argument('--debug', action='store_true', help='Print debug stuff')", "constants.NAME_TO_ARCH 
and args.dataset == 'imagenet' arch = constants.NAME_TO_ARCH[args.arch](args.pytorch_pretrained) if is_pt_model else args.arch num_classes", "booster = ch.load(boosting_path) else: dim = constants.DS_TO_DIM[args.dataset] render_options = { 'min_zoom': args.min_zoom, 'max_zoom':", "args.__dict__ schema = cox.store.schema_from_dict(args_dict) store.add_table('metadata', schema) store['metadata'].append_row(args_dict) else: print('[Found existing metadata in store.", "num_texcoords=args.num_texcoord_renderers, num_gpus=ch.cuda.device_count(), debug=args.debug, forward_render=args.forward_render, custom_file=args.custom_file, corruptions=corruptions) model = boosters.BoostedModel(model, booster, args.training_mode) elif args.boosting", "datasets.DATASETS[default_ds]) args = defaults.check_and_fill_args( args, defaults.MODEL_LOADER_ARGS, datasets.DATASETS[default_ds]) store = cox.store.Store(args.out_dir, args.exp_name) if 'metadata'" ]
[ "0 1 iterate 60 begin draw 0 .7 iterate 7 begin draw 0.096441350201699999", "width .15 jump 0 1 iterate 60 begin draw 0 .7 iterate 7", "draw 0 .7 iterate 7 begin draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273", ".43 left 35 scale .32 width .15 jump 0 1 iterate 60 begin", "iterate 3 begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273 mixcolor 1 1", "3 begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0", "0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012 end draw", "begin draw 0 .7 iterate 7 begin draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right", "7 begin draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0", "draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012 end", "0 0 color 0 1 0 jump .45 .43 left 35 scale .32", "jump .45 .43 left 35 scale .32 width .15 jump 0 1 iterate", ".7 iterate 7 begin draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor 1", "0 .7 iterate 7 begin draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor", "0 1 0 jump .45 .43 left 35 scale .32 width .15 jump", ".45 .43 left 35 scale .32 width .15 jump 0 1 iterate 60", "1 0 .012 end draw 0 -.7 iterate 3 begin draw 0.056730206001 -0.00790217776764", "left 35 scale .32 width .15 jump 0 1 iterate 60 begin draw", "0 color 0 1 0 jump .45 .43 left 35 scale .32 width", "jump 0 1 iterate 60 begin draw 0 .7 iterate 7 begin draw", "0 -.7 iterate 3 begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273 mixcolor", "1 1 0 .012 end draw 0 -.7 iterate 3 begin draw 0.056730206001", "iterate 7 begin draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor 1 1", "begin 
draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012", "draw 0 -.7 iterate 3 begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273", "scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012 end draw 0 -.7", "begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012", "iterate 60 begin draw 0 .7 iterate 7 begin draw 0.096441350201699999 -0.013433702204987999 scale", "3.27272727273 mixcolor 1 1 0 .012 end draw 0 -.7 iterate 3 begin", ".012 end draw 0 -.7 iterate 3 begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379", ".32 width .15 jump 0 1 iterate 60 begin draw 0 .7 iterate", "0 0 0 color 0 1 0 jump .45 .43 left 35 scale", "end draw 0 -.7 iterate 3 begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right", "-0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012 end draw 0", "0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012 end end'''", "35 scale .32 width .15 jump 0 1 iterate 60 begin draw 0", "-.7 iterate 3 begin draw 0.056730206001 -0.00790217776764 scale 0.99371847379 right 3.27272727273 mixcolor 1", "0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012 end draw 0 -.7 iterate", "1 0 jump .45 .43 left 35 scale .32 width .15 jump 0", "1 iterate 60 begin draw 0 .7 iterate 7 begin draw 0.096441350201699999 -0.013433702204987999", "60 begin draw 0 .7 iterate 7 begin draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379", ".15 jump 0 1 iterate 60 begin draw 0 .7 iterate 7 begin", "right 3.27272727273 mixcolor 1 1 0 .012 end draw 0 -.7 iterate 3", "color 0 1 0 jump .45 .43 left 35 scale .32 width .15", "mixcolor 1 1 0 .012 end draw 0 -.7 iterate 3 begin draw", "scale .32 width .15 jump 0 1 iterate 60 begin draw 0 .7", "0 .012 end draw 0 -.7 iterate 3 begin draw 0.056730206001 -0.00790217776764 scale", "splash='''bgcolor 0 0 0 
color 0 1 0 jump .45 .43 left 35", "0 jump .45 .43 left 35 scale .32 width .15 jump 0 1", "draw 0.096441350201699999 -0.013433702204987999 scale 0.99371847379 right 3.27272727273 mixcolor 1 1 0 .012 end" ]
[ "sys.argv[2] #Create a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo =", "a power of 2 pieceLength = 18 filesPath = sys.argv[2] #Create a new", "piece size as a power of 2 pieceLength = 18 filesPath = sys.argv[2]", "outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise", "track.childNodes: if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip()", "import subprocess import urllib import urllib2 import MultipartPostHandler import cookielib import hashlib import", "== key: name = value else: t[key] = value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1]", "about the tracks for f in doc.getElementsByTagName('File'): f_ = {} f_['tracks'] = []", "all the output stdout, stderr = proc.communicate() #Check for failure if 0!=proc.returncode: print", "else: t[key] = value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval", "\"__main__\": with open(sys.argv[1],'r') as fin: conf = json.load(fin) #Login to the fairywren instance", "cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return outfile def mediainfo(*files): cmd =", "in the #General track if t['type'] == 'General' and 'Complete_name' == key: name", "'Complete_name' == key: name = value else: t[key] = value f_['tracks'].append(t) name =", "failure if 0!=proc.returncode: print stdout print stderr raise SystemError('mediainfo failed') retval = {}", "= json.load(response) if 'error' in body: raise Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return", ") ).read())['announce']['href'] #Get the current piece size as a power of 2 pieceLength", "os import math import subprocess import urllib import urllib2 import 
MultipartPostHandler import cookielib", "in track.childNodes: if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip() value =", "os.path.split(filesPath)[-1] #Upload the torrent to fairywren fairywren.open('%s/api/torrents' % fwurl ,data={\"extended\": json.dumps(extendedInfo) , \"title\":str(title),\"torrent\":open(newTorrentPath,'rb')})", "return retval def listFiles(filesPath): try: files = os.listdir(filesPath) except OSError as e: if", "minfo except SystemError as e: print 'No mediainfo on upload...' if len(sys.argv) ==", "cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private') outfile", "stderr = proc.communicate() #Check for failure if 0!=proc.returncode: print stdout print stderr raise", "str(pieceLength)) if private: cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target)", "import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length='", "except OSError as e: if e.errno!=20: raise e files = [filesPath] return files", "= sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload the torrent to fairywren fairywren.open('%s/api/torrents' %", "the first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version'] =", "qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request) body = json.load(response) if", "failed\") return outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files proc =", "in files] return files def buildOpener(url,username,password): url = str(url) def hashPassword(pw): h =", "import urllib2 import MultipartPostHandler 
import cookielib import hashlib import base64 import types import", "['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private') outfile = '%s.%i.torrent'", "mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the", "json import os import math import subprocess import urllib import urllib2 import MultipartPostHandler", "stdout print stderr raise SystemError('mediainfo failed') retval = {} #Parse the output doc", "(os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return outfile", "the information about the tracks for f in doc.getElementsByTagName('File'): f_ = {} f_['tracks']", "mediainfo on upload...' if len(sys.argv) == 4: title = sys.argv[3] else: title =", "json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) ).read())['announce']['href']", "doc.getElementsByTagName('File'): f_ = {} f_['tracks'] = [] name = None for track in", "key: name = value else: t[key] = value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name]", "private: cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if 0!=", "else: title = os.path.split(filesPath)[-1] #Upload the torrent to fairywren fairywren.open('%s/api/torrents' % fwurl ,data={\"extended\":", "{} #Parse the output doc = xml.dom.minidom.parseString(stdout) #Ignore anything not in the first", "if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo", "= {} f_['tracks'] = [] name = None for track in f.getElementsByTagName('track'): t", "= 
json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] )", "cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with open(sys.argv[1],'r') as fin: conf", "files = os.listdir(filesPath) except OSError as e: if e.errno!=20: raise e files =", "= mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo = {} try: minfo = mediainfo(*files) extendedInfo['mediainfo']", "cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout, stderr =", "retval['files'][name] = f_ return retval def listFiles(filesPath): try: files = os.listdir(filesPath) except OSError", "open(sys.argv[1],'r') as fin: conf = json.load(fin) #Login to the fairywren instance fairywren =", "= str(track.getAttribute('type')) for tag in track.childNodes: if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key", "new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo = {} try: minfo", "body: raise Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\":", "== \"__main__\": with open(sys.argv[1],'r') as fin: conf = json.load(fin) #Login to the fairywren", "subprocess import urllib import urllib2 import MultipartPostHandler import cookielib import hashlib import base64", "import hashlib import base64 import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent']", "import urllib import urllib2 import MultipartPostHandler import cookielib import hashlib import base64 import", "in 
f.getElementsByTagName('track'): t = {} t['type'] = str(track.getAttribute('type')) for tag in track.childNodes: if", "e: if e.errno!=20: raise e files = [filesPath] return files files = [os.path.join(filesPath,f)", "= tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name of the file in the #General track", "f.getElementsByTagName('track'): t = {} t['type'] = str(track.getAttribute('type')) for tag in track.childNodes: if len(tag.childNodes)==1", "urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with open(sys.argv[1],'r') as fin: conf = json.load(fin) #Login", "f_ = {} f_['tracks'] = [] name = None for track in f.getElementsByTagName('track'):", "buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve the announce url account = json.loads(fairywren.open('%s/api/session' % fwurl", "announce url account = json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl = json.loads(fairywren.open('%s/%s' % (", "= listFiles(filesPath) extendedInfo = {} try: minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo except", "#Create a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo = {}", "#Get the current piece size as a power of 2 pieceLength = 18", "import math import subprocess import urllib import urllib2 import MultipartPostHandler import cookielib import", "url,data=qp) response = urllib2.urlopen(request) body = json.load(response) if 'error' in body: raise Exception(body['error'])", "in the first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version']", "tracks for f in doc.getElementsByTagName('File'): f_ = {} f_['tracks'] = [] name =", "% ( fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get the current piece size as a", "= json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) 
).read())['announce']['href'] #Get the current piece size", "[os.path.join(filesPath,f) for f in files] return files def buildOpener(url,username,password): url = str(url) def", "json.load(response) if 'error' in body: raise Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler)", "urllib import urllib2 import MultipartPostHandler import cookielib import hashlib import base64 import types", "upload...' if len(sys.argv) == 4: title = sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload", "for failure if 0!=proc.returncode: print stdout print stderr raise SystemError('mediainfo failed') retval =", "try: files = os.listdir(filesPath) except OSError as e: if e.errno!=20: raise e files", "= {} t['type'] = str(track.getAttribute('type')) for tag in track.childNodes: if len(tag.childNodes)==1 and 'text'", "hashlib import base64 import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce='", "= cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with open(sys.argv[1],'r') as fin:", "not in the first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version", "#Mediainfo shows the name of the file in the #General track if t['type']", "f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval def listFiles(filesPath): try: files", "files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout, stderr = proc.communicate() #Check", "a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo = {} try:", "raise 
Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with", "import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength))", "f in doc.getElementsByTagName('File'): f_ = {} f_['tracks'] = [] name = None for", "cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private') outfile = '%s.%i.torrent' %", "value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name of the file in the #General", "import os import math import subprocess import urllib import urllib2 import MultipartPostHandler import", "str(url) def hashPassword(pw): h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session'", "the file in the #General track if t['type'] == 'General' and 'Complete_name' ==", "= mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError as e: print 'No mediainfo on", "% (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return", "urllib2 import MultipartPostHandler import cookielib import hashlib import base64 import types import xml.dom.minidom", "mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files'] = {} #For each file, extract the", "#Read all the output stdout, stderr = proc.communicate() #Check for failure if 0!=proc.returncode:", "#Check for failure if 0!=proc.returncode: print stdout print stderr raise SystemError('mediainfo failed') retval", "conf = json.load(fin) #Login to the fairywren 
instance fairywren = buildOpener(**conf['fairywren']) fwurl =", "newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo = {} try: minfo = mediainfo(*files)", "types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' +", "the output doc = xml.dom.minidom.parseString(stdout) #Ignore anything not in the first Mediainfo tag", "name = value else: t[key] = value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] =", "power of 2 pieceLength = 18 filesPath = sys.argv[2] #Create a new torrent", "= hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp) response =", "except SystemError as e: print 'No mediainfo on upload...' if len(sys.argv) == 4:", "listFiles(filesPath): try: files = os.listdir(filesPath) except OSError as e: if e.errno!=20: raise e", "name of the file in the #General track if t['type'] == 'General' and", "try: minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError as e: print 'No", "outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read", "h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp) response", "2 pieceLength = 18 filesPath = sys.argv[2] #Create a new torrent newTorrentPath =", "def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength)) if private:", "request = 
urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request) body = json.load(response) if 'error'", "tag in track.childNodes: if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip() value", "xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength)) if", "for tag in track.childNodes: if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip()", "urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request) body = json.load(response) if 'error' in body:", "SystemError as e: print 'No mediainfo on upload...' if len(sys.argv) == 4: title", "['/usr/bin/mediainfo','--output=XML'] cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout, stderr", "instance fairywren = buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve the announce url account =", "SystemError('mediainfo failed') retval = {} #Parse the output doc = xml.dom.minidom.parseString(stdout) #Ignore anything", "0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd", "listFiles(filesPath) extendedInfo = {} try: minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError", "= 18 filesPath = sys.argv[2] #Create a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files", "= doc.getAttribute('version').strip() retval['files'] = {} #For each file, extract the information about the", "stdout, stderr = proc.communicate() #Check for failure if 0!=proc.returncode: print stdout print stderr", "url = str(url) def hashPassword(pw): h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') 
qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request", "subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout, stderr = proc.communicate() #Check for failure if", "the #General track if t['type'] == 'General' and 'Complete_name' == key: name =", "import MultipartPostHandler import cookielib import hashlib import base64 import types import xml.dom.minidom def", "= str(url) def hashPassword(pw): h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request =", "hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request)", "mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError as e: print 'No mediainfo on upload...'", "= ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private') outfile =", "tag.childNodes[0].nodeName: key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name of the", "if t['type'] == 'General' and 'Complete_name' == key: name = value else: t[key]", "mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce) cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private')", "#For each file, extract the information about the tracks for f in doc.getElementsByTagName('File'):", "xml.dom.minidom.parseString(stdout) #Ignore anything not in the first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract", "and 'Complete_name' == key: name = value else: t[key] = value f_['tracks'].append(t) name", "if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent 
failed\") return outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML']", "if 0!=proc.returncode: print stdout print stderr raise SystemError('mediainfo failed') retval = {} #Parse", "json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get the current piece size as", "f_ return retval def listFiles(filesPath): try: files = os.listdir(filesPath) except OSError as e:", "current piece size as a power of 2 pieceLength = 18 filesPath =", "and 'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the", "= name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval def listFiles(filesPath): try: files = os.listdir(filesPath)", "'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name", "files = listFiles(filesPath) extendedInfo = {} try: minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo", "failed') retval = {} #Parse the output doc = xml.dom.minidom.parseString(stdout) #Ignore anything not", "math import subprocess import urllib import urllib2 import MultipartPostHandler import cookielib import hashlib", "output doc = xml.dom.minidom.parseString(stdout) #Ignore anything not in the first Mediainfo tag doc", "retval def listFiles(filesPath): try: files = os.listdir(filesPath) except OSError as e: if e.errno!=20:", "fwurl = str(conf['fairywren']['url']) #Retrieve the announce url account = json.loads(fairywren.open('%s/api/session' % fwurl ).read())", "def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all", "% url,data=qp) response = urllib2.urlopen(request) body = json.load(response) if 'error' in body: raise", "raise EnvironmentError(\"mktorrent failed\") return outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files", 
"sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload the torrent to fairywren fairywren.open('%s/api/torrents' % fwurl", "= str(conf['fairywren']['url']) #Retrieve the announce url account = json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl", "fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get the current piece size as a power of", "f_['tracks'] = [] name = None for track in f.getElementsByTagName('track'): t = {}", "stderr raise SystemError('mediainfo failed') retval = {} #Parse the output doc = xml.dom.minidom.parseString(stdout)", "extendedInfo = {} try: minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError as", "output stdout, stderr = proc.communicate() #Check for failure if 0!=proc.returncode: print stdout print", "on upload...' if len(sys.argv) == 4: title = sys.argv[3] else: title = os.path.split(filesPath)[-1]", "first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version'] = doc.getAttribute('version').strip()", "str(track.getAttribute('type')) for tag in track.childNodes: if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key =", "name = None for track in f.getElementsByTagName('track'): t = {} t['type'] = str(track.getAttribute('type'))", "= {} #Parse the output doc = xml.dom.minidom.parseString(stdout) #Ignore anything not in the", "anything not in the first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo", "buildOpener(url,username,password): url = str(url) def hashPassword(pw): h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)})", "#Upload the torrent to fairywren fairywren.open('%s/api/torrents' % fwurl ,data={\"extended\": json.dumps(extendedInfo) , \"title\":str(title),\"torrent\":open(newTorrentPath,'rb')}) os.unlink(newTorrentPath)", 
"cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with open(sys.argv[1],'r') as fin: conf =", "= os.listdir(filesPath) except OSError as e: if e.errno!=20: raise e files = [filesPath]", "proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout, stderr = proc.communicate() #Check for", "= xml.dom.minidom.parseString(stdout) #Ignore anything not in the first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0]", "return outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE)", "base64 import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' + announce)", "= f_ return retval def listFiles(filesPath): try: files = os.listdir(filesPath) except OSError as", "% fwurl ).read()) announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get", ").read())['announce']['href'] #Get the current piece size as a power of 2 pieceLength =", "announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get the current piece", "title = os.path.split(filesPath)[-1] #Upload the torrent to fairywren fairywren.open('%s/api/torrents' % fwurl ,data={\"extended\": json.dumps(extendedInfo)", "value else: t[key] = value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] = f_ return", "'No mediainfo on upload...' 
if len(sys.argv) == 4: title = sys.argv[3] else: title", "= value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval def listFiles(filesPath):", "size as a power of 2 pieceLength = 18 filesPath = sys.argv[2] #Create", "= ['/usr/bin/mediainfo','--output=XML'] cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout,", "extendedInfo['mediainfo'] = minfo except SystemError as e: print 'No mediainfo on upload...' if", "os.listdir(filesPath) except OSError as e: if e.errno!=20: raise e files = [filesPath] return", "cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output", "cookielib import hashlib import base64 import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd =", "import sys import json import os import math import subprocess import urllib import", "if 'error' in body: raise Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if", "return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with open(sys.argv[1],'r') as fin: conf = json.load(fin)", "{} t['type'] = str(track.getAttribute('type')) for tag in track.childNodes: if len(tag.childNodes)==1 and 'text' in", "for f in doc.getElementsByTagName('File'): f_ = {} f_['tracks'] = [] name = None", "== 4: title = sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload the torrent to", "= os.path.split(filesPath)[-1] #Upload the torrent to fairywren fairywren.open('%s/api/torrents' % fwurl ,data={\"extended\": json.dumps(extendedInfo) ,", "+ str(pieceLength)) if private: cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile)", 
"json.load(fin) #Login to the fairywren instance fairywren = buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve", "tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name of the file in the", "18 filesPath = sys.argv[2] #Create a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files =", "files] return files def buildOpener(url,username,password): url = str(url) def hashPassword(pw): h = hashlib.sha512()", "= json.load(fin) #Login to the fairywren instance fairywren = buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url'])", "+= files proc = subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout, stderr = proc.communicate()", "retval['version'] = doc.getAttribute('version').strip() retval['files'] = {} #For each file, extract the information about", "MultipartPostHandler import cookielib import hashlib import base64 import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private):", "announce) cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output='", "name = name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval def listFiles(filesPath): try: files =", "def buildOpener(url,username,password): url = str(url) def hashPassword(pw): h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','')", "file, extract the information about the tracks for f in doc.getElementsByTagName('File'): f_ =", "t['type'] == 'General' and 'Complete_name' == key: name = value else: t[key] =", "response = urllib2.urlopen(request) body = json.load(response) if 'error' in body: raise Exception(body['error']) cookies", "cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if 0!= subprocess.call(cmd):", "t = {} 
t['type'] = str(track.getAttribute('type')) for tag in track.childNodes: if len(tag.childNodes)==1 and", "files = [filesPath] return files files = [os.path.join(filesPath,f) for f in files] return", "4: title = sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload the torrent to fairywren", "for track in f.getElementsByTagName('track'): t = {} t['type'] = str(track.getAttribute('type')) for tag in", "name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval def listFiles(filesPath): try: files = os.listdir(filesPath) except", "h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request) body", "each file, extract the information about the tracks for f in doc.getElementsByTagName('File'): f_", "'%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\")", "[] name = None for track in f.getElementsByTagName('track'): t = {} t['type'] =", "doc.getAttribute('version').strip() retval['files'] = {} #For each file, extract the information about the tracks", "hashPassword(pw): h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp)", "file in the #General track if t['type'] == 'General' and 'Complete_name' == key:", "= [os.path.join(filesPath,f) for f in files] return files def buildOpener(url,username,password): url = str(url)", "print stdout print stderr raise SystemError('mediainfo failed') retval = {} #Parse the output", "tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name of the file in the #General track if", "with open(sys.argv[1],'r') as fin: conf = json.load(fin) 
#Login to the fairywren instance fairywren", "track in f.getElementsByTagName('track'): t = {} t['type'] = str(track.getAttribute('type')) for tag in track.childNodes:", "'error' in body: raise Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__", "= None for track in f.getElementsByTagName('track'): t = {} t['type'] = str(track.getAttribute('type')) for", "minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError as e: print 'No mediainfo", "0!=proc.returncode: print stdout print stderr raise SystemError('mediainfo failed') retval = {} #Parse the", "fairywren = buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve the announce url account = json.loads(fairywren.open('%s/api/session'", "= '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent", "t[key] = value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval def", "cmd.append('--output=' + outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return outfile def", "= {} #For each file, extract the information about the tracks for f", "t['type'] = str(track.getAttribute('type')) for tag in track.childNodes: if len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName:", "= urllib2.urlopen(request) body = json.load(response) if 'error' in body: raise Exception(body['error']) cookies =", "if __name__ == \"__main__\": with open(sys.argv[1],'r') as fin: conf = json.load(fin) #Login to", ").read()) announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get the current", "fairywren instance fairywren = buildOpener(**conf['fairywren']) fwurl = 
str(conf['fairywren']['url']) #Retrieve the announce url account", "return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request) body =", "retval['files'] = {} #For each file, extract the information about the tracks for", "#Retrieve the announce url account = json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl = json.loads(fairywren.open('%s/%s'", "doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files'] = {} #For each", "+ announce) cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),)", "import base64 import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd = ['/usr/bin/mktorrent'] cmd.append('--announce=' +", "account = json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href']", "cmd.append('--piece-length=' + str(pieceLength)) if private: cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' +", "#Extract the mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files'] = {} #For each file,", "doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files'] = {}", "files files = [os.path.join(filesPath,f) for f in files] return files def buildOpener(url,username,password): url", "key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name of the file", "{} try: minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError as e: print", "the output stdout, stderr = 
proc.communicate() #Check for failure if 0!=proc.returncode: print stdout", "= value else: t[key] = value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] = f_", "as fin: conf = json.load(fin) #Login to the fairywren instance fairywren = buildOpener(**conf['fairywren'])", "title = sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload the torrent to fairywren fairywren.open('%s/api/torrents'", "e files = [filesPath] return files files = [os.path.join(filesPath,f) for f in files]", "urllib2.urlopen(request) body = json.load(response) if 'error' in body: raise Exception(body['error']) cookies = cookielib.CookieJar()", "the mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files'] = {} #For each file, extract", "#Login to the fairywren instance fairywren = buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve the", "EnvironmentError(\"mktorrent failed\") return outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd += files proc", "for f in files] return files def buildOpener(url,username,password): url = str(url) def hashPassword(pw):", "as e: if e.errno!=20: raise e files = [filesPath] return files files =", "the tracks for f in doc.getElementsByTagName('File'): f_ = {} f_['tracks'] = [] name", "files def buildOpener(url,username,password): url = str(url) def hashPassword(pw): h = hashlib.sha512() h.update(pw) return", "import json import os import math import subprocess import urllib import urllib2 import", "in tag.childNodes[0].nodeName: key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name of", "= {} try: minfo = mediainfo(*files) extendedInfo['mediainfo'] = minfo except SystemError as e:", "return files files = [os.path.join(filesPath,f) for f in files] return files def buildOpener(url,username,password):", "= tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows the name 
of the file in", "in doc.getElementsByTagName('File'): f_ = {} f_['tracks'] = [] name = None for track", "len(sys.argv) == 4: title = sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload the torrent", "body = json.load(response) if 'error' in body: raise Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request)", "( fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get the current piece size as a power", "#Ignore anything not in the first Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the", "base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request) body = json.load(response)", "print 'No mediainfo on upload...' if len(sys.argv) == 4: title = sys.argv[3] else:", "the fairywren instance fairywren = buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve the announce url", "shows the name of the file in the #General track if t['type'] ==", "account['my']['href'] ) ).read())['announce']['href'] #Get the current piece size as a power of 2", "= urllib2.Request('%s/api/session' % url,data=qp) response = urllib2.urlopen(request) body = json.load(response) if 'error' in", "proc.communicate() #Check for failure if 0!=proc.returncode: print stdout print stderr raise SystemError('mediainfo failed')", "value f_['tracks'].append(t) name = name.strip().split(os.sep)[-1] retval['files'][name] = f_ return retval def listFiles(filesPath): try:", "pieceLength = 18 filesPath = sys.argv[2] #Create a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True)", "doc = xml.dom.minidom.parseString(stdout) #Ignore anything not in the first Mediainfo tag doc =", "outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return outfile def 
mediainfo(*files): cmd", "raise e files = [filesPath] return files files = [os.path.join(filesPath,f) for f in", "tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files'] =", "subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return outfile def mediainfo(*files): cmd = ['/usr/bin/mediainfo','--output=XML'] cmd +=", "None for track in f.getElementsByTagName('track'): t = {} t['type'] = str(track.getAttribute('type')) for tag", "{} #For each file, extract the information about the tracks for f in", "#General track if t['type'] == 'General' and 'Complete_name' == key: name = value", "== 'General' and 'Complete_name' == key: name = value else: t[key] = value", "as e: print 'No mediainfo on upload...' if len(sys.argv) == 4: title =", "{} f_['tracks'] = [] name = None for track in f.getElementsByTagName('track'): t =", "Mediainfo tag doc = doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files']", "the name of the file in the #General track if t['type'] == 'General'", "mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo = {} try: minfo = mediainfo(*files) extendedInfo['mediainfo'] =", "[filesPath] return files files = [os.path.join(filesPath,f) for f in files] return files def", "fin: conf = json.load(fin) #Login to the fairywren instance fairywren = buildOpener(**conf['fairywren']) fwurl", "raise SystemError('mediainfo failed') retval = {} #Parse the output doc = xml.dom.minidom.parseString(stdout) #Ignore", "def hashPassword(pw): h = hashlib.sha512() h.update(pw) return base64.urlsafe_b64encode(h.digest()).replace('=','') qp=urllib.urlencode({\"username\":username,\"password\":<PASSWORD>(password)}) request = urllib2.Request('%s/api/session' %", "the announce url account = json.loads(fairywren.open('%s/api/session' % fwurl ).read()) 
announceUrl = json.loads(fairywren.open('%s/%s' %", "= minfo except SystemError as e: print 'No mediainfo on upload...' if len(sys.argv)", "OSError as e: if e.errno!=20: raise e files = [filesPath] return files files", "__name__ == \"__main__\": with open(sys.argv[1],'r') as fin: conf = json.load(fin) #Login to the", "cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with open(sys.argv[1],'r') as", "= buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve the announce url account = json.loads(fairywren.open('%s/api/session' %", "f in files] return files def buildOpener(url,username,password): url = str(url) def hashPassword(pw): h", "torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo = {} try: minfo =", "e.errno!=20: raise e files = [filesPath] return files files = [os.path.join(filesPath,f) for f", "def listFiles(filesPath): try: files = os.listdir(filesPath) except OSError as e: if e.errno!=20: raise", "track if t['type'] == 'General' and 'Complete_name' == key: name = value else:", "#Parse the output doc = xml.dom.minidom.parseString(stdout) #Ignore anything not in the first Mediainfo", "sys import json import os import math import subprocess import urllib import urllib2", "Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ == \"__main__\": with open(sys.argv[1],'r')", "= doc.getElementsByTagName('Mediainfo')[0] #Extract the mediainfo version retval['version'] = doc.getAttribute('version').strip() retval['files'] = {} #For", "if len(sys.argv) == 4: title = sys.argv[3] else: title = os.path.split(filesPath)[-1] #Upload the", "fwurl ).read()) 
announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl, account['my']['href'] ) ).read())['announce']['href'] #Get the", "extract the information about the tracks for f in doc.getElementsByTagName('File'): f_ = {}", "return files def buildOpener(url,username,password): url = str(url) def hashPassword(pw): h = hashlib.sha512() h.update(pw)", "len(tag.childNodes)==1 and 'text' in tag.childNodes[0].nodeName: key = tag.tagName.strip() value = tag.childNodes[0].nodeValue.strip() #Mediainfo shows", "of 2 pieceLength = 18 filesPath = sys.argv[2] #Create a new torrent newTorrentPath", "files = [os.path.join(filesPath,f) for f in files] return files def buildOpener(url,username,password): url =", "as a power of 2 pieceLength = 18 filesPath = sys.argv[2] #Create a", "to the fairywren instance fairywren = buildOpener(**conf['fairywren']) fwurl = str(conf['fairywren']['url']) #Retrieve the announce", "filesPath = sys.argv[2] #Create a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath)", "= [] name = None for track in f.getElementsByTagName('track'): t = {} t['type']", "retval = {} #Parse the output doc = xml.dom.minidom.parseString(stdout) #Ignore anything not in", "+ outfile) cmd.append(target) if 0!= subprocess.call(cmd): raise EnvironmentError(\"mktorrent failed\") return outfile def mediainfo(*files):", "'General' and 'Complete_name' == key: name = value else: t[key] = value f_['tracks'].append(t)", "print stderr raise SystemError('mediainfo failed') retval = {} #Parse the output doc =", "str(conf['fairywren']['url']) #Retrieve the announce url account = json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl =", "= [filesPath] return files files = [os.path.join(filesPath,f) for f in files] return files", "if e.errno!=20: raise e files = [filesPath] return files files = [os.path.join(filesPath,f) for", "e: print 'No mediainfo on upload...' 
if len(sys.argv) == 4: title = sys.argv[3]", "= subprocess.Popen(cmd,stdout=subprocess.PIPE) #Read all the output stdout, stderr = proc.communicate() #Check for failure", "if private: cmd.append('--private') outfile = '%s.%i.torrent' % (os.tempnam(),os.getpid(),) cmd.append('--output=' + outfile) cmd.append(target) if", "= proc.communicate() #Check for failure if 0!=proc.returncode: print stdout print stderr raise SystemError('mediainfo", "the current piece size as a power of 2 pieceLength = 18 filesPath", "version retval['version'] = doc.getAttribute('version').strip() retval['files'] = {} #For each file, extract the information", "url account = json.loads(fairywren.open('%s/api/session' % fwurl ).read()) announceUrl = json.loads(fairywren.open('%s/%s' % ( fwurl,", "information about the tracks for f in doc.getElementsByTagName('File'): f_ = {} f_['tracks'] =", "of the file in the #General track if t['type'] == 'General' and 'Complete_name'", "= sys.argv[2] #Create a new torrent newTorrentPath = mktorrent(filesPath,announceUrl,pieceLength,True) files = listFiles(filesPath) extendedInfo", "import cookielib import hashlib import base64 import types import xml.dom.minidom def mktorrent(target,announce,pieceLength,private): cmd", "in body: raise Exception(body['error']) cookies = cookielib.CookieJar() cookies.extract_cookies(response,request) return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),MultipartPostHandler.MultipartPostHandler) if __name__ ==" ]
[ "CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if not data: return None return", "field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config:", "CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def", ".utils.string import parse_dict from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names:", "-> Dict[str, List[str]]: return { key: parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items() }", "from sciencebeam_utils.utils.string import parse_list from .utils.string import parse_dict from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME", "parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data: dict): return", "-> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return { key: parse_list(value) for key,", "yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) ->", "import Dict, List, NamedTuple, Optional import yaml from sciencebeam_utils.utils.string import parse_list from .utils.string", "EvaluationConfig( 
custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp)", "value in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]]) -> Dict[str, List[str]]:", "} def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type',", "from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str,", "custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') )", "data['fields'] ] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict):", "dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]:", "typing import Dict, List, NamedTuple, Optional import yaml from sciencebeam_utils.utils.string import parse_list from", "= None @staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config')", "open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig:", "scoring_type_overrides_str: str) -> Dict[str, List[str]]: return { key: parse_list(value) for key, value in", 
"Dict, List, NamedTuple, Optional import yaml from sciencebeam_utils.utils.string import parse_list from .utils.string import", "from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected:", "fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def", "CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig", "scoring_type_config = config_map.get('scoring_type', {}) return { key: parse_list(value) for key, value in scoring_type_config.items()", "CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple):", "def from_json(data: Optional[dict]): if not data: return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for", "= CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def", "field_data in data['fields'] ] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def", "str) -> Dict[str, List[str]]: return { key: parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items()", "CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[])", "@staticmethod def from_json(data: Optional[dict]): if 
not data: return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data)", "parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as fp:", "from_json(data: Optional[dict]): if not data: return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data", "class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] =", "Optional[dict] = None @staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']),", "-> dict: if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp) return", "return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str)", "parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp)", "from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def", "EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]: return { key:", "field_names: List[str] @staticmethod def from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class 
CustomEvaluationFieldConfig(NamedTuple): name:", "fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if not data: return None return CustomEvaluationConfig(", "] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return", "evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if not data:", "import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data: dict):", "CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'],", "return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]: return { key: parse_list(value)", "get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]:", "not data: return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ]", "return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod", "@staticmethod def from_json(data: dict): return 
CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type:", ") def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict:", "EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]: return { key: parse_list(value) for", "Dict[str, Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return { key:", "yaml from sciencebeam_utils.utils.string import parse_list from .utils.string import parse_dict from .utils.config import parse_config_as_dict", "= 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names']", "CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig", "return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]: return", "dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig", "-> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]: return {", "key, value in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]]) -> Dict[str,", "if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp) 
return yaml.safe_load(filename_or_fp) def", "parse_dict from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod", "class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class", "DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data: dict): return CustomEvaluationFieldSourceConfig(", "in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config", "return { key: parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map:", "name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data:", "for field_data in data['fields'] ] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod", "None @staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') )", "sciencebeam_utils.utils.string import parse_list from .utils.string import parse_dict from 
.utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME =", "name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod", "str): with open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict)", "@staticmethod def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) ->", "return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ] ) class EvaluationConfig(NamedTuple): custom:", "with open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) ->", "Optional import yaml from sciencebeam_utils.utils.string import parse_list from .utils.string import parse_dict from .utils.config", "Dict[str, List[str]]: return { key: parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items() } def", ") class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if not data: return", "'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] )", "None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ] ) class EvaluationConfig(NamedTuple):", "dict: if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp)", "expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), 
actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if", "str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod def from_json(data: dict):", "CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp)", "def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {})", "from .utils.string import parse_dict from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple):", "data: return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ] )", "if not data: return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields']", "isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json:", "import parse_list from .utils.string import parse_dict from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml'", "List, NamedTuple, Optional import yaml from sciencebeam_utils.utils.string import parse_list from .utils.string import parse_dict", "-> Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str):", "fields=[ 
CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig =", "def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if", "Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str): with open(filename_or_fp,", "def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) -> Dict[str,", "@staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class", "def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as fp: return", "parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config =", "from typing import Dict, List, NamedTuple, Optional import yaml from sciencebeam_utils.utils.string import parse_list", "parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]: return { key: parse_list(value) for key, value", "CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom') ) )", ") class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return EvaluationConfig(", 
"actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if not", "evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod def from_json(data:", "dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]: return", "class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if not data: return None", "Optional[dict]): if not data: return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in", "class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json(", "dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig]", "expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod def from_json(data: dict): return", "EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data: dict): return EvaluationConfig( custom=CustomEvaluationConfig.from_json( data.get('custom')", "return yaml.safe_load(fp) 
return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides(", "Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str): with", "{ key: parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map: Dict[str,", "List[str]]: return { key: parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config(", "List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return { key: parse_list(value) for key, value in", "key: parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str,", "def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str,", "CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig(", "from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields:", "List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]): if not data: return None return CustomEvaluationConfig( fields=[", "NamedTuple, Optional import yaml from 
sciencebeam_utils.utils.string import parse_list from .utils.string import parse_dict from", "List[str] @staticmethod def from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str", "parse_list from .utils.string import parse_dict from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class", "def from_json(data: dict): return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str", "def parse_scoring_type_overrides( scoring_type_overrides_str: str) -> Dict[str, List[str]]: return { key: parse_list(value) for key,", "evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple): fields: List[CustomEvaluationFieldConfig] @staticmethod def from_json(data: Optional[dict]):", ".utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str] @staticmethod def from_json(data:", "CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None", ") ) def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) ->", ") class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict]", "get_scoring_types_by_field_map_from_config( config_map: 
Dict[str, Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return", "config_map: Dict[str, Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return {", "yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json) def parse_scoring_type_overrides( scoring_type_overrides_str:", "actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'],", "= config_map.get('scoring_type', {}) return { key: parse_list(value) for key, value in scoring_type_config.items() }", "str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual: CustomEvaluationFieldSourceConfig evaluation_type_config: Optional[dict] = None @staticmethod def", "<reponame>elifesciences/sciencebeam-judge<gh_stars>0 from typing import Dict, List, NamedTuple, Optional import yaml from sciencebeam_utils.utils.string import", "custom=CustomEvaluationConfig.from_json( data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def", "def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']), actual=CustomEvaluationFieldSourceConfig.from_json(data['actual']), evaluation_type_config=data.get('evaluation_type_config') ) class CustomEvaluationConfig(NamedTuple):", "data.get('custom') ) ) def parse_evaluation_config(filename_or_fp) -> Dict[str, Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp)", "for key, value in parse_dict(scoring_type_overrides_str).items() } def 
get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]]) ->", "as fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return EvaluationConfig.from_json(evaluation_json)", "import yaml from sciencebeam_utils.utils.string import parse_list from .utils.string import parse_dict from .utils.config import", "in data['fields'] ] ) class EvaluationConfig(NamedTuple): custom: CustomEvaluationConfig = CustomEvaluationConfig(fields=[]) @staticmethod def from_json(data:", "str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r')", "return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp, str): with open(filename_or_fp, 'r') as", "return CustomEvaluationFieldSourceConfig( field_names=data['field_names'] ) class CustomEvaluationFieldConfig(NamedTuple): name: str evaluation_type: str expected: CustomEvaluationFieldSourceConfig actual:", "str]]) -> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return { key: parse_list(value) for", "parse_list(value) for key, value in parse_dict(scoring_type_overrides_str).items() } def get_scoring_types_by_field_map_from_config( config_map: Dict[str, Dict[str, str]])", "Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return { key: parse_list(value) for key, value", "import parse_dict from .utils.config import parse_config_as_dict DEFAULT_EVALUATION_YAML_FILENAME = 'evaluation.yml' class CustomEvaluationFieldSourceConfig(NamedTuple): field_names: List[str]", "'r') as fp: return yaml.safe_load(fp) return yaml.safe_load(filename_or_fp) def get_evaluation_config_object(evaluation_json: dict) -> EvaluationConfig: return", "parse_evaluation_config(filename_or_fp) -> Dict[str, 
Dict[str, str]]: return parse_config_as_dict(filename_or_fp) def parse_evaluation_yaml_config(filename_or_fp) -> dict: if isinstance(filename_or_fp,", "evaluation_type_config: Optional[dict] = None @staticmethod def from_json(data: dict): return CustomEvaluationFieldConfig( name=data['name'], evaluation_type=data['evaluation_type'], expected=CustomEvaluationFieldSourceConfig.from_json(data['expected']),", "return None return CustomEvaluationConfig( fields=[ CustomEvaluationFieldConfig.from_json(field_data) for field_data in data['fields'] ] ) class", "Dict[str, str]]) -> Dict[str, List[str]]: scoring_type_config = config_map.get('scoring_type', {}) return { key: parse_list(value)" ]
[ "1 n = 2 while(i<=5): print(n,\" * \", i , \" = \"", "i in l1: for j in l2: print(i,j) print(\"--------\") i = 1 n", "in l2: print(i,j) print(\"--------\") i = 1 n = 2 while(i<=5): print(n,\" *", "in l1: for j in l2: print(i,j) print(\"--------\") i = 1 n =", "print(\"--------\") i = 1 n = 2 while(i<=5): print(n,\" * \", i ,", "<filename>Programs/table.py l1 = ['1','2','3'] l2 = ['4','5','6'] for i in l1: for j", "for j in l2: print(i,j) print(\"--------\") i = 1 n = 2 while(i<=5):", "['4','5','6'] for i in l1: for j in l2: print(i,j) print(\"--------\") i =", "['1','2','3'] l2 = ['4','5','6'] for i in l1: for j in l2: print(i,j)", "print(n,\" * \", i , \" = \" , n*i) i +=1 print(\"--------\")", "print(i,j) print(\"--------\") i = 1 n = 2 while(i<=5): print(n,\" * \", i", "= ['4','5','6'] for i in l1: for j in l2: print(i,j) print(\"--------\") i", "= 1 n = 2 while(i<=5): print(n,\" * \", i , \" =", "= ['1','2','3'] l2 = ['4','5','6'] for i in l1: for j in l2:", "l2 = ['4','5','6'] for i in l1: for j in l2: print(i,j) print(\"--------\")", "j in l2: print(i,j) print(\"--------\") i = 1 n = 2 while(i<=5): print(n,\"", "= 2 while(i<=5): print(n,\" * \", i , \" = \" , n*i)", "2 while(i<=5): print(n,\" * \", i , \" = \" , n*i) i", "i = 1 n = 2 while(i<=5): print(n,\" * \", i , \"", "for i in l1: for j in l2: print(i,j) print(\"--------\") i = 1", "n = 2 while(i<=5): print(n,\" * \", i , \" = \" ,", "l1 = ['1','2','3'] l2 = ['4','5','6'] for i in l1: for j in", "l2: print(i,j) print(\"--------\") i = 1 n = 2 while(i<=5): print(n,\" * \",", "l1: for j in l2: print(i,j) print(\"--------\") i = 1 n = 2", "while(i<=5): print(n,\" * \", i , \" = \" , n*i) i +=1" ]
[ "page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/'", "url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url", "driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() #", "passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver,", "selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)", "'lc-mc')]\") return settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text", "google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver,", "Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name, content): fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close", "driver.find_element_by_id('contentArea').text return settings def google(driver, uname, pwd): # login to google url =", "driver = 
webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit()", "= 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url =", "Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return", "url = 'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url =", "'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text", "emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings =", "pwd): # login to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath(", "'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res =", "\"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\",", "url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return 
terms def FBPT(driver): url", "import atexit import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11; Linux", "driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def fb(driver, uname, pwd): # login to facebook", "selenium.webdriver.common.keys import Keys from flask import Flask import atexit import json from selenium.webdriver.common.desired_capabilities", "\"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings", "# login to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\")", "get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings def google(driver, uname,", "linkedin(driver, uname, pwd): # login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement =", "terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms", "Keys from flask import Flask import atexit import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities", "ConfigParser from selenium import webdriver from selenium.webdriver.common.keys import Keys from flask import Flask", "dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver = 
webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config", "driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() # get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\")", "return terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return", "user_agent = (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108", "'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url)", "\"//div[contains(@class, 'content-main')]\").text return settings def fb(driver, uname, pwd): # login to facebook driver.get(\"https://facebook.com\")", "& privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def fb(driver,", "to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>)", "DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108", "terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\",", "\"user\"),Config.get(\"twitter\", \"password\")) res['l'] = 
linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return str(json_data)", "passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() # get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings =", "terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url) terms", "driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class,", "settings = driver.find_element_by_id('contentArea').text return settings def google(driver, uname, pwd): # login to google", "driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url)", "\"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\"))", "def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver):", "driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings def google(driver, uname, pwd): # login to", "Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__)", "Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 
Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS)", "to google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep otherwise", "= googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data =", "Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return str(json_data) if __name__ == \"__main__\": app.run(debug=True)", "settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def fb(driver, uname, pwd):", "googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data = json.dumps(res)", "passwordelement.submit() # get the security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class,", "uname, pwd): # login to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement =", "time import sleep import ConfigParser from selenium import webdriver from selenium.webdriver.common.keys import Keys", "privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver): url =", "sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver, uname, pwd):", "= TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data = json.dumps(res) return str(json_data)", 
"driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep otherwise it will not find the", "= driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms =", "= 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def InstagramPT(driver): url =", "Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\",", "driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security & privacy settings driver.get(\"https://twitter.com/settings/security\")", "driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url)", "driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms", "-*- from time import sleep import ConfigParser from selenium import webdriver from selenium.webdriver.common.keys", "= driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security & privacy settings", "'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep otherwise it will not 
find", "= driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() # get", "Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap)", "\"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click()", "(KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent", "methods=['GET']) def GetGlobalSettings(): res={} res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver,", "terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms", "def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def", "= webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def", "LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings():", "Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = 
user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap) app", "passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() # get the privacy", "# login to google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to", "sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\")", "'legal')]\").text return terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return", "AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] =", "login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit()", "return terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return", "= driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver, uname, pwd): # login to", "Flask import atexit import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11;", "terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' 
driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms def", "page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings def google(driver, uname, pwd): # login", "python # -*- coding: utf-8 -*- from time import sleep import ConfigParser from", "fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver,", "driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement", "= json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} res['fb'] = fb(driver, Config.get(\"facebook\",", "import ConfigParser from selenium import webdriver from selenium.webdriver.common.keys import Keys from flask import", "= driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms =", "import Flask import atexit import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent = (\"Mozilla/5.0", "to facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname)", "url = 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def InstagramPT(driver): url", "driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = 
driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security", "google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep otherwise it", "= linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return str(json_data) if __name__ ==", "= driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class,", "import DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu", "return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\"))", "driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text", "driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings", "login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement =", "json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} 
res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\",", "\"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver, uname, pwd): # login to twitter driver.get(\"http://twitter.com/login\")", "driver.find_element_by_id(login) loginelement.click() # get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings", "res = {} res['fb'] = FBPT(driver) res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver) res['l']", "privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def fb(driver, uname,", "emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() # get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings", "TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\",", "ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name, content): fout = open(name, \"w\") fout.write(content.encode('utf8'))", "= ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name, content): fout = open(name, \"w\")", "uname, pwd): # login to google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() #", "InstagramPT(driver) json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} res['fb'] =", "\"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) 
res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\",", "return settings def google(driver, uname, pwd): # login to google url = 'https://accounts.google.com/Login'", "get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver):", "res={} res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\"))", "#!/usr/bin/env python # -*- coding: utf-8 -*- from time import sleep import ConfigParser", "methods=['GET']) def GetPrivacyTerms(): res = {} res['fb'] = FBPT(driver) res['g'] = googlePT(driver) res['tw']", "it will not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy", "loginelement = driver.find_element_by_id(login) loginelement.click() # get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text", "= fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] =", "@app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] =", "def save2file(name, content): fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd):", "= driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) 
passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the", "otherwise it will not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the", "= open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd): # login to linkedin", "return terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return terms", "# login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>)", "return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res = {} res['fb'] = FBPT(driver) res['g']", "FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def", "passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text", "# needs to sleep otherwise it will not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>)", "LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def FBPT(driver):", "GetPrivacyTerms(): res = {} res['fb'] = FBPT(driver) res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver)", "linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = 
driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\")", "Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap) app =", "fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd): # login to", "not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\")", "import Keys from flask import Flask import atexit import json from selenium.webdriver.common.desired_capabilities import", "def GetGlobalSettings(): res={} res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver, Config.get(\"google\",", "x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"]", "driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res = {} res['fb'] = FBPT(driver)", "user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye():", "will not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy page", "def twitter(driver, uname, pwd): # login to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( 
\"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\")", "'settings-grid')]\").text return settings def twitter(driver, uname, pwd): # login to twitter driver.get(\"http://twitter.com/login\") emailelement", "\"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\"))", "sleep import ConfigParser from selenium import webdriver from selenium.webdriver.common.keys import Keys from flask", "terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res = {} res['fb'] = FBPT(driver) res['g'] =", "= dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config =", "twitter(driver, uname, pwd): # login to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement", "passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security & privacy", "# -*- coding: utf-8 -*- from time import sleep import ConfigParser from selenium", "= driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() # get the privacy page", "from time import sleep import ConfigParser from selenium import webdriver from selenium.webdriver.common.keys import", "\"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return str(json_data) if __name__ == \"__main__\": app.run(debug=True) atexit.register(bye)", "the element sleep(1) 
driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings =", "return terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms", "= user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def", "= driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def fb(driver, uname, pwd): # login to", "security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def", "FBPT(driver) res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver)", "# get the security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text", "import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36", "res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l']", "= driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res = {} res['fb'] =", "def bye(): driver.quit() def save2file(name, content): fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close def", "the security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( 
\"//div[contains(@class, 'content-main')]\").text return settings", "def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def", "to sleep otherwise it will not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() #", "driver.find_element_by_id(\"signIn\").click() # get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text", "save2file(name, content): fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd): #", "def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET'])", "= 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res", "import webdriver from selenium.webdriver.common.keys import Keys from flask import Flask import atexit import", "res['fb'] = FBPT(driver) res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i']", "Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name, content): fout =", "= FBPT(driver) res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] =", "= driver.find_element_by_id(login) loginelement.click() # get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return", "= driver.find_element_by_xpath(\"//div[contains(@class, 
'maia-article')]\").text return terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url) terms =", "= google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] =", "\"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd): # login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement", "# get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def", "TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver):", "from selenium.webdriver.common.keys import Keys from flask import Flask import atexit import json from", "atexit import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11; Linux x86_64)", "uname, pwd): # login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\")", "dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver = webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config = ConfigParser.ConfigParser()", "= LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def", "app = Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name, content):", "= driver.find_element_by_id(\"session_key-login\") passwordelement = 
driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath(", "= InstagramPT(driver) json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} res['fb']", "like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver", "= (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\")", "driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings", "-*- coding: utf-8 -*- from time import sleep import ConfigParser from selenium import", "fout.close def linkedin(driver, uname, pwd): # login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\")", "driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver, uname, pwd): #", "json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} res['fb'] = fb(driver,", "webdriver from selenium.webdriver.common.keys import Keys from flask import Flask import atexit import json", "privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings def google(driver, 
uname, pwd): #", "utf-8 -*- from time import sleep import ConfigParser from selenium import webdriver from", "driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() # get the", "pwd): # login to google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs", "'maia-article')]\").text return terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text return", "twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit()", "from flask import Flask import atexit import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent", "def fb(driver, uname, pwd): # login to facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement", "sleep otherwise it will not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get", "Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw'] = twitter(driver, Config.get(\"twitter\",", "content): fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd): # login", "settings def fb(driver, uname, pwd): # login to facebook 
driver.get(\"https://facebook.com\") login = \"loginbutton\"", "= 'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en'", "return settings def twitter(driver, uname, pwd): # login to twitter driver.get(\"http://twitter.com/login\") emailelement =", "return settings def fb(driver, uname, pwd): # login to facebook driver.get(\"https://facebook.com\") login =", "login to facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\")", "from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent = (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like", "res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data = json.dumps(res) return", "login to google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep", "driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return", "uname, pwd): # login to facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\")", "terms = driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms =", "res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data", "terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = 
driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms", "@app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res = {} res['fb'] = FBPT(driver) res['g'] = googlePT(driver)", "# get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings def google(driver,", "loginelement.click() # get the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings def", "terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms(): res = {} res['fb']", "driver.find_element_by_id(\"next\").click() # needs to sleep otherwise it will not find the element sleep(1)", "res['i'] = InstagramPT(driver) json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={}", "emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login) loginelement.click() #", "googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def InstagramPT(driver):", "facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>)", "res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return str(json_data) if __name__", "dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver = 
webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\")", "def google(driver, uname, pwd): # login to google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname)", "linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return str(json_data) if __name__ == \"__main__\":", "needs to sleep otherwise it will not find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click()", "element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class,", "\"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security &", "= \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement = driver.find_element_by_name(\"pass\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) loginelement = driver.find_element_by_id(login)", "driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class,", "settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms", "webdriver.PhantomJS(desired_capabilities=dcap) app = Flask(__name__) Config = 
ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name,", "'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy'", "= twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data =", "url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep otherwise it will", "'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update'", "emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings =", "emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get", "{} res['fb'] = FBPT(driver) res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] = LinkedInPT(driver)", "# login to facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement =", "res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", 
\"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\", \"password\")) res['tw']", "GetGlobalSettings(): res={} res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g'] = google(driver, Config.get(\"google\", \"user\"),Config.get(\"google\",", "= 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep otherwise it will not", "'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms def InstagramPT(driver): url = 'http://instagram.com/legal/privacy/'", "import sleep import ConfigParser from selenium import webdriver from selenium.webdriver.common.keys import Keys from", "the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver): url", "'content-main')]\").text return settings def fb(driver, uname, pwd): # login to facebook driver.get(\"https://facebook.com\") login", "= Flask(__name__) Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name, content): fout", "= driver.find_element_by_id('contentArea').text return settings def google(driver, uname, pwd): # login to google url", "from selenium import webdriver from selenium.webdriver.common.keys import Keys from flask import Flask import", "to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2)", "json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent 
= (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML,", "selenium import webdriver from selenium.webdriver.common.keys import Keys from flask import Flask import atexit", "the privacy page driver.get(\"https://www.facebook.com/settings?tab=privacy\") settings = driver.find_element_by_id('contentArea').text return settings def google(driver, uname, pwd):", "driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings = driver.find_element_by_xpath(\"//div[contains(@class, 'lc-mc')]\") return settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url)", "driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text", "coding: utf-8 -*- from time import sleep import ConfigParser from selenium import webdriver", "InstagramPT(driver): url = 'http://instagram.com/legal/privacy/' driver.get(url) terms = driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url", "= driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def FBPT(driver): url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms =", "settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver, uname, pwd): # login", "str(json_data) @app.route(\"/OSPSettings\", methods=['GET']) def GetGlobalSettings(): res={} res['fb'] = fb(driver, Config.get(\"facebook\", \"user\"),Config.get(\"facebook\", \"password\")) res['g']", "url = 'https://www.facebook.com/legal/terms/update' driver.get(url) terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def GetPrivacyTerms():", "fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd): # login to linkedin 
driver.get(\"http://linkedin.com/uas/login\") emailelement =", "def linkedin(driver, uname, pwd): # login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement", "driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url)", "twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res)", "settings def google(driver, uname, pwd): # login to google url = 'https://accounts.google.com/Login' driver.get(url)", "bye(): driver.quit() def save2file(name, content): fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver,", "open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname, pwd): # login to linkedin driver.get(\"http://linkedin.com/uas/login\")", "settings def twitter(driver, uname, pwd): # login to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath(", "settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'maia-article')]\").text return terms", "driver.find_element_by_id('hc2content').text return terms def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text", "Config = ConfigParser.ConfigParser() Config.read(\"credentials.ini\") def bye(): driver.quit() def save2file(name, content): fout = open(name,", "driver.get(url) terms = driver.find_element_by_id('content').text return terms @app.route(\"/GetPrivacyTerms\", methods=['GET']) def 
GetPrivacyTerms(): res = {}", "def GetPrivacyTerms(): res = {} res['fb'] = FBPT(driver) res['g'] = googlePT(driver) res['tw'] =", "= driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return", "get the security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return", "Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap = dict(DesiredCapabilities.PHANTOMJS) dcap[\"phantomjs.page.settings.userAgent\"] = user_agent driver =", "(\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap", "google(driver, uname, pwd): # login to google url = 'https://accounts.google.com/Login' driver.get(url) driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click()", "passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver, uname,", "emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def", "= 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'legal')]\").text return terms def FBPT(driver): url =", "res['l'] = LinkedInPT(driver) res['i'] = InstagramPT(driver) json_data = json.dumps(res) return str(json_data) @app.route(\"/OSPSettings\", 
methods=['GET'])", "settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def fb(driver, uname, pwd): # login", "def TwitterPT(driver): url = 'https://twitter.com/privacy?lang=en' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def", "\"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data = json.dumps(res) return str(json_data) if", "pwd): # login to linkedin driver.get(\"http://linkedin.com/uas/login\") emailelement = driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname)", "pwd): # login to facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement = driver.find_element_by_name(\"email\") passwordelement", "find the element sleep(1) driver.find_element_by_id(\"Passwd\").send_keys(<PASSWORD>) driver.find_element_by_id(\"signIn\").click() # get the privacy page driver.get(\"https://myaccount.google.com/privacy?pli=1\") settings", "driver.find_element_by_id(\"Email\").send_keys(uname) driver.find_element_by_id(\"next\").click() # needs to sleep otherwise it will not find the element", "driver.find_element_by_xpath( \"//div[contains(@class, 'settings-grid')]\").text return settings def twitter(driver, uname, pwd): # login to twitter", "(X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\") dcap =", "passwordelement.send_keys(<PASSWORD>) passwordelement.submit() # get the security & privacy settings driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath(", "return settings.text def googlePT(driver): url = 'https://www.google.com/policies/privacy/' driver.get(url) terms = driver.find_element_by_xpath(\"//div[contains(@class, 
'maia-article')]\").text return", "flask import Flask import atexit import json from selenium.webdriver.common.desired_capabilities import DesiredCapabilities user_agent =", "driver.find_element_by_id(\"session_key-login\") passwordelement = driver.find_element_by_id(\"session_password-login\") emailelement.send_keys(uname) passwordelement.send_keys(<PASSWORD>) passwordelement.submit() sleep(2) driver.get(\"https://www.linkedin.com/psettings/\") settings = driver.find_element_by_xpath( \"//div[contains(@class,", "login to twitter driver.get(\"http://twitter.com/login\") emailelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[username_or_email]']\") passwordelement = driver.find_element_by_xpath( \"//div[@class='signin-wrapper']//input[@name='session[password]']\") emailelement.send_keys(uname)", "driver.get(\"https://twitter.com/settings/security\") settings = driver.find_element_by_xpath( \"//div[contains(@class, 'content-main')]\").text return settings def fb(driver, uname, pwd): #", "res['tw'] = twitter(driver, Config.get(\"twitter\", \"user\"),Config.get(\"twitter\", \"password\")) res['l'] = linkedin(driver, Config.get(\"linkedin\", \"user\"),Config.get(\"linkedin\", \"password\")) json_data", "driver.quit() def save2file(name, content): fout = open(name, \"w\") fout.write(content.encode('utf8')) fout.close def linkedin(driver, uname,", "= {} res['fb'] = FBPT(driver) res['g'] = googlePT(driver) res['tw'] = TwitterPT(driver) res['l'] =", "fb(driver, uname, pwd): # login to facebook driver.get(\"https://facebook.com\") login = \"loginbutton\" emailelement =", "terms = driver.find_element_by_xpath(\"//div[contains(@class, 'UserPolicy-content')]\").text return terms def LinkedInPT(driver): url = 'https://www.linkedin.com/legal/privacy-policy' driver.get(url) terms" ]
[ "from django import forms import voxel_globe.meta.models as models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image", "forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image", "= forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set", "models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point", "queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\",", "django import forms import voxel_globe.meta.models as models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\",", "forms import voxel_globe.meta.models as models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class", "TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name'))", "image_set = forms.ModelChoiceField(label=\"Image Set\", 
queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class", "class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\",", "queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\",", "import forms import voxel_globe.meta.models as models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name'))", "image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) camera_set =", "queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) camera_set = forms.ModelChoiceField(label=\"Camera Set\", queryset=models.CameraSet.objects.all().order_by('name'))", "voxel_globe.meta.models as models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud", "PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", 
queryset=models.Image.objects.all().order_by('name')) class", "class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name'))", "point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form):", "forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set =", "ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) camera_set", "class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name'))", "= forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) camera_set = forms.ModelChoiceField(label=\"Camera", "= forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", 
queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form):", "Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image = forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image", "forms.ModelChoiceField(label=\"Image\", queryset=models.Image.objects.all().order_by('name')) class CameraForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) camera_set = forms.ModelChoiceField(label=\"Camera Set\",", "as models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud =", "import voxel_globe.meta.models as models class TiePointForm(forms.Form): image_set = forms.ModelChoiceField(label=\"Image Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form):", "Set\", queryset=models.ImageSet.objects.all().order_by('name')) class PointCloudForm(forms.Form): point_cloud = forms.ModelChoiceField(label=\"Point Cloud\", queryset=models.PointCloud.objects.all().order_by('name')) class ImageForm(forms.Form): image =" ]
[ "tk import asyncio __all__ = ['run_loop'] async def run_loop(app, interval=0.05): try: while True:", "app.update() await asyncio.sleep(interval) except tk.TclError as e: if \"application has been destroyed\" not", "except tk.TclError as e: if \"application has been destroyed\" not in e.args[0]: raise", "['run_loop'] async def run_loop(app, interval=0.05): try: while True: app.update() await asyncio.sleep(interval) except tk.TclError", "while True: app.update() await asyncio.sleep(interval) except tk.TclError as e: if \"application has been", "run_loop(app, interval=0.05): try: while True: app.update() await asyncio.sleep(interval) except tk.TclError as e: if", "interval=0.05): try: while True: app.update() await asyncio.sleep(interval) except tk.TclError as e: if \"application", "asyncio __all__ = ['run_loop'] async def run_loop(app, interval=0.05): try: while True: app.update() await", "= ['run_loop'] async def run_loop(app, interval=0.05): try: while True: app.update() await asyncio.sleep(interval) except", "await asyncio.sleep(interval) except tk.TclError as e: if \"application has been destroyed\" not in", "asyncio.sleep(interval) except tk.TclError as e: if \"application has been destroyed\" not in e.args[0]:", "True: app.update() await asyncio.sleep(interval) except tk.TclError as e: if \"application has been destroyed\"", "try: while True: app.update() await asyncio.sleep(interval) except tk.TclError as e: if \"application has", "async def run_loop(app, interval=0.05): try: while True: app.update() await asyncio.sleep(interval) except tk.TclError as", "__all__ = ['run_loop'] async def run_loop(app, interval=0.05): try: while True: app.update() await asyncio.sleep(interval)", "coding=utf8 import tkinter as tk import asyncio __all__ = ['run_loop'] async def run_loop(app,", "import asyncio __all__ = ['run_loop'] async def run_loop(app, interval=0.05): try: while True: app.update()", "as tk import asyncio __all__ = ['run_loop'] async def run_loop(app, 
interval=0.05): try: while", "import tkinter as tk import asyncio __all__ = ['run_loop'] async def run_loop(app, interval=0.05):", "def run_loop(app, interval=0.05): try: while True: app.update() await asyncio.sleep(interval) except tk.TclError as e:", "tkinter as tk import asyncio __all__ = ['run_loop'] async def run_loop(app, interval=0.05): try:", "# coding=utf8 import tkinter as tk import asyncio __all__ = ['run_loop'] async def", "<gh_stars>10-100 # coding=utf8 import tkinter as tk import asyncio __all__ = ['run_loop'] async" ]
[ "print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape)", "imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen", "np imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256))", "256)) imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1]", "cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR) print(imagen.shape) cv2.imwrite('matrizColorImagen.jpg',imagen) #cv2.imwrite('resizeImagen.jpg',imagen) #cv2.imshow('image',imagen)", "imagen[0][0] = 0 imagen[0][1] = 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32)", "numpy as np imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen =", "imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR) print(imagen.shape)", "imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen", "import cv2 import numpy as np imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape)", "cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen = 
cv2.imread('imagen.jpg')", "cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0", "cv2 import numpy as np imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0])", "as np imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256,", "= cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] =", "cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY)", "0 imagen[0][1] = 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz)", "cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1] = 0 imagen[0][2]", "print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1] = 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz =", "= 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen =", "0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR) print(imagen.shape) cv2.imwrite('matrizColorImagen.jpg',imagen) #cv2.imwrite('resizeImagen.jpg',imagen)", "= cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1] = 0", "= 
cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1] = 0 imagen[0][2] = 0", "imagen = cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0]", "= cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen =", "cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1] = 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen)", "= 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR) print(imagen.shape) cv2.imwrite('matrizColorImagen.jpg',imagen)", "import numpy as np imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen", "0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR)", "= 0 imagen[0][1] = 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape)", "imagen[0][1] = 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen", "imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1] =", "print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0])", "print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 
imagen[0][1] = 0 imagen[0][2] = 0 cv2.imwrite('grayimagen.jpg',imagen) matriz", "imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY) print(imagen.shape) print(imagen[0][0]) imagen[0][0] = 0 imagen[0][1] = 0 imagen[0][2] =", "matriz = np.zeros((256,256),np.float32) print(matriz.shape) cv2.imwrite('matrizImagen.jpg',matriz) imagen = cv2.cvtColor(matriz,cv2.COLOR_GRAY2BGR) print(imagen.shape) cv2.imwrite('matrizColorImagen.jpg',imagen) #cv2.imwrite('resizeImagen.jpg',imagen) #cv2.imshow('image',imagen) #cv2.waitKey(0)", "= cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB) print(imagen.shape) print(imagen[0][0][0]) imagen = cv2.resize(imagen,(256, 256)) imagen = cv2.imread('imagen.jpg') imagen =" ]
[ "currentTop-1 #prune to the right tree elif (i > (currentLower+currentUpper-1)/2 and i <", "currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i == currentTop): returnList.append(int(topList[len(topList)-1]))", "(currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper = currentTop-1 #prune", "(i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper =", "#prune to left tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower", "tree elif (i > (currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1 currentTop-=1 currentLower =", "(i > (currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper", "currentLevel = h currentTop = pow(2, h)-1 currentLower = 1 currentUpper = pow(2,", "pow(2, h)-1): returnList.append(-1) else: currentLevel = h currentTop = pow(2, h)-1 currentLower =", "> (currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper =", "currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper = currentTop-1 #prune to the", "currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i == currentTop): returnList.append(int(topList[len(topList)-1])) currentLevel =", "currentLower = 1 currentUpper = pow(2, h)-2 topList = [] while currentLevel >", "if (i == pow(2, h)-1): returnList.append(-1) else: currentLevel = h currentTop = pow(2,", "for i in q: if (i == pow(2, h)-1): returnList.append(-1) else: currentLevel =", "= (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i == currentTop): 
returnList.append(int(topList[len(topList)-1])) currentLevel = 0", "h)-1 currentLower = 1 currentUpper = pow(2, h)-2 topList = [] while currentLevel", "solution(h, q): returnList = [] for i in q: if (i == pow(2,", "left tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the", "currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper = currentTop-1 #prune to", "(currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper = currentTop-1 #prune to the right tree", "q): returnList = [] for i in q: if (i == pow(2, h)-1):", "= pow(2, h)-1 currentLower = 1 currentUpper = pow(2, h)-2 topList = []", "(currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i == currentTop): returnList.append(int(topList[len(topList)-1])) currentLevel = 0 return", "pow(2, h)-2 topList = [] while currentLevel > 1: topList.append(currentTop) #prune to left", "stays the same currentUpper = currentTop-1 #prune to the right tree elif (i", "(i == pow(2, h)-1): returnList.append(-1) else: currentLevel = h currentTop = pow(2, h)-1", "currentUpper = pow(2, h)-2 topList = [] while currentLevel > 1: topList.append(currentTop) #prune", "h)-2 topList = [] while currentLevel > 1: topList.append(currentTop) #prune to left tree", "if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper", "= h currentTop = pow(2, h)-1 currentLower = 1 currentUpper = pow(2, h)-2", "currentUpper = currentTop-1 #prune to the right tree elif (i > (currentLower+currentUpper-1)/2 and", "returnList = [] for i in q: if (i == pow(2, h)-1): returnList.append(-1)", "h)-1): returnList.append(-1) else: currentLevel = h currentTop = pow(2, h)-1 currentLower = 1", "#currentLower stays the same currentUpper = currentTop-1 #prune to the right tree elif", "h currentTop = pow(2, h)-1 currentLower = 1 
currentUpper = pow(2, h)-2 topList", "[] while currentLevel > 1: topList.append(currentTop) #prune to left tree if (i <=", "currentUpper = currentTop-1 if (i == currentTop): returnList.append(int(topList[len(topList)-1])) currentLevel = 0 return returnList", "<= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper = currentTop-1", "elif (i > (currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1", "= 1 currentUpper = pow(2, h)-2 topList = [] while currentLevel > 1:", "pow(2, h)-1 currentLower = 1 currentUpper = pow(2, h)-2 topList = [] while", "topList.append(currentTop) #prune to left tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2", "= currentTop-1 #prune to the right tree elif (i > (currentLower+currentUpper-1)/2 and i", "currentLevel > 1: topList.append(currentTop) #prune to left tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1", "#prune to the right tree elif (i > (currentLower+currentUpper-1)/2 and i < currentTop):", "== pow(2, h)-1): returnList.append(-1) else: currentLevel = h currentTop = pow(2, h)-1 currentLower", "def solution(h, q): returnList = [] for i in q: if (i ==", "1: topList.append(currentTop) #prune to left tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop =", "currentTop = pow(2, h)-1 currentLower = 1 currentUpper = pow(2, h)-2 topList =", "< currentTop): currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i ==", "in q: if (i == pow(2, h)-1): returnList.append(-1) else: currentLevel = h currentTop", "topList = [] while currentLevel > 1: topList.append(currentTop) #prune to left tree if", "same currentUpper = currentTop-1 #prune to the right tree elif (i > (currentLower+currentUpper-1)/2", "currentTop): currentLevel-=1 
currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i == currentTop):", "= [] for i in q: if (i == pow(2, h)-1): returnList.append(-1) else:", "= pow(2, h)-2 topList = [] while currentLevel > 1: topList.append(currentTop) #prune to", "the same currentUpper = currentTop-1 #prune to the right tree elif (i >", "q: if (i == pow(2, h)-1): returnList.append(-1) else: currentLevel = h currentTop =", "= (currentLower+currentUpper-1)/2 #currentLower stays the same currentUpper = currentTop-1 #prune to the right", "to the right tree elif (i > (currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1", "else: currentLevel = h currentTop = pow(2, h)-1 currentLower = 1 currentUpper =", "(currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1", "i < currentTop): currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i", "i in q: if (i == pow(2, h)-1): returnList.append(-1) else: currentLevel = h", "returnList.append(-1) else: currentLevel = h currentTop = pow(2, h)-1 currentLower = 1 currentUpper", "while currentLevel > 1: topList.append(currentTop) #prune to left tree if (i <= (currentLower+currentUpper-1)/2):", "[] for i in q: if (i == pow(2, h)-1): returnList.append(-1) else: currentLevel", "tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays the same", "right tree elif (i > (currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1 currentTop-=1 currentLower", "currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if (i == currentTop): returnList.append(int(topList[len(topList)-1])) currentLevel", "the right tree elif (i > (currentLower+currentUpper-1)/2 and i < currentTop): currentLevel-=1 currentTop-=1", "1 currentUpper = pow(2, h)-2 topList 
= [] while currentLevel > 1: topList.append(currentTop)", "> 1: topList.append(currentTop) #prune to left tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop", "and i < currentTop): currentLevel-=1 currentTop-=1 currentLower = (currentLower+currentUpper-1)/2+1 currentUpper = currentTop-1 if", "= [] while currentLevel > 1: topList.append(currentTop) #prune to left tree if (i", "to left tree if (i <= (currentLower+currentUpper-1)/2): currentLevel-=1 currentTop = (currentLower+currentUpper-1)/2 #currentLower stays" ]
[ "1 else: break print(f'Fim do programa.\\nA soma é {j} e foram digitados {l}", "número inteiro:\\n')) if c != 999: j = j + c l =", "j + c l = l + 1 else: break print(f'Fim do programa.\\nA", "l = l + 1 else: break print(f'Fim do programa.\\nA soma é {j}", "+ c l = l + 1 else: break print(f'Fim do programa.\\nA soma", "print('Digite 999 para parar.') while True: c = int(input('Digite um número inteiro:\\n')) if", "um número inteiro:\\n')) if c != 999: j = j + c l", "c l = l + 1 else: break print(f'Fim do programa.\\nA soma é", "sem considerar o flag l = j = 0 print('Digite 999 para parar.')", "o flag l = j = 0 print('Digite 999 para parar.') while True:", "flag l = j = 0 print('Digite 999 para parar.') while True: c", "= l + 1 else: break print(f'Fim do programa.\\nA soma é {j} e", "#soma sem considerar o flag l = j = 0 print('Digite 999 para", "0 print('Digite 999 para parar.') while True: c = int(input('Digite um número inteiro:\\n'))", "while True: c = int(input('Digite um número inteiro:\\n')) if c != 999: j", "else: break print(f'Fim do programa.\\nA soma é {j} e foram digitados {l} números.')", "l + 1 else: break print(f'Fim do programa.\\nA soma é {j} e foram", "= j = 0 print('Digite 999 para parar.') while True: c = int(input('Digite", "= 0 print('Digite 999 para parar.') while True: c = int(input('Digite um número", "+ 1 else: break print(f'Fim do programa.\\nA soma é {j} e foram digitados", "999 para parar.') while True: c = int(input('Digite um número inteiro:\\n')) if c", "= j + c l = l + 1 else: break print(f'Fim do", "999: j = j + c l = l + 1 else: break", "l = j = 0 print('Digite 999 para parar.') while True: c =", "j = j + c l = l + 1 else: break print(f'Fim", "j = 0 print('Digite 999 para parar.') while True: c = int(input('Digite um", "para parar.') while True: c = int(input('Digite um número inteiro:\\n')) if c !=", "if c != 999: j = j + c l = l +", "!= 999: j = j + c l = l + 1 else:", "considerar o flag l = j = 0 print('Digite 999 para parar.') 
while", "int(input('Digite um número inteiro:\\n')) if c != 999: j = j + c", "= int(input('Digite um número inteiro:\\n')) if c != 999: j = j +", "True: c = int(input('Digite um número inteiro:\\n')) if c != 999: j =", "parar.') while True: c = int(input('Digite um número inteiro:\\n')) if c != 999:", "c != 999: j = j + c l = l + 1", "inteiro:\\n')) if c != 999: j = j + c l = l", "c = int(input('Digite um número inteiro:\\n')) if c != 999: j = j" ]
[ "y_full, bs): def split(arr, size): arrays = [] while len(arr) > size: slice_", "log(\"## Model params #########################################\") device = torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\":", "= kw['backcast_length'] forecast_length = kw['forecast_length'] for i in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i", "test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line arguments # arg = load_arguments() model_uri", "= df.values # just keep np array here for simplicity. norm_constant = np.max(df)", "from pathlib import Path path = Path(filepath).parent for i in range(1, sublevel +", "\"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256}", "\"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars =", "Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"#### Loading dataset", "def test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments log(\"#### Loading params #######################################\") model_pars, data_pars,", "fit_simple(model, optimiser, data_gen, plot_model, device, data_pars) return net, optimiser def fit_simple(net, optimiser, data_generator,", "= kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE:", "df / norm_constant # small leak to the test set here. 
x_train_batch, y", "on_save_callback is not None: on_save_callback(net, x, target, grad_step, data_pars) if grad_step > max_grad_steps:", "x, target, backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as plt net.eval() _, f", "just keep np array here for simplicity. norm_constant = np.max(df) df = df", "disable_plot = compute_pars[\"disable_plot\"] ### Get Data x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars)", "path_add) return path def log(*s, n=0, m=1): sspace = \"#\" * n sjump", "while len(arr) > size: slice_ = arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr) return", "5, \"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\":", "= data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] # greater than 4 for", "model, optimiser = fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\") ypred = predict(model, data_pars,", "log(\"############ Prediction ##################################\") preds = predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def", "None: on_save_callback(net, x, target, grad_step, data_pars) if grad_step > max_grad_steps: print('Finished.') break return", "= \"#\" * n sjump = \"\\n\" * m print(sjump, sspace, s, sspace,", "fiting ---') initial_grad_step = load(net, optimiser) for grad_step, (x, target) in enumerate(data_generator): grad_step", "#################################################################################################### # Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the module package", "= os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\": data_path, \"forecast_length\":", 
"norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length +", "NBeatsNet VERBOSE = False #################################################################################################### # Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\"", "== 0: log(\"#### Path params ################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path =", "y_train = x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:], y[c:] return x_train, y_train, x_test,", "ff, xx, yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant", "out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line arguments # arg =", "backcast_length + forecast_length), ff, color='r') # plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output)", "plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length +", "print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw):", "size=4, replace=False)): ff, xx, yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i]", "x, target, grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size", "net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = 
{str(grad_step).zfill(6)}, loss", "+ \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\":", "net.eval() _, f = net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222, 223, 224] plt.figure(1)", "on_save_callback(net, x, target, grad_step, data_pars) if grad_step > max_grad_steps: print('Finished.') break return net,", "backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars): import matplotlib.pyplot as", "= data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output =", "= {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step % 100 == 0 or (grad_step", "import numpy as np import torch from torch import optim from torch.nn import", "222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for i in range(4): ff, xx, yy =", "= path.parent path = os.path.join(path.absolute(), path_add) return path def log(*s, n=0, m=1): sspace", "optimiser = optim.Adam(model.parameters()) ### fit model net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device,", "sublevel=1, path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars =", "package root folder \"\"\" from pathlib import Path path = Path(filepath).parent for i", "> max_grad_steps: print('Finished.') break return net, optimiser def predict(model, data_pars, compute_pars=None, out_pars=None, **kw):", "x, target, backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars): import", "fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step = load(net,", "n sjump = \"\\n\" * m print(sjump, sspace, s, 
sspace, flush=True) #################################################################################################### #", "sspace = \"#\" * n sjump = \"\\n\" * m print(sjump, sspace, s,", "data_pars = {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model params #########################################\") device", "223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx,", "model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10,", "def get_params(choice=0, data_path=\"dataset/\", **kw): if choice == 0: log(\"#### Path params ################################################\") data_path", "**kw): if choice == 0: log(\"#### Path params ################################################\") data_path = os_package_root_path(__file__, sublevel=1,", "for rr in split((x_full, y_full), bs): yield rr ###################################################################################################### # Model fit def", "dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return p ############################################################################################################### def plot(net, x,", "forecast_length), ff, color='r') # plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved", "= fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\") ypred = predict(model, data_pars, compute_pars, out_pars)", "10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100, \"disable_plot\":", "plt forecast_length = data_pars[\"forecast_length\"] backcast_length = 
data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path']", "backcast_length + forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) ############################################################################################################### #", "out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line arguments # arg = load_arguments() model_uri =", "_, _ = get_dataset(**data_pars) test_losses = [] model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float))", "path = os.path.join(path.absolute(), path_add) return path def log(*s, n=0, m=1): sspace = \"#\"", "y_train, x_test, y_test, _ = get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size) ### Setup", "8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0,", "out_pars = get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\") x_train, y_train, x_test, y_test, norm_const", "functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the module package root folder \"\"\"", "len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0]", "if VERBOSE: print(df.head(5)) #### Preprocess df = df.values # just keep np array", "loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}')", "grad_step) if on_save_callback is not None: on_save_callback(net, x, target, grad_step, data_pars) if grad_step", "= kw['forecast_length'] for i in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i", "* norm_constant, 
y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length", "keep np array here for simplicity. norm_constant = np.max(df) df = df /", "x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y = np.array(y)[...,", "plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) ############################################################################################################### # save and load model helper", "backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step", "checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\",", "line arguments # arg = load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"#### Loading params #######################################\")", "= {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars =", "> size: slice_ = arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr) return arrays while", "optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step =", "x_train_batch[c:], y[c:] return x_train, y_train, x_test, y_test, norm_constant def data_generator(x_full, y_full, bs): def", "ff, xx, yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, 
backcast_length), xx, color='b') plt.plot(range(backcast_length,", "target, backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as plt net.eval() _, f =", "matplotlib.pyplot as plt net.eval() _, f = net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222,", "dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step % 100", "[221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)):", "index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess df = df.values # just keep", "yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length +", "in range(4): ff, xx, yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx,", "\"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False,", "data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\") from mlmodels.models import", "VERBOSE = False #################################################################################################### # Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get", "plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars): import matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"]", "{\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\":", "= predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) 
print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command", "viz disable_plot = compute_pars[\"disable_plot\"] ### Get Data x_train, y_train, x_test, y_test, _ =", "{loss.item():.6f}') if grad_step % 100 == 0 or (grad_step < 100 and grad_step", "Model = NBeatsNet #################################################################################################### # Dataaset def get_dataset(**kw): data_path = kw['data_path'] train_split_ratio =", "optimiser def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step", "target, grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size =", "backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length),", "path.parent path = os.path.join(path.absolute(), path_add) return path def log(*s, n=0, m=1): sspace =", "yield rr ###################################################################################################### # Model fit def fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device", "= compute_pars[\"disable_plot\"] ### Get Data x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars) data_gen", "get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model fit ############################################\") model,", "sublevel + 1): path = path.parent path = os.path.join(path.absolute(), path_add) return path def", "grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser,", "data_pars[\"backcast_length\"] batch_size = 
compute_pars[\"batch_size\"] # greater than 4 for viz disable_plot = compute_pars[\"disable_plot\"]", "= 1 x_test, y_test, _, _, _ = get_dataset(**data_pars) test_losses = [] model.eval()", "'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint", "compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\") from mlmodels.models import module_load_full,", "model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME):", "= compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots = [221, 222, 223,", "np array here for simplicity. norm_constant = np.max(df) df = df / norm_constant", "Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if __name__ == '__main__': VERBOSE = True", "out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments log(\"#### Loading params", "break return net, optimiser def predict(model, data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1", "y_test, _ = get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size) ### Setup session optimiser", "optimiser.zero_grad() net.train() backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward()", "compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments log(\"#### Loading params #######################################\")", "224] plt.figure(1) 
plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy", "_, f = net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88)", "with torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback is not None: on_save_callback(net, x, target,", "target, backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars): import matplotlib.pyplot", "mlmodels.models import module_load_full, fit, predict module, model = module_load_full(model_uri, model_pars) print(module, model) log(\"############", "**kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _, _, _ = get_dataset(**data_pars) test_losses =", "target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length,", "x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model =", "/ norm_constant # small leak to the test set here. 
x_train_batch, y =", "plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) def plot_model(net, x, target, grad_step, data_pars, disable_plot=False):", "image to {}.'.format(output)) def plot_model(net, x, target, grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"]", "[7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\":", "not None: on_save_callback(net, x, target, grad_step, data_pars) if grad_step > max_grad_steps: print('Finished.') break", "return grad_step return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if choice == 0:", "enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy = p[i] * norm_constant, x_test[i] * norm_constant,", "Path params ################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\"", "if grad_step % 100 == 0 or (grad_step < 100 and grad_step %", "x_test, y_test, norm_const = get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model = NBeatsNet(**model_pars) log(\"####", "{}.'.format(output)) def plot_model(net, x, target, grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length =", "Predict #############################################\") ypred = predict(model, data_pars, compute_pars, out_pars) print(ypred) log(\"#### Plot ###############################################\") plot_predict(ypred,", "+ 1): path = path.parent path = os.path.join(path.absolute(), path_add) return path def log(*s,", "CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) 
model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step']", "\"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path + \"/\"} return model_pars, data_pars, compute_pars, out_pars", "p ############################################################################################################### def plot(net, x, target, backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as", "x, target, grad_step, data_pars) if grad_step > max_grad_steps: print('Finished.') break return net, optimiser", "############################################################################################################### # save and load model helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({", "data_gen = data_generator(x_train, y_train, batch_size) ### Setup session optimiser = optim.Adam(model.parameters()) ### fit", "bs): def split(arr, size): arrays = [] while len(arr) > size: slice_ =", "i in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch", "data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _, _, _ =", "device = torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\":", "############################################\") model, optimiser = fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\") ypred = predict(model,", "kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess df", "y_test, norm_constant def data_generator(x_full, y_full, bs): def split(arr, size): arrays = [] while", "= 
compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()') plot(net, x, target, backcast_length, forecast_length, grad_step)", "print('plot()') plot(net, x, target, backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test, p, data_pars, compute_pars,", "len(arr) > size: slice_ = arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr) return arrays", "grad_step % 100 == 0 or (grad_step < 100 and grad_step % 100", "def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars): import matplotlib.pyplot as plt forecast_length =", "plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length", "log(\"#### Path params ################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd() +", "plot(net, x, target, backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as plt net.eval() _,", "plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) ###############################################################################################################", "Model fit ############################################\") model, optimiser = fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\") ypred", "x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size) ###", "rr ###################################################################################################### # Model fit def fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device =", "arr = arr[size:] arrays.append(arr) return arrays while True: for rr in split((x_full, y_full),", 
"norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b')", "the module package root folder \"\"\" from pathlib import Path path = Path(filepath).parent", "flush=True) #################################################################################################### # Model Model = NBeatsNet #################################################################################################### # Dataaset def get_dataset(**kw): data_path", "as F #################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False #################################################################################################### # Helper", "arrays.append(arr) return arrays while True: for rr in split((x_full, y_full), bs): yield rr", "return path def log(*s, n=0, m=1): sspace = \"#\" * n sjump =", "for viz # disable_plot = compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()') plot(net, x,", "path def log(*s, n=0, m=1): sspace = \"#\" * n sjump = \"\\n\"", "as plt net.eval() _, f = net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222, 223,", "out_path=\"./\"): import matplotlib.pyplot as plt net.eval() _, f = net(torch.tensor(x, dtype=torch.float)) subplots =", "{}.'.format(output)) ############################################################################################################### # save and load model helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"):", "_ = get_dataset(**data_pars) test_losses = [] model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f,", "data_pars, compute_pars=None, out_pars=None, **kw): device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"]", 
"f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) def plot_model(net, x, target, grad_step, data_pars,", "from mlmodels.models import module_load_full, fit, predict module, model = module_load_full(model_uri, model_pars) print(module, model)", "data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\") x_train, y_train, x_test,", "torch.nn import functional as F #################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False", "device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] #", "size): arrays = [] while len(arr) > size: slice_ = arr[:size] arrays.append(slice_) arr", "compute_pars[\"disable_plot\"] ### Get Data x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars) data_gen =", "predict module, model = module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model fit ##################################\") model,", "sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments log(\"####", "exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model", "range(4): ff, xx, yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b')", "Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the module package root folder", "fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length =", 
"\"mycheckpoint\"} out_pars = {\"out_path\": out_path + \"/\"} return model_pars, data_pars, compute_pars, out_pars def", "log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model", "compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots = [221, 222, 223, 224]", "Path(filepath).parent for i in range(1, sublevel + 1): path = path.parent path =", "\"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path + \"/\"} return model_pars, data_pars,", "---') initial_grad_step = load(net, optimiser) for grad_step, (x, target) in enumerate(data_generator): grad_step +=", "log(\"#### Model fit ############################################\") model, optimiser = fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\")", "\"\"\" get the module package root folder \"\"\" from pathlib import Path path", "data_pars) return net, optimiser def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('---", "y = np.array(y)[..., 0] #### Split c = int(len(x_train_batch) * train_split_ratio) x_train, y_train", "torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step %", "x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:], y[c:] return x_train, y_train, x_test, y_test, norm_constant", "print(ypred) log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if __name__ == '__main__': VERBOSE", "bs): yield rr ###################################################################################################### # Model fit def fit(model, data_pars, compute_pars=None, out_pars=None, 
**kw):", "max_grad_steps=500): print('--- fiting ---') initial_grad_step = load(net, optimiser) for grad_step, (x, target) in", "helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(),", "xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff,", "f'{out_path}/n_beats_test.png' subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i in", "#########################\") from mlmodels.models import module_load_full, fit, predict module, model = module_load_full(model_uri, model_pars) print(module,", "model_uri = \"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0,", "data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz", "os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\": data_path, \"forecast_length\": 5,", "0] #### Split c = int(len(x_train_batch) * train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c]", "test_losses = [] model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p", "}, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME)", "#{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) def plot_model(net,", "np import torch from torch import optim from torch.nn import functional as F", 
"x_test, y_test = x_train_batch[c:], y[c:] return x_train, y_train, x_test, y_test, norm_constant def data_generator(x_full,", "# greater than 4 for viz disable_plot = compute_pars[\"disable_plot\"] ### Get Data x_train,", "net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for i in", "def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step =", "= f'{out_path}/n_beats_test.png' subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i", "to {}.'.format(output)) def plot_model(net, x, target, grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length", "np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0] #### Split c = int(len(x_train_batch) * train_split_ratio)", "<reponame>gitter-badger/mlmodels import os import pandas as pd import numpy as np import torch", "command line arguments # arg = load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"#### Loading params", "= [] model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p =", "False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path + \"/\"}", "compute_pars[\"batch_size\"] # greater than 4 for viz disable_plot = compute_pars[\"disable_plot\"] ### Get Data", "100 == 0): with torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback is not None:", "# small leak to the test set here. 
x_train_batch, y = [], []", "- backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0]", "[], [] backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length'] for i in range(backcast_length, len(df)", "slice_ = arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr) return arrays while True: for", "= data_generator(x_train, y_train, batch_size) ### Setup session optimiser = optim.Adam(model.parameters()) ### fit model", "data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0, parse_dates=True) if", "0: log(\"#### Path params ################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd()", "True: for rr in split((x_full, y_full), bs): yield rr ###################################################################################################### # Model fit", "= torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5,", "grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"]", "train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:], y[c:] return x_train,", "log(\"#### Predict #############################################\") ypred = predict(model, data_pars, compute_pars, out_pars) print(ypred) log(\"#### Plot ###############################################\")", "F #################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False 
#################################################################################################### # Helper functions", "{\"out_path\": out_path + \"/\"} return model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True):", "backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') plt.savefig(output) plt.clf()", "target) in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device))", "backcast_length = data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz", "norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g')", "= [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for i in range(4): ff, xx,", "in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss", "= f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length),", "# plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output))", "#################################################################################################### # Model Model = NBeatsNet #################################################################################################### # Dataaset def get_dataset(**kw): data_path =", "222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): 
ff,", "plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') plt.savefig(output)", "optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step return 0 #############################################################################################################", "{\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model params #########################################\") device = torch.device('cpu')", "0 or (grad_step < 100 and grad_step % 100 == 0): with torch.no_grad():", "here. x_train_batch, y = [], [] backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length'] for", "arg = load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars,", "load(net, optimiser) for grad_step, (x, target) in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train()", "the command line arguments log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars =", "Model preparation #########################\") from mlmodels.models import module_load_full, fit, predict module, model = module_load_full(model_uri,", "simplicity. 
norm_constant = np.max(df) df = df / norm_constant # small leak to", "False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png',", "from torch import optim from torch.nn import functional as F #################################################################################################### from mlmodels.model_tch.nbeats.model", "df = pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess df = df.values", "grad_step % 100 == 0): with torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback is", "train_split_ratio = kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) ####", "forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') # plt.title(f'step #{grad_step} ({i})')", "fit ############################################\") model, optimiser = fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\") ypred =", "df.values # just keep np array here for simplicity. 
norm_constant = np.max(df) df", "\"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100, \"disable_plot\": False,", "- forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y", "forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output", "and load model helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict':", "# Model Model = NBeatsNet #################################################################################################### # Dataaset def get_dataset(**kw): data_path = kw['data_path']", "plot(net, x, target, backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars):", "save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def", "rr in split((x_full, y_full), bs): yield rr ###################################################################################################### # Model fit def fit(model,", "path = Path(filepath).parent for i in range(1, sublevel + 1): path = path.parent", "if choice == 0: log(\"#### Path params ################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path)", "dtype=torch.float)) subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for i in range(4):", "data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step = load(net, optimiser) for", "initial_grad_step optimiser.zero_grad() net.train() 
backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device))", "pd import numpy as np import torch from torch import optim from torch.nn", "\"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############", "= F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if", "max_grad_steps: print('Finished.') break return net, optimiser def predict(model, data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"]", "plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length,", "data_path=data_path) log(\"#### Loading dataset #######################################\") x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars) log(\"####", "arrays = [] while len(arr) > size: slice_ = arr[:size] arrays.append(slice_) arr =", "Model fit def fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device = torch.device('cpu') forecast_length =", "for i in range(4): ff, xx, yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0,", "= {\"out_path\": out_path + \"/\"} return model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\",", "out_pars) print(ypred) log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if __name__ == '__main__':", "module package root folder \"\"\" from pathlib import Path path = Path(filepath).parent for", "optimiser def predict(model, data_pars, 
compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _,", "model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\") from mlmodels.models", "4 for viz # disable_plot = compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()') plot(net,", "small leak to the test set here. x_train_batch, y = [], [] backcast_length", "fit, predict module, model = module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model fit ##################################\")", "Model fit ##################################\") model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess)", "log(\"#### Loading dataset #######################################\") x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars) log(\"#### Model", "torch from torch import optim from torch.nn import functional as F #################################################################################################### from", "x_test, y_test, _, _, _ = get_dataset(**data_pars) test_losses = [] model.eval() _, f", "for simplicity. 
norm_constant = np.max(df) df = df / norm_constant # small leak", "optim.Adam(model.parameters()) ### fit model net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, data_pars) return", "== 0): with torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback is not None: on_save_callback(net,", "line arguments log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path)", "'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME =", "model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step return 0", "= data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz #", "device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\":", "import Path path = Path(filepath).parent for i in range(1, sublevel + 1): path", "\"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\":", "model) log(\"############ Model fit ##################################\") model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit", "color='r') plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) ############################################################################################################### # save and load model", "batch_size) ### Setup session optimiser = optim.Adam(model.parameters()) ### fit model net, optimiser= fit_simple(model,", "= [] while len(arr) > 
size: slice_ = arr[:size] arrays.append(slice_) arr = arr[size:]", "model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction ##################################\")", "###################################################################################################### # Model fit def fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device = torch.device('cpu')", "= optim.Adam(model.parameters()) ### fit model net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, data_pars)", "= data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots =", "plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy =", "out_pars): import matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] norm_constant =", "################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True)", "n=0, m=1): sspace = \"#\" * n sjump = \"\\n\" * m print(sjump,", "log(data_path, out_path) data_pars = {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model params", "out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _, _, _ = get_dataset(**data_pars) test_losses", "optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, data_pars) return net, optimiser def fit_simple(net, optimiser,", "#################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False 
#################################################################################################### # Helper functions def", "data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line arguments #", "xx, yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length", "= np.array(y)[..., 0] #### Split c = int(len(x_train_batch) * train_split_ratio) x_train, y_train =", "* norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx,", "data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path,", "= Path(filepath).parent for i in range(1, sublevel + 1): path = path.parent path", "choice == 0: log(\"#### Path params ################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path", "split(arr, size): arrays = [] while len(arr) > size: slice_ = arr[:size] arrays.append(slice_)", "Get Data x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars) data_gen = data_generator(x_train, y_train,", "= {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model params #########################################\") device =", "= kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess", "def fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length", "optim from torch.nn import functional as F #################################################################################################### from 
mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE", "print('Saved image to {}.'.format(output)) ############################################################################################################### # save and load model helper function def", "import matplotlib.pyplot as plt net.eval() _, f = net(torch.tensor(x, dtype=torch.float)) subplots = [221,", "greater than 4 for viz disable_plot = compute_pars[\"disable_plot\"] ### Get Data x_train, y_train,", "Setup session optimiser = optim.Adam(model.parameters()) ### fit model net, optimiser= fit_simple(model, optimiser, data_gen,", "= p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0,", "NBeatsNet(**model_pars) log(\"#### Model fit ############################################\") model, optimiser = fit(model, data_pars, compute_pars) log(\"#### Predict", "preds = predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the", "### Setup session optimiser = optim.Adam(model.parameters()) ### fit model net, optimiser= fit_simple(model, optimiser,", "log(\"############ Model preparation #########################\") from mlmodels.models import module_load_full, fit, predict module, model =", "norm_constant # small leak to the test set here. 
x_train_batch, y = [],", "print('Finished.') break return net, optimiser def predict(model, data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] =", "backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0] ####", "#########################################\") device = torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3,", "= 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored", "get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\") x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars)", "numpy as np import torch from torch import optim from torch.nn import functional", "# just keep np array here for simplicity. 
norm_constant = np.max(df) df =", "if not disable_plot: print('plot()') plot(net, x, target, backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test,", "os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.')", "return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if choice == 0: log(\"#### Path", "to {}.'.format(output)) ############################################################################################################### # save and load model helper function def save(model, optimiser,", "= [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4,", "import torch from torch import optim from torch.nn import functional as F ####################################################################################################", "as np import torch from torch import optim from torch.nn import functional as", "than 4 for viz disable_plot = compute_pars[\"disable_plot\"] ### Get Data x_train, y_train, x_test,", "def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict'])", "range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[...,", "= x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:], y[c:] return x_train, y_train, x_test, y_test,", "print(sjump, sspace, s, sspace, flush=True) 
#################################################################################################### # Model Model = NBeatsNet #################################################################################################### #", "def log(*s, n=0, m=1): sspace = \"#\" * n sjump = \"\\n\" *", "as pd import numpy as np import torch from torch import optim from", "xx, yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id])", "optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint =", "f = net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for", "compute_pars[\"batch_size\"] # greater than 4 for viz # disable_plot = compute_pars.get(\"disable_plot\", False) if", "y_test = x_train_batch[c:], y[c:] return x_train, y_train, x_test, y_test, norm_constant def data_generator(x_full, y_full,", "df = df.values # just keep np array here for simplicity. 
norm_constant =", "= {loss.item():.6f}') if grad_step % 100 == 0 or (grad_step < 100 and", "pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess df = df.values # just", "folder \"\"\" from pathlib import Path path = Path(filepath).parent for i in range(1,", "x_test, y_test, _ = get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size) ### Setup session", "'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored checkpoint", "split((x_full, y_full), bs): yield rr ###################################################################################################### # Model fit def fit(model, data_pars, compute_pars=None,", "save and load model helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step,", "pandas as pd import numpy as np import torch from torch import optim", "def data_generator(x_full, y_full, bs): def split(arr, size): arrays = [] while len(arr) >", "= model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return p ############################################################################################################### def", "4 for viz disable_plot = compute_pars[\"disable_plot\"] ### Get Data x_train, y_train, x_test, y_test,", "256} compute_pars = {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"}", "array here for simplicity. 
norm_constant = np.max(df) df = df / norm_constant #", "out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for", "y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length),", "fit ##################################\") model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############", "yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved image to", "y_train, batch_size) ### Setup session optimiser = optim.Adam(model.parameters()) ### fit model net, optimiser=", "in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy = p[i] * norm_constant, x_test[i] *", "module_load_full, fit, predict module, model = module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model fit", "test set here. 
x_train_batch, y = [], [] backcast_length = kw['backcast_length'] forecast_length =", "return net, optimiser def predict(model, data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test,", "backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') # plt.title(f'step", "preparation #########################\") from mlmodels.models import module_load_full, fit, predict module, model = module_load_full(model_uri, model_pars)", "predict(model, data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _, _, _", "Model params #########################################\") device = torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device,", "# disable_plot = compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()') plot(net, x, target, backcast_length,", "data_pars, compute_pars) log(\"#### Predict #############################################\") ypred = predict(model, data_pars, compute_pars, out_pars) print(ypred) log(\"####", "+ \"/\"} return model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the", "= NBeatsNet #################################################################################################### # Dataaset def get_dataset(**kw): data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\",", "###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if __name__ == '__main__': VERBOSE = True test()", "224] plt.figure(1) plt.subplots_adjust(top=0.88) for i in range(4): ff, xx, yy = f.cpu().numpy()[i], x[i],", "or (grad_step < 100 and grad_step % 100 == 0): with torch.no_grad(): save(net,", "ypred = predict(model, data_pars, compute_pars, out_pars) print(ypred) 
log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars,", "predict(model, data_pars, compute_pars, out_pars) print(ypred) log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if", "torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME", "_, _, _ = get_dataset(**data_pars) test_losses = [] model.eval() _, f = model(torch.tensor(x_test,", "model helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict':", "import pandas as pd import numpy as np import torch from torch import", "x_train, y_train = x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:], y[c:] return x_train, y_train,", "optimiser) for grad_step, (x, target) in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast,", "forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] # greater than 4", "path_add=\"\"): \"\"\" get the module package root folder \"\"\" from pathlib import Path", "net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, data_pars) return net, optimiser def fit_simple(net,", "plt.subplots_adjust(top=0.88) for i in range(4): ff, xx, yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i])", "print('Saved image to {}.'.format(output)) def plot_model(net, x, target, grad_step, data_pars, disable_plot=False): forecast_length =", "+ forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved", "y_train, x_test, y_test, norm_const = 
get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model = NBeatsNet(**model_pars)", "def predict(model, data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _, _,", "params ################################################\") data_path = os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path,", "log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"#### Loading", "p, data_pars, compute_pars, out_pars): import matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"] backcast_length =", "and grad_step % 100 == 0): with torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback", "here for simplicity. norm_constant = np.max(df) df = df / norm_constant # small", "return model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line", "dtype=torch.float)).item()) p = f.detach().numpy() return p ############################################################################################################### def plot(net, x, target, backcast_length, forecast_length,", "net, optimiser def predict(model, data_pars, compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test,", "sess) log(\"############ Prediction ##################################\") preds = predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds)", "in split((x_full, y_full), bs): yield rr ###################################################################################################### # Model fit def fit(model, data_pars,", "Model Model = NBeatsNet 
#################################################################################################### # Dataaset def get_dataset(**kw): data_path = kw['data_path'] train_split_ratio", "ff, color='r') # plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image", "to the test set here. x_train_batch, y = [], [] backcast_length = kw['backcast_length']", "0] y = np.array(y)[..., 0] #### Split c = int(len(x_train_batch) * train_split_ratio) x_train,", "if grad_step > max_grad_steps: print('Finished.') break return net, optimiser def predict(model, data_pars, compute_pars=None,", "% 100 == 0 or (grad_step < 100 and grad_step % 100 ==", "### Get Data x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars) data_gen = data_generator(x_train,", "data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] # greater than 4 for", "data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments log(\"#### Loading", "dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss =", "optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step % 100 == 0", "norm_constant def data_generator(x_full, y_full, bs): def split(arr, size): arrays = [] while len(arr)", "# Model fit def fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device = torch.device('cpu') forecast_length", "torch import optim from torch.nn import functional as F #################################################################################################### from mlmodels.model_tch.nbeats.model import", "data_path=data_path) log(\"############ Model preparation 
#########################\") from mlmodels.models import module_load_full, fit, predict module, model", "Preprocess df = df.values # just keep np array here for simplicity. norm_constant", "[NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7, 8],", "grad_step) def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars): import matplotlib.pyplot as plt forecast_length", "checkpoint from {CHECKPOINT_NAME}.') return grad_step return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if", "norm_constant = np.max(df) df = df / norm_constant # small leak to the", "size: slice_ = arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr) return arrays while True:", "data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction ##################################\") preds = predict(model, module,", "data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model params #########################################\") device = torch.device('cpu') model_pars", "forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as plt net.eval() _, f = net(torch.tensor(x, dtype=torch.float))", "grad_step, out_path=\"./\"): import matplotlib.pyplot as plt net.eval() _, f = net(torch.tensor(x, dtype=torch.float)) subplots", "fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction ##################################\") preds =", "def plot_model(net, x, target, grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"]", "matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"] backcast_length 
= data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path", "data_pars, compute_pars, out_pars) print(ypred) log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if __name__", "log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if __name__ == '__main__': VERBOSE =", "+ forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) ############################################################################################################### # save", "test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return p ############################################################################################################### def plot(net, x, target,", "torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\":", "def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line arguments # arg = load_arguments()", "\"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10}", "Prediction ##################################\") preds = predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"):", "os.path.join(path.absolute(), path_add) return path def log(*s, n=0, m=1): sspace = \"#\" * n", "= data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz disable_plot =", "test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments log(\"#### Loading params 
#######################################\") model_pars, data_pars, compute_pars,", "np.max(df) df = df / norm_constant # small leak to the test set", "y_test, p, data_pars, compute_pars, out_pars): import matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"] backcast_length", "import functional as F #################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False ####################################################################################################", "1) df = pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess df =", "= np.max(df) df = df / norm_constant # small leak to the test", "[] backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length'] for i in range(backcast_length, len(df) -", "data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step = load(net, optimiser) for grad_step, (x, target)", "i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy = p[i] * norm_constant, x_test[i]", "##################################\") preds = predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading", "get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size) ### Setup session optimiser = optim.Adam(model.parameters()) ###", "compute_pars=None, out_pars=None, **kw): data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _, _, _ = get_dataset(**data_pars)", "not disable_plot: print('plot()') plot(net, x, target, backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test, p,", "10} log(\"## Model params #########################################\") device = torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK],", "params 
#######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\")", "({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) def plot_model(net, x,", "#################################################################################################### # Dataaset def get_dataset(**kw): data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df", "100 == 0 or (grad_step < 100 and grad_step % 100 == 0):", "subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)),", "\"\\n\" * m print(sjump, sspace, s, sspace, flush=True) #################################################################################################### # Model Model =", "= df / norm_constant # small leak to the test set here. 
x_train_batch,", "= os.path.join(path.absolute(), path_add) return path def log(*s, n=0, m=1): sspace = \"#\" *", "1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path + \"/\"} return model_pars,", "data_pars, compute_pars, out_pars): import matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"]", "model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return", "disable_plot: print('plot()') plot(net, x, target, backcast_length, forecast_length, grad_step) def plot_predict(x_test, y_test, p, data_pars,", "data_generator(x_train, y_train, batch_size) ### Setup session optimiser = optim.Adam(model.parameters()) ### fit model net,", "y_train, x_test, y_test, norm_constant def data_generator(x_full, y_full, bs): def split(arr, size): arrays =", "model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\") x_train, y_train,", "= module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model fit ##################################\") model, sess= fit(model, module,", "compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction ##################################\") preds = predict(model, module, sess, data_pars=data_pars,", "plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) def", "p = f.detach().numpy() return p ############################################################################################################### def plot(net, x, target, backcast_length, forecast_length, grad_step,", "# Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the 
module package root", "= pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess df = df.values #", "forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0] #### Split c =", "backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz disable_plot", "= out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88)", "from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False #################################################################################################### # Helper functions def os_package_root_path(filepath,", "load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars =", "command line arguments log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0,", "data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] #", "module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments", "= get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model fit ############################################\")", "image to {}.'.format(output)) ############################################################################################################### # save and load model helper function def save(model,", "session optimiser = optim.Adam(model.parameters()) ### fit model net, optimiser= fit_simple(model, optimiser, data_gen, plot_model,", "i in range(1, 
sublevel + 1): path = path.parent path = os.path.join(path.absolute(), path_add)", "os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"##", "out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction ##################################\") preds = predict(model, module, sess,", "+ forecast_length), ff, color='r') # plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf()", "x_test[i] * norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length,", "color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') # plt.title(f'step #{grad_step} ({i})') output =", "# greater than 4 for viz # disable_plot = compute_pars.get(\"disable_plot\", False) if not", "fit def fit(model, data_pars, compute_pars=None, out_pars=None, **kw): device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"]", "plt net.eval() _, f = net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222, 223, 224]", "os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars", "for i in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length])", "#### Preprocess df = df.values # just keep np array here for simplicity.", "= os_package_root_path(__file__, sublevel=1, path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path)", "###loading the command line arguments log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars", "m=1): sspace = 
\"#\" * n sjump = \"\\n\" * m print(sjump, sspace,", "mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False #################################################################################################### # Helper functions def os_package_root_path(filepath, sublevel=0,", "(x, target) in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast, forecast = net(torch.tensor(x,", "forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) ############################################################################################################### # save and", "module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model fit ##################################\") model, sess= fit(model, module, data_pars=data_pars,", "y[:c] x_test, y_test = x_train_batch[c:], y[c:] return x_train, y_train, x_test, y_test, norm_constant def", "color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output))", "###loading the command line arguments # arg = load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"####", "log(*s, n=0, m=1): sspace = \"#\" * n sjump = \"\\n\" * m", "sspace, flush=True) #################################################################################################### # Model Model = NBeatsNet #################################################################################################### # Dataaset def get_dataset(**kw):", "the test set here. 
x_train_batch, y = [], [] backcast_length = kw['backcast_length'] forecast_length", "_ = get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size) ### Setup session optimiser =", "# batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz # disable_plot =", "sjump = \"\\n\" * m print(sjump, sspace, s, sspace, flush=True) #################################################################################################### # Model", "forecast_length = kw['forecast_length'] for i in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i])", "0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if choice == 0: log(\"#### Path params", "the command line arguments # arg = load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"#### Loading", "reset=True): ###loading the command line arguments # arg = load_arguments() model_uri = \"model_tch/nbeats.py\"", "model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return p ############################################################################################################### def plot(net,", "def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the module package root folder \"\"\" from", "= torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] # greater", "get the module package root folder \"\"\" from pathlib import Path path =", "dataset #######################################\") x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars) log(\"#### Model setup ##########################################\")", "fit model net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, 
data_pars) return net, optimiser", "{\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7,", "kw['forecast_length'] for i in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i +", "while True: for rr in split((x_full, y_full), bs): yield rr ###################################################################################################### # Model", "False) if not disable_plot: print('plot()') plot(net, x, target, backcast_length, forecast_length, grad_step) def plot_predict(x_test,", "if on_save_callback is not None: on_save_callback(net, x, target, grad_step, data_pars) if grad_step >", "\"#\" * n sjump = \"\\n\" * m print(sjump, sspace, s, sspace, flush=True)", "#######################################\") x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model", "\"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100,", "compute_pars=None, out_pars=None, **kw): device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size", "loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step % 100 ==", "**kw): device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"]", "0): with torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback is not None: on_save_callback(net, x,", "norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots = [221, 222,", "output = 
f'{out_path}/n_beats_test.png' subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for plot_id,", "{str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step % 100 == 0 or (grad_step <", "optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step = load(net, optimiser)", "p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length),", "print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step % 100 == 0 or", "arrays.append(slice_) arr = arr[size:] arrays.append(arr) return arrays while True: for rr in split((x_full,", "sublevel=0, path_add=\"\"): \"\"\" get the module package root folder \"\"\" from pathlib import", "torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return p ############################################################################################################### def plot(net, x, target, backcast_length,", "= arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr) return arrays while True: for rr", "torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback is not None: on_save_callback(net, x, target, grad_step,", "compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()') plot(net, x, target, backcast_length, forecast_length, grad_step) def", "yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant plt.subplot(subplots[plot_id]) plt.grid()", "root folder \"\"\" from pathlib import Path path = Path(filepath).parent for i in", "f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy,", "greater than 4 for viz # disable_plot = compute_pars.get(\"disable_plot\", False) if not disable_plot:", "plt.clf() print('Saved 
image to {}.'.format(output)) ############################################################################################################### # save and load model helper function", "\"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars = {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\":", "1 x_test, y_test, _, _, _ = get_dataset(**data_pars) test_losses = [] model.eval() _,", "# Dataaset def get_dataset(**kw): data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df =", "yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') # plt.title(f'step #{grad_step} ({i})') output", "Path path = Path(filepath).parent for i in range(1, sublevel + 1): path =", "net, optimiser def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting ---')", "[221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for i in range(4): ff, xx, yy", "optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model,", "grad_step > max_grad_steps: print('Finished.') break return net, optimiser def predict(model, data_pars, compute_pars=None, out_pars=None,", "data_gen, plot_model, device, data_pars) return net, optimiser def fit_simple(net, optimiser, data_generator, on_save_callback, device,", "int(len(x_train_batch) * train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:], y[c:]", "batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz # disable_plot = compute_pars.get(\"disable_plot\",", "grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'):", "f = 
model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return p ###############################################################################################################", "grad_step, (x, target) in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast, forecast =", "backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as plt net.eval() _, f = net(torch.tensor(x,", "parse_dates=True) if VERBOSE: print(df.head(5)) #### Preprocess df = df.values # just keep np", "save(net, optimiser, grad_step) if on_save_callback is not None: on_save_callback(net, x, target, grad_step, data_pars)", "'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if", "out_path + \"/\"} return model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading", "Data x_train, y_train, x_test, y_test, _ = get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size)", "module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction ##################################\") preds = predict(model,", "x_test, y_test, norm_constant def data_generator(x_full, y_full, bs): def split(arr, size): arrays = []", "arrays while True: for rr in split((x_full, y_full), bs): yield rr ###################################################################################################### #", "x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g')", "leak to the test set here. 
x_train_batch, y = [], [] backcast_length =", "NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\":", "\"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path + \"/\"} return", "fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\") ypred = predict(model, data_pars, compute_pars, out_pars) print(ypred)", "= data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] # greater than 4", "output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) def plot_model(net, x, target,", "compute_pars) log(\"#### Predict #############################################\") ypred = predict(model, data_pars, compute_pars, out_pars) print(ypred) log(\"#### Plot", "import matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"]", "grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast,", "module, model = module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model fit ##################################\") model, sess=", "path_add=data_path) out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\":", "enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss =", "is not None: on_save_callback(net, x, target, grad_step, data_pars) if grad_step > max_grad_steps: print('Finished.')", "i in 
range(4): ff, xx, yy = f.cpu().numpy()[i], x[i], target[i] plt.subplot(subplots[i]) plt.plot(range(0, backcast_length),", "load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step", "= arr[size:] arrays.append(arr) return arrays while True: for rr in split((x_full, y_full), bs):", "data_path=\"dataset/\", **kw): if choice == 0: log(\"#### Path params ################################################\") data_path = os_package_root_path(__file__,", "print(module, model) log(\"############ Model fit ##################################\") model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={})", "from torch.nn import functional as F #################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE =", "= checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step return 0 ############################################################################################################# def get_params(choice=0,", "optimiser, data_gen, plot_model, device, data_pars) return net, optimiser def fit_simple(net, optimiser, data_generator, on_save_callback,", "##################################\") model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction", "viz # disable_plot = compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()') plot(net, x, target,", "on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step = load(net, optimiser) for grad_step,", "setup ##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model fit 
############################################\") model, optimiser = fit(model,", "optimiser = fit(model, data_pars, compute_pars) log(\"#### Predict #############################################\") ypred = predict(model, data_pars, compute_pars,", "data_pars) if grad_step > max_grad_steps: print('Finished.') break return net, optimiser def predict(model, data_pars,", "initial_grad_step = load(net, optimiser) for grad_step, (x, target) in enumerate(data_generator): grad_step += initial_grad_step", "= np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0] #### Split c = int(len(x_train_batch) *", "np.array(y)[..., 0] #### Split c = int(len(x_train_batch) * train_split_ratio) x_train, y_train = x_train_batch[:c],", "= get_dataset(**data_pars) data_gen = data_generator(x_train, y_train, batch_size) ### Setup session optimiser = optim.Adam(model.parameters())", "norm_const = get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model fit", "= f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) def plot_model(net, x, target, grad_step,", "for grad_step, (x, target) in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad() net.train() backcast, forecast", "##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model fit ############################################\") model, optimiser = fit(model, data_pars,", "y = [], [] backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length'] for i in", "out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\") from mlmodels.models import module_load_full, fit,", "device, data_pars) return net, optimiser def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500):", "# arg = load_arguments() model_uri = 
\"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars, data_pars,", "params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\")", "get_params(choice=0, data_path=\"dataset/\", **kw): if choice == 0: log(\"#### Path params ################################################\") data_path =", "= f.detach().numpy() return p ############################################################################################################### def plot(net, x, target, backcast_length, forecast_length, grad_step, out_path=\"./\"):", "def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), }, CHECKPOINT_NAME)", "y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0] #### Split", "False #################################################################################################### # Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the module", "import optim from torch.nn import functional as F #################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet", "backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length'] for i in range(backcast_length, len(df) - forecast_length):", "CHECKPOINT_NAME) def load(model, optimiser, CHECKPOINT_NAME = 'nbeats-fiting-checkpoint.th'): if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict'])", "Split c = int(len(x_train_batch) * train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c] x_test, y_test", "compute_pars, out_pars = 
get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\") x_train, y_train, x_test, y_test,", "grad_step return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if choice == 0: log(\"####", "print('--- fiting ---') initial_grad_step = load(net, optimiser) for grad_step, (x, target) in enumerate(data_generator):", "success\", sess) log(\"############ Prediction ##################################\") preds = predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars)", "os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the module package root folder \"\"\" from pathlib", "VERBOSE: print(df.head(5)) #### Preprocess df = df.values # just keep np array here", "torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size = compute_pars[\"batch_size\"] # greater than", "backcast_length = data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots", "plt.subplots_adjust(top=0.88) for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy = p[i]", "loss = {loss.item():.6f}') if grad_step % 100 == 0 or (grad_step < 100", "from {CHECKPOINT_NAME}.') return grad_step return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if choice", "out_path) data_pars = {\"data_path\": data_path, \"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model params #########################################\")", "m print(sjump, sspace, s, sspace, flush=True) 
#################################################################################################### # Model Model = NBeatsNet ####################################################################################################", "y_test, norm_const = get_dataset(**data_pars) log(\"#### Model setup ##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model", "target, grad_step, data_pars) if grad_step > max_grad_steps: print('Finished.') break return net, optimiser def", "= get_dataset(**data_pars) test_losses = [] model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test,", "pathlib import Path path = Path(filepath).parent for i in range(1, sublevel + 1):", "kw['backcast_length'] forecast_length = kw['forecast_length'] for i in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i -", "= x_train_batch[c:], y[c:] return x_train, y_train, x_test, y_test, norm_constant def data_generator(x_full, y_full, bs):", "223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for i in range(4): ff, xx, yy = f.cpu().numpy()[i],", "out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots = [221, 222, 223, 224] plt.figure(1)", "+ forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0] #### Split c", "return p ############################################################################################################### def plot(net, x, target, backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot", "x_train_batch = np.array(x_train_batch)[..., 0] y = np.array(y)[..., 0] #### Split c = int(len(x_train_batch)", "y_full), bs): yield rr ###################################################################################################### # Model fit def fit(model, data_pars, compute_pars=None, out_pars=None,", "subplots = [221, 222, 223, 224] plt.figure(1) 
plt.subplots_adjust(top=0.88) for i in range(4): ff,", "100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path", "* norm_constant plt.subplot(subplots[plot_id]) plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy,", "[] while len(arr) > size: slice_ = arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr)", "net.train() backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step()", "for viz disable_plot = compute_pars[\"disable_plot\"] ### Get Data x_train, y_train, x_test, y_test, _", "### fit model net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, data_pars) return net,", "replace=False)): ff, xx, yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] *", "data_generator(x_full, y_full, bs): def split(arr, size): arrays = [] while len(arr) > size:", "== 0 or (grad_step < 100 and grad_step % 100 == 0): with", "range(1, sublevel + 1): path = path.parent path = os.path.join(path.absolute(), path_add) return path", "for i in range(1, sublevel + 1): path = path.parent path = os.path.join(path.absolute(),", "function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimiser.state_dict(), },", "functional as F #################################################################################################### from mlmodels.model_tch.nbeats.model import NBeatsNet VERBOSE = False #################################################################################################### #", "############################################################################################################### 
def plot(net, x, target, backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as plt", "return x_train, y_train, x_test, y_test, norm_constant def data_generator(x_full, y_full, bs): def split(arr, size):", "= \"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path)", "in range(backcast_length, len(df) - forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch =", "predict(model, module, sess, data_pars=data_pars, out_pars=out_pars, compute_pars=compute_pars) print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command line", "plt.grid() plt.plot(range(0, backcast_length), xx, color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length", "ff, color='r') plt.savefig(output) plt.clf() print('Saved image to {}.'.format(output)) ############################################################################################################### # save and load", "for plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy = p[i] *", "disable_plot = compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()') plot(net, x, target, backcast_length, forecast_length,", "as plt forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path =", "forecast_length): x_train_batch.append(df[i - backcast_length:i]) y.append(df[i:i + forecast_length]) x_train_batch = np.array(x_train_batch)[..., 0] y =", "= get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\") from mlmodels.models import module_load_full, fit, predict", "plt.clf() print('Saved image to {}.'.format(output)) def 
plot_model(net, x, target, grad_step, data_pars, disable_plot=False): forecast_length", "\"forecast_length\": 5, \"backcast_length\": 10} log(\"## Model params #########################################\") device = torch.device('cpu') model_pars =", "y_test, _, _, _ = get_dataset(**data_pars) test_losses = [] model.eval() _, f =", "get_dataset(**kw): data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0, parse_dates=True)", "arr[:size] arrays.append(slice_) arr = arr[size:] arrays.append(arr) return arrays while True: for rr in", "= NBeatsNet(**model_pars) log(\"#### Model fit ############################################\") model, optimiser = fit(model, data_pars, compute_pars) log(\"####", "= False #################################################################################################### # Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"): \"\"\" get the", "F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}') if grad_step", "#############################################\") ypred = predict(model, data_pars, compute_pars, out_pars) print(ypred) log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars,", "= load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars", "device, data_pars, max_grad_steps=500): print('--- fiting ---') initial_grad_step = load(net, optimiser) for grad_step, (x,", "data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png'", "= predict(model, data_pars, compute_pars, out_pars) print(ypred) log(\"#### Plot 
###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars)", "\"\"\" from pathlib import Path path = Path(filepath).parent for i in range(1, sublevel", "sspace, s, sspace, flush=True) #################################################################################################### # Model Model = NBeatsNet #################################################################################################### # Dataaset", "= net(torch.tensor(x, dtype=torch.float)) subplots = [221, 222, 223, 224] plt.figure(1) plt.subplots_adjust(top=0.88) for i", "< 100 and grad_step % 100 == 0): with torch.no_grad(): save(net, optimiser, grad_step)", "sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\", sess) log(\"############ Prediction ##################################\") preds", "100 and grad_step % 100 == 0): with torch.no_grad(): save(net, optimiser, grad_step) if", "compute_pars, out_pars) print(ypred) log(\"#### Plot ###############################################\") plot_predict(ypred, data_pars, compute_pars, out_pars) if __name__ ==", "y[c:] return x_train, y_train, x_test, y_test, norm_constant def data_generator(x_full, y_full, bs): def split(arr,", "= torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step", "data_pars[\"train_split_ratio\"] = 1 x_test, y_test, _, _, _ = get_dataset(**data_pars) test_losses = []", "s, sspace, flush=True) #################################################################################################### # Model Model = NBeatsNet #################################################################################################### # Dataaset def", "model_pars) print(module, model) log(\"############ Model fit ##################################\") 
model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars,", "log(\"#### Model setup ##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model fit ############################################\") model, optimiser", "out_path = os.get_cwd() + \"/nbeats_test/\" os.makedirs(out_path, exists_ok=True) log(data_path, out_path) data_pars = {\"data_path\": data_path,", "in range(1, sublevel + 1): path = path.parent path = os.path.join(path.absolute(), path_add) return", "plot_model, device, data_pars) return net, optimiser def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars,", "forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] # greater than", "= {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\": 3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\":", "get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\") from mlmodels.models import module_load_full, fit, predict module,", "import module_load_full, fit, predict module, model = module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model", "plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') # plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png'", "color='r') # plt.title(f'step #{grad_step} ({i})') output = f'{out_path}/n_beats_{grad_step}.png' plt.savefig(output) plt.clf() print('Saved image to", "batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz disable_plot = compute_pars[\"disable_plot\"] ###", "model net, optimiser= fit_simple(model, optimiser, data_gen, plot_model, device, data_pars) return net, optimiser def", "+= initial_grad_step optimiser.zero_grad() net.train() backcast, forecast = net(torch.tensor(x, 
dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target,", "\"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path +", "model = NBeatsNet(**model_pars) log(\"#### Model fit ############################################\") model, optimiser = fit(model, data_pars, compute_pars)", "[] model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy()", "#######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model preparation #########################\") from", "= \"\\n\" * m print(sjump, sspace, s, sspace, flush=True) #################################################################################################### # Model Model", "arguments log(\"#### Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"####", "plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') #", "out_pars = {\"out_path\": out_path + \"/\"} return model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\",", "return net, optimiser def fit_simple(net, optimiser, data_generator, on_save_callback, device, data_pars, max_grad_steps=500): print('--- fiting", "1): path = path.parent path = os.path.join(path.absolute(), path_add) return path def log(*s, n=0,", "\"/\"} return model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command", "forecast = net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, 
dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step =", "def split(arr, size): arrays = [] while len(arr) > size: slice_ = arr[:size]", "_, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item()) p = f.detach().numpy() return p", "+ forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') # plt.title(f'step #{grad_step}", "def get_dataset(**kw): data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0,", "############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if choice == 0: log(\"#### Path params ################################################\")", "plt.figure(1) plt.subplots_adjust(top=0.88) for i in range(4): ff, xx, yy = f.cpu().numpy()[i], x[i], target[i]", "kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path, index_col=0, parse_dates=True) if VERBOSE: print(df.head(5))", "print(\"fit success\", sess) log(\"############ Prediction ##################################\") preds = predict(model, module, sess, data_pars=data_pars, out_pars=out_pars,", "forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r') plt.savefig(output) plt.clf() print('Saved image", "x_train_batch, y = [], [] backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length'] for i", "Dataaset def get_dataset(**kw): data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1) df = pd.read_csv(data_path,", "f.detach().numpy() return p ############################################################################################################### def plot(net, x, target, backcast_length, forecast_length, grad_step, 
out_path=\"./\"): import", "* train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:], y[c:] return", "path = path.parent path = os.path.join(path.absolute(), path_add) return path def log(*s, n=0, m=1):", "3, \"forecast_length\": 5, \"backcast_length\": 10, \"thetas_dims\": [7, 8], \"share_weights_in_stack\": False, \"hidden_layer_units\": 256} compute_pars", "x_train, y_train, x_test, y_test, norm_constant def data_generator(x_full, y_full, bs): def split(arr, size): arrays", "#######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\") x_train,", "torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step return", "color='b') plt.plot(range(backcast_length, backcast_length + forecast_length), yy, color='g') plt.plot(range(backcast_length, backcast_length + forecast_length), ff, color='r')", "Model setup ##########################################\") model = NBeatsNet(**model_pars) log(\"#### Model fit ############################################\") model, optimiser =", "% 100 == 0): with torch.no_grad(): save(net, optimiser, grad_step) if on_save_callback is not", "= [], [] backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length'] for i in range(backcast_length,", "plot_model(net, x, target, grad_step, data_pars, disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] #", "= compute_pars[\"batch_size\"] # greater than 4 for viz disable_plot = compute_pars[\"disable_plot\"] ### Get", "NBeatsNet #################################################################################################### # Dataaset def 
get_dataset(**kw): data_path = kw['data_path'] train_split_ratio = kw.get(\"train_split_ratio\", 1)", "c = int(len(x_train_batch) * train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c] x_test, y_test =", "plot_id, i in enumerate(np.random.choice(range(len(p)), size=4, replace=False)): ff, xx, yy = p[i] * norm_constant,", "import NBeatsNet VERBOSE = False #################################################################################################### # Helper functions def os_package_root_path(filepath, sublevel=0, path_add=\"\"):", "arr[size:] arrays.append(arr) return arrays while True: for rr in split((x_full, y_full), bs): yield", "(grad_step < 100 and grad_step % 100 == 0): with torch.no_grad(): save(net, optimiser,", "checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return", "compute_pars = {\"batch_size\": 100, \"disable_plot\": False, \"norm_contsant\": 1.0, \"result_path\": 'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars", "return arrays while True: for rr in split((x_full, y_full), bs): yield rr ######################################################################################################", "grad_step, data_pars) if grad_step > max_grad_steps: print('Finished.') break return net, optimiser def predict(model,", "data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] # greater than 4 for viz # disable_plot", "{CHECKPOINT_NAME}.') return grad_step return 0 ############################################################################################################# def get_params(choice=0, data_path=\"dataset/\", **kw): if choice ==", "Loading dataset #######################################\") x_train, y_train, x_test, y_test, norm_const = get_dataset(**data_pars) log(\"#### Model setup", 
"\"backcast_length\": 10} log(\"## Model params #########################################\") device = torch.device('cpu') model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK,", "df = df / norm_constant # small leak to the test set here.", "disable_plot=False): forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] # batch_size = compute_pars[\"batch_size\"] # greater", "Loading params #######################################\") model_pars, data_pars, compute_pars, out_pars = get_params(choice=0, data_path=data_path) log(\"############ Model preparation", "'n_beats_test{}.png', \"model_path\": \"mycheckpoint\"} out_pars = {\"out_path\": out_path + \"/\"} return model_pars, data_pars, compute_pars,", "= net(torch.tensor(x, dtype=torch.float).to(device)) loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device)) loss.backward() optimiser.step() print(f'grad_step = {str(grad_step).zfill(6)},", "model = module_load_full(model_uri, model_pars) print(module, model) log(\"############ Model fit ##################################\") model, sess= fit(model,", "get_dataset(**data_pars) test_losses = [] model.eval() _, f = model(torch.tensor(x_test, dtype=torch.float)) test_losses.append(F.mse_loss(f, torch.tensor(y_test, dtype=torch.float)).item())", "#### Split c = int(len(x_train_batch) * train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c] x_test,", "# save and load model helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step':", "load model helper function def save(model, optimiser, grad_step,CHECKPOINT_NAME=\"mycheckpoint\"): torch.save({ 'grad_step': grad_step, 'model_state_dict': model.state_dict(),", "5, \"backcast_length\": 10} log(\"## Model params #########################################\") device = torch.device('cpu') model_pars = {\"stack_types\":", "params #########################################\") device = torch.device('cpu') 
model_pars = {\"stack_types\": [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], \"device\": device, \"nb_blocks_per_stack\":", "out_pars=None, **kw): device = torch.device('cpu') forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] batch_size =", "forecast_length, grad_step) def plot_predict(x_test, y_test, p, data_pars, compute_pars, out_pars): import matplotlib.pyplot as plt", "print(preds) def test(data_path=\"dataset/milk.csv\"): ###loading the command line arguments log(\"#### Loading params #######################################\") model_pars,", "= load(net, optimiser) for grad_step, (x, target) in enumerate(data_generator): grad_step += initial_grad_step optimiser.zero_grad()", "import os import pandas as pd import numpy as np import torch from", "compute_pars, out_pars): import matplotlib.pyplot as plt forecast_length = data_pars[\"forecast_length\"] backcast_length = data_pars[\"backcast_length\"] norm_constant", "data_pars[\"backcast_length\"] norm_constant = compute_pars[\"norm_contsant\"] out_path = out_pars['out_path'] output = f'{out_path}/n_beats_test.png' subplots = [221,", "def plot(net, x, target, backcast_length, forecast_length, grad_step, out_path=\"./\"): import matplotlib.pyplot as plt net.eval()", "log(\"############ Model fit ##################################\") model, sess= fit(model, module, data_pars=data_pars, out_pars=out_pars, compute_pars={}) print(\"fit success\",", "* m print(sjump, sspace, s, sspace, flush=True) #################################################################################################### # Model Model = NBeatsNet", "than 4 for viz # disable_plot = compute_pars.get(\"disable_plot\", False) if not disable_plot: print('plot()')", "if os.path.exists(CHECKPOINT_NAME): checkpoint = torch.load(CHECKPOINT_NAME) model.load_state_dict(checkpoint['model_state_dict']) optimiser.load_state_dict(checkpoint['optimizer_state_dict']) grad_step = checkpoint['grad_step'] 
print(f'Restored checkpoint from", "arguments # arg = load_arguments() model_uri = \"model_tch/nbeats.py\" log(\"#### Loading params #######################################\") model_pars,", "= get_params(choice=0, data_path=data_path) log(\"#### Loading dataset #######################################\") x_train, y_train, x_test, y_test, norm_const =", "print(df.head(5)) #### Preprocess df = df.values # just keep np array here for", "os import pandas as pd import numpy as np import torch from torch", "* n sjump = \"\\n\" * m print(sjump, sspace, s, sspace, flush=True) ####################################################################################################", "optimiser, grad_step) if on_save_callback is not None: on_save_callback(net, x, target, grad_step, data_pars) if", "= compute_pars[\"batch_size\"] # greater than 4 for viz # disable_plot = compute_pars.get(\"disable_plot\", False)", "model_pars, data_pars, compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line arguments", "grad_step = checkpoint['grad_step'] print(f'Restored checkpoint from {CHECKPOINT_NAME}.') return grad_step return 0 ############################################################################################################# def", "compute_pars, out_pars def test2(data_path=\"dataset/milk.csv\", out_path=\"n_beats_test{}.png\", reset=True): ###loading the command line arguments # arg", "= int(len(x_train_batch) * train_split_ratio) x_train, y_train = x_train_batch[:c], y[:c] x_test, y_test = x_train_batch[c:],", "set here. x_train_batch, y = [], [] backcast_length = kw['backcast_length'] forecast_length = kw['forecast_length']" ]
[ "> 0 and fg_num_rois == 0: # sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) *", "# We use numpy rand instead. #rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num =", "between gt rel pairs and all roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_()", "bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois = 0 and fg_num_rois", "= gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes in the", "_sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random sample of RoIs comprising", "those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1)", "0: # sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) *", "object detection proposals to ground-truth targets. 
Produces proposal classification labels and bounding-box regression", "keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_()", "= 0 and fg_num_rois = 0, this should not happen!\") # The indices", "1) roi_pairs = roi_pairs.contiguous() num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image", "0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8]", "compute overlap between gt rel pairs and all roi pairs gt_box_pairs = roi_pairs.new(batch_size,", "21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4]", "rois_per_image, num_classes): \"\"\"Generate a random sample of RoIs comprising foreground and background examples.", "= 0 else: print(\"relpn: bg_num_rois = 0 and fg_num_rois = 0, this should", "rois_per_image)) labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels,", "fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num =", "<NAME> # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified by <NAME> and <NAME>", "fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels, keeps def backward(self, top, propagate_down, bottom): \"\"\"This", "= rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois > 0 and fg_num_rois == 0:", "cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)", 
"super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS =", "values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for the background RoIs", "bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG = False class", "and fg_num_rois == 0: # sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num", "import pdb DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals to", "gt_assignment = torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2)", "_RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals to ground-truth targets. 
Produces proposal classification labels", "gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel,", "(gt_boxes[i, :, 21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel,", "num_boxes): batch_size = gt_boxes.size(0) # compute overlap between gt rel pairs and all", "bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois > 0 and bg_num_rois >", "= torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0 else: print(\"relpn:", "torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image #", "gt_box_pairs_append[i, :, 0] = i # # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs", "= i # # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images", "# no relation continue gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero() n_rel =", "overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset = torch.arange(0, batch_size) * gt_box_pairs.size(1)", "n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] =", "= torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds", "= torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image", "0: # sampling fg 
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num", "bg_inds = bg_inds[rand_num] elif fg_num_rois > 0 and bg_num_rois == 0: # sampling", "elif fg_num_rois > 0 and bg_num_rois == 0: # sampling fg #rand_num =", "to ground-truth targets. Produces proposal classification labels and bounding-box regression targets. \"\"\" def", "= offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps", "gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch =", "labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard", "Licensed under The MIT License [see LICENSE for details] # Written by <NAME>", "21:] > 0).sum() == 0: # no relation continue gt_pairs_i = (gt_boxes[i, :,", "bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0", "number and make an error. # We use numpy rand instead. #rand_num =", "= int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel)", "it will generate very large number and make an error. 
# We use", "# sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois)", "fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0", "forward(self, roi_pairs, gt_boxes, num_boxes): batch_size = gt_boxes.size(0) # compute overlap between gt rel", "large number and make an error. # We use numpy rand instead. #rand_num", "within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois", "for the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds]) roi_pairs_batch[i,:,0] = i", "gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps, 2) batch_size = overlaps.size(0)", "\"\"\" Assign object detection proposals to ground-truth targets. 
Produces proposal classification labels and", "= torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE", "= labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() #", "i # # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images =", "RoIs for i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel()", "gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random sample", "+ gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch", "# Licensed under The MIT License [see LICENSE for details] # Written by", "comprising foreground and background examples. 
\"\"\" # overlaps: (rois x gt_boxes) overlaps =", "np import numpy.random as npr from ..utils.config import cfg from bbox_transform import bbox_transform,", "self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes,", "bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois > 0 and bg_num_rois", "(max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois", "#rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda()", "for i in range(batch_size): if (gt_boxes[i, :, 21:] > 0).sum() == 0: #", "will generate very large number and make an error. # We use numpy", "= min(fg_rois_per_image, fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]", "numpy rand instead. 
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois)", "def forward(self, roi_pairs, gt_boxes, num_boxes): batch_size = gt_boxes.size(0) # compute overlap between gt", "= self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels, keeps def backward(self, top,", "import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG = False class _RelProposalTargetLayer(nn.Module):", "fg_num_rois == 0: # sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num =", "# for i in range(batch_size): # gt_box_pairs_append[i, :, 0] = i # #", "* gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1)", "examples. 
\"\"\" # overlaps: (rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment", "= np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois >", "Include ground-truth boxes in the set of candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size,", "= nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self,", "return rois, labels, keeps def backward(self, top, propagate_down, bottom): \"\"\"This layer does not", "import numpy.random as npr from ..utils.config import cfg from bbox_transform import bbox_transform, bbox_overlaps,", "0 elif bg_num_rois > 0 and fg_num_rois == 0: # sampling bg #rand_num", "labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against the case when", "> 0 and bg_num_rois == 0: # sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) *", ">= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size,", "np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image", "torch import torch.nn as nn import numpy as np import numpy.random as npr", "rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image = rois_per_image -", "= gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] 
gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] =", "def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random sample of RoIs", "overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal", "roi_pairs = roi_pairs.contiguous() num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image =", "= rois_per_image fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois = 0 and fg_num_rois =", "labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against the", "max_overlaps, gt_assignment = torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img =", "= bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois > 0 and bg_num_rois", "roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size): if (gt_boxes[i, :, 21:] > 0).sum()", "We use numpy rand instead. 
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image)", "0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0],", "gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4]", "= 0, this should not happen!\") # The indices that we're selecting (both", "Microsoft # Licensed under The MIT License [see LICENSE for details] # Written", "fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois > 0 and fg_num_rois", "fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois > 0 and fg_num_rois ==", "# sampling bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image # Seems torch.rand has a", "bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif", "propagate_down, bottom): \"\"\"This layer does not propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs,", "gt rel pairs and all roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for", "= bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois = 0", "ground-truth boxes in the set of candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1),", "batch_size = overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset = torch.arange(0, batch_size)", "fg and bg) keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values", "fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num = 
torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds", "roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i in range(batch_size): # gt_box_pairs_append[i, :,", "range(batch_size): # gt_box_pairs_append[i, :, 0] = i # # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append],", "= fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois > 0 and", "rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois, keeps", "and background examples. \"\"\" # overlaps: (rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous())", "R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see", "import cfg from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG", "0, this should not happen!\") # The indices that we're selecting (both fg", "0 else: print(\"relpn: bg_num_rois = 0 and fg_num_rois = 0, this should not", "image has fewer than max_fg_rois_per_image # foreground RoIs for i in range(batch_size): fg_inds", "= torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes, num_boxes): batch_size = gt_boxes.size(0) # compute overlap", "torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) #", "# Reorganized and modified by <NAME> and <NAME> # -------------------------------------------------------- import torch import", "fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image # Seems", "* bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * 
bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num]", "0: # sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) *", "cfg from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG =", "an image has fewer than max_fg_rois_per_image # foreground RoIs for i in range(batch_size):", "sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for the background", "proposal classification labels and bounding-box regression targets. \"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__()", "rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against", "of candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8]", "# pdb.set_trace() if fg_num_rois > 0 and bg_num_rois > 0: # sampling fg", "num_boxes_per_img = overlaps.size(2) offset = torch.arange(0, batch_size) * gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment)", "gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes", "torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds =", "modified by <NAME> and <NAME> # -------------------------------------------------------- import torch import torch.nn as nn", ">= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois >", "to 0 
labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds]) roi_pairs_batch[i,:,0] = i return labels_rel_batch, roi_pairs_batch, keep_inds_batch", "-1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size,", "# Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE", "backward(self, top, propagate_down, bottom): \"\"\"This layer does not propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self,", "gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel,", "torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image = rois_per_image", "def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS =", "the case when an image has fewer than max_fg_rois_per_image # foreground RoIs for", "torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset =", "very large number and make an error. 
# We use numpy rand instead.", "# compute overlap between gt rel pairs and all roi pairs gt_box_pairs =", "2) batch_size = overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset = torch.arange(0,", "Seems torch.rand has a bug, it will generate very large number and make", "== 0: # sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image)", "max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch =", "proposals to ground-truth targets. Produces proposal classification labels and bounding-box regression targets. \"\"\"", "rois_per_image, self._num_classes_rel) return rois, labels, keeps def backward(self, top, propagate_down, bottom): \"\"\"This layer", "(c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details]", "# roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images = 1 rois_per_image", "= gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_()", "# sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois)", "21 + gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes in the set of candidate", "# sampling fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num =", "> 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel,", "case when an image has fewer than max_fg_rois_per_image # foreground RoIs for i", "rois_per_image 
fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois = 0 and fg_num_rois = 0,", "# gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i", "int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs,", "* bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois > 0 and", "Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License", "in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select background", "= (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds", "= np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image", "cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image,", "rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0 elif", "fewer than max_fg_rois_per_image # foreground RoIs for i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i]", "Reorganized and modified by <NAME> and <NAME> # -------------------------------------------------------- import torch import torch.nn", "class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals to ground-truth targets. 
Produces proposal classification", "# Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT", "co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal = overlaps.size(1)", "rand instead. #rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num", "detection proposals to ground-truth targets. Produces proposal classification labels and bounding-box regression targets.", "gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes in the set", "0 and bg_num_rois > 0: # sampling fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) #", "License [see LICENSE for details] # Written by <NAME> and <NAME> # --------------------------------------------------------", "gt_boxes, num_boxes): batch_size = gt_boxes.size(0) # compute overlap between gt rel pairs and", "as np import numpy.random as npr from ..utils.config import cfg from bbox_transform import", "+ gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes in the set of candidate rois", "should not happen!\") # The indices that we're selecting (both fg and bg)", "> 0 and bg_num_rois > 0: # sampling fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)", "bg_num_rois) # pdb.set_trace() if fg_num_rois > 0 and bg_num_rois > 0: # sampling", "error. # We use numpy rand instead. 
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num", "rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against the case when an", "as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >=", "labels for the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds]) roi_pairs_batch[i,:,0] =", "in the set of candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() #", "= 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels,", "# -------------------------------------------------------- # Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under", "bbox_overlaps_batch2 import pdb DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals", "relation continue gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0))", "batch_size = gt_boxes.size(0) # compute overlap between gt rel pairs and all roi", "bg_num_rois > 0 and fg_num_rois == 0: # sampling bg #rand_num = torch.floor(torch.rand(rois_per_image)", "# Include ground-truth boxes in the set of candidate rois # gt_box_pairs_append =", "npr from ..utils.config import cfg from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2", "and bounding-box regression targets. 
\"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel", "that we're selecting (both fg and bg) keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds)", "gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size): if (gt_boxes[i, :, 21:]", "= 0 elif bg_num_rois > 0 and fg_num_rois == 0: # sampling bg", "= min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel,", "# gt_box_pairs_append[i, :, 0] = i # # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1)", "the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds]) roi_pairs_batch[i,:,0] = i return", "9).zero_() # Guard against the case when an image has fewer than max_fg_rois_per_image", "= co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal =", "fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois > 0", "= torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois", "fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select background RoIs as", "= overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset = torch.arange(0, batch_size) *", "roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images = 1 rois_per_image =", "max_fg_rois_per_image # foreground RoIs for i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= 
cfg.TRAIN.RELPN_FG_THRESH).view(-1)", "range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select background RoIs", "as npr from ..utils.config import cfg from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2,", "foreground RoIs for i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois =", "= gt_boxes.size(0) # compute overlap between gt rel pairs and all roi pairs", "else: print(\"relpn: bg_num_rois = 0 and fg_num_rois = 0, this should not happen!\")", ":4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]] # Include ground-truth", "rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels, keeps def", "keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels, keeps def backward(self,", "rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image =", "min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:,", "self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS)", "bg_num_rois == 0: # sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num =", "# -------------------------------------------------------- # 
-------------------------------------------------------- # Reorganized and modified by <NAME> and <NAME> #", "= torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds", "against the case when an image has fewer than max_fg_rois_per_image # foreground RoIs", "for i in range(batch_size): # gt_box_pairs_append[i, :, 0] = i # # roi_pairs", "roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size): if (gt_boxes[i,", "= torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset", "set of candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] =", "> 0).sum() == 0: # no relation continue gt_pairs_i = (gt_boxes[i, :, 21:]", "gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i in range(batch_size): # gt_box_pairs_append[i, :, 0] =", "numpy.random as npr from ..utils.config import cfg from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2,", "0).sum() == 0: # no relation continue gt_pairs_i = (gt_boxes[i, :, 21:] >", "and <NAME> # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified by <NAME> and", "gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch", "1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois,", "-------------------------------------------------------- # Reorganized and modified by <NAME> and <NAME> # 
-------------------------------------------------------- import torch", "The MIT License [see LICENSE for details] # Written by <NAME> and <NAME>", "rois_per_image - fg_rois_per_this_image # Seems torch.rand has a bug, it will generate very", ":, 0] = i # # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs =", "# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] <", "no relation continue gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0),", "cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace()", "classification labels and bounding-box regression targets. \"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel", "# rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg", "bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois > 0 and", ":4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21", "\"\"\" # overlaps: (rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment =", "= overlaps.size(2) offset = torch.arange(0, batch_size) * gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) +", "bg_inds = bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois =", "torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes, num_boxes): batch_size = 
gt_boxes.size(0) #", "* rois_per_image)) labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois,", "foreground and background examples. \"\"\" # overlaps: (rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(),", "selecting (both fg and bg) keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select", "# print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois > 0 and bg_num_rois > 0:", "bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp", "gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask", "# Written by <NAME> and <NAME> # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and", "rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois > 0 and fg_num_rois == 0: #", "0: # no relation continue gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero() n_rel", "targets. Produces proposal classification labels and bounding-box regression targets. 
\"\"\" def __init__(self, nclasses_rel):", "we're selecting (both fg and bg) keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) #", "in range(batch_size): if (gt_boxes[i, :, 21:] > 0).sum() == 0: # no relation", "<NAME> and <NAME> # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified by <NAME>", "0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8]", "self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes, num_boxes): batch_size = gt_boxes.size(0) # compute", "..utils.config import cfg from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb", "background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) &", "batch_size) * gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size,", "gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]] #", "bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image # Seems torch.rand has a bug, it", "rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0 else:", "offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps >=", "8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes in", "int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, 
rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return", "from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for the background RoIs to", "DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals to ground-truth targets.", "fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois = 0 and fg_num_rois = 0, this", "= all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against the case when an image has", "gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes in the set of candidate rois #", "= rois_per_image - fg_rois_per_this_image # Seems torch.rand has a bug, it will generate", "bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection", "num_proposal = overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset = torch.arange(0, batch_size) * gt_box_pairs.size(1) offset", "= torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois,", "gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random sample of RoIs comprising foreground and", "use numpy rand instead. #rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) *", "arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:]", "RoIs comprising foreground and background examples. 
\"\"\" # overlaps: (rois x gt_boxes) overlaps", "# Seems torch.rand has a bug, it will generate very large number and", "# -------------------------------------------------------- import torch import torch.nn as nn import numpy as np import", ".view(batch_size, -1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch =", "roi_pairs.contiguous() num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION *", "< cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois) #", "torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois)", "Produces proposal classification labels and bounding-box regression targets. \"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer,", "fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image", "# The indices that we're selecting (both fg and bg) keep_inds = torch.cat([fg_inds,", "fg_num_rois > 0 and bg_num_rois > 0: # sampling fg fg_rois_per_this_image = min(fg_rois_per_image,", "# overlaps: (rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps,", "offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask =", "= overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset = torch.arange(0, batch_size) * gt_box_pairs.size(1) offset =", "# 
-------------------------------------------------------- # Reorganized and modified by <NAME> and <NAME> # -------------------------------------------------------- import", "LICENSE for details] # Written by <NAME> and <NAME> # -------------------------------------------------------- # --------------------------------------------------------", "for details] # Written by <NAME> and <NAME> # -------------------------------------------------------- # -------------------------------------------------------- #", "fg_rois_per_this_image # Seems torch.rand has a bug, it will generate very large number", "#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda()", "overlaps: (rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps, 2)", "0 and fg_num_rois == 0: # sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda()", "pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random sample of", "and fg_num_rois = 0, this should not happen!\") # The indices that we're", "rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for", "self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes, num_boxes): batch_size =", "bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image", "\"\"\"This layer does not propagate gradients.\"\"\" pass 
def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image,", "x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps, 2) batch_size =", "bug, it will generate very large number and make an error. # We", "nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs,", "torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds =", "continue gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel,", "rois, labels, keeps def backward(self, top, propagate_down, bottom): \"\"\"This layer does not propagate", "= bg_inds[rand_num] elif fg_num_rois > 0 and bg_num_rois == 0: # sampling fg", "torch.arange(0, batch_size) * gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\", "import numpy as np import numpy.random as npr from ..utils.config import cfg from", "-------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified by <NAME> and <NAME> # --------------------------------------------------------", "\"\"\"Generate a random sample of RoIs comprising foreground and background examples. 
\"\"\" #", "sampling fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda()", "labels and bounding-box regression targets. \"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel =", "pdb.set_trace() if fg_num_rois > 0 and bg_num_rois > 0: # sampling fg fg_rois_per_this_image", "BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel()", "MIT License [see LICENSE for details] # Written by <NAME> and <NAME> #", "fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds =", "Written by <NAME> and <NAME> # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified", "False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals to ground-truth targets. Produces proposal", "pairs and all roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in", "print(\"relpn: bg_num_rois = 0 and fg_num_rois = 0, this should not happen!\") #", "gt_boxes.size(0) # compute overlap between gt rel pairs and all roi pairs gt_box_pairs", "when an image has fewer than max_fg_rois_per_image # foreground RoIs for i in", "this should not happen!\") # The indices that we're selecting (both fg and", "keep_inds_batch[i].copy_(keep_inds) # Select sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels", "num_classes): \"\"\"Generate a random sample of RoIs comprising foreground and background examples. 
\"\"\"", "as nn import numpy as np import numpy.random as npr from ..utils.config import", "* bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num]", "overlaps.size(2) offset = torch.arange(0, batch_size) * gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment", "propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random", "\"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS", "i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select", "bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num =", "= roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i in range(batch_size):", "boxes in the set of candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_()", "0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation", "print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois > 0 and bg_num_rois > 0: #", "# foreground RoIs for i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois", "Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI)", "gt_pairs_i.size(0)) 
gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:, :4] gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4]", "self._num_classes_rel) return rois, labels, keeps def backward(self, top, propagate_down, bottom): \"\"\"This layer does", "> 0: # sampling fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda()", "= roi_pairs.contiguous() num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION", "1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]] # Include", "& (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace() if", "has a bug, it will generate very large number and make an error.", "candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] #", "does not propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate", "roi_pairs, gt_boxes, num_boxes): batch_size = gt_boxes.size(0) # compute overlap between gt rel pairs", "nn import numpy as np import numpy.random as npr from ..utils.config import cfg", "generate very large number and make an error. # We use numpy rand", "regression targets. 
\"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS =", "= torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds])", "rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image", "i in range(batch_size): if (gt_boxes[i, :, 21:] > 0).sum() == 0: # no", "fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random sample of RoIs comprising foreground and background", "= np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image = rois_per_image", "- fg_rois_per_this_image # Seems torch.rand has a bug, it will generate very large", "bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois = 0 and", "import torch import torch.nn as nn import numpy as np import numpy.random as", "all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a random sample of RoIs comprising foreground", "Clamp relation labels for the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds])", "fg_num_rois > 0 and bg_num_rois == 0: # sampling fg #rand_num = torch.floor(torch.rand(rois_per_image)", "#rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda()", "bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign", 
"top, propagate_down, bottom): \"\"\"This layer does not propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs,", "if (gt_boxes[i, :, 21:] > 0).sum() == 0: # no relation continue gt_pairs_i", "cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() # print(fg_num_rois, bg_num_rois) # pdb.set_trace() if fg_num_rois > 0", "0 and bg_num_rois == 0: # sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda()", "has fewer than max_fg_rois_per_image # foreground RoIs for i in range(batch_size): fg_inds =", "an error. # We use numpy rand instead. #rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda()", "by <NAME> and <NAME> # -------------------------------------------------------- import torch import torch.nn as nn import", "background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds]) roi_pairs_batch[i,:,0] = i return labels_rel_batch,", "a bug, it will generate very large number and make an error. 
#", "indices that we're selecting (both fg and bg) keep_inds = torch.cat([fg_inds, bg_inds], 0)", ":, 21:] > 0).sum() == 0: # no relation continue gt_pairs_i = (gt_boxes[i,", "min(fg_rois_per_image, fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] #", "elif bg_num_rois > 0 and fg_num_rois == 0: # sampling bg #rand_num =", "labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size,", "# # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images = 1", "num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))", "Guard against the case when an image has fewer than max_fg_rois_per_image # foreground", "sample of RoIs comprising foreground and background examples. 
\"\"\" # overlaps: (rois x", "1]] # Include ground-truth boxes in the set of candidate rois # gt_box_pairs_append", "from ..utils.config import cfg from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import", "and bg_num_rois == 0: # sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num", "and <NAME> # -------------------------------------------------------- import torch import torch.nn as nn import numpy as", "# Select sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for", "happen!\") # The indices that we're selecting (both fg and bg) keep_inds =", "-------------------------------------------------------- import torch import torch.nn as nn import numpy as np import numpy.random", "gt_box_pairs[i][:n_rel, 4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 +", "== 0: # no relation continue gt_pairs_i = (gt_boxes[i, :, 21:] > 0).nonzero()", "bg_num_rois > 0: # sampling fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num =", "rois_per_image, 9).zero_() # Guard against the case when an image has fewer than", "rel pairs and all roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i", "rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois > 0 and bg_num_rois ==", "background examples. 
\"\"\" # overlaps: (rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps,", "= torch.arange(0, batch_size) * gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels =", "fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image # Seems torch.rand has", "sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num", "pdb DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals to ground-truth", "fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image,", "RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i]", "torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE /", "torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois > 0 and bg_num_rois == 0: #", "Select sampled values from various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for the", "[BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois =", "gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels, keeps def backward(self, top, propagate_down, bottom):", "torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def 
forward(self, roi_pairs, gt_boxes, num_boxes): batch_size", "cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size): if (gt_boxes[i, :, 21:] > 0).sum() ==", "(both fg and bg) keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled", "relation labels for the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds]) roi_pairs_batch[i,:,0]", "= labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against the case", "than max_fg_rois_per_image # foreground RoIs for i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >=", "fg_num_rois) # rand_num = torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling", "co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object", "fg_inds.numel() # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = torch.nonzero((max_overlaps[i]", "bg_rois_per_this_image = 0 elif bg_num_rois > 0 and fg_num_rois == 0: # sampling", "-------------------------------------------------------- # Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under The", "= torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes, num_boxes):", "= torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select background RoIs as those", "(rois x gt_boxes) overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(), gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = 
torch.max(overlaps, 2) batch_size", "= max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_() roi_pairs_batch", "= torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois > 0 and bg_num_rois == 0:", "= fg_inds.numel() # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds =", "rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image =", "= torch.randperm(fg_num_rois).long().cuda() rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda() fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image =", "offset = torch.arange(0, batch_size) * gt_box_pairs.size(1) offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment labels", "self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels, keeps def backward(self, top, propagate_down,", "targets. \"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)", "fg_num_rois = fg_inds.numel() # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds", "all roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size): if", "bg_inds[rand_num] elif fg_num_rois > 0 and bg_num_rois == 0: # sampling fg #rand_num", "ground-truth targets. Produces proposal classification labels and bounding-box regression targets. 
\"\"\" def __init__(self,", "* fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image =", "and modified by <NAME> and <NAME> # -------------------------------------------------------- import torch import torch.nn as", "4:8] = gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel,", "gt_box_pairs[:,:,:8].contiguous()) max_overlaps, gt_assignment = torch.max(overlaps, 2) batch_size = overlaps.size(0) num_proposal = overlaps.size(1) num_boxes_per_img", "= int(cfg.TRAIN.BATCH_SIZE / num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois, keeps =", "layer does not propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes):", "The indices that we're selecting (both fg and bg) keep_inds = torch.cat([fg_inds, bg_inds],", "by <NAME> and <NAME> # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified by", "2015 Microsoft # Licensed under The MIT License [see LICENSE for details] #", "== 0: # sampling bg #rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image)", "and all roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size):", "sampling bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image # Seems torch.rand has a bug,", "bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image # Seems torch.rand has a bug, it will", "labels, keeps def backward(self, top, propagate_down, bottom): \"\"\"This layer does not propagate gradients.\"\"\"", "0 and fg_num_rois = 0, this should not happen!\") # The indices that", "* fg_num_rois).long().cuda() 
rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num]", "and bg_num_rois > 0: # sampling fg fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) # rand_num", "labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, self._num_classes_rel) return rois, labels, keeps", "bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) & (max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1) bg_num_rois = bg_inds.numel() #", "= gt_box_pairs[:,:,:8] # for i in range(batch_size): # gt_box_pairs_append[i, :, 0] = i", "rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois", "sampling fg #rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois) rand_num", "= torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes, num_boxes): batch_size = gt_boxes.size(0)", "import torch.nn as nn import numpy as np import numpy.random as npr from", "= (gt_boxes[i, :, 21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] =", "a random sample of RoIs comprising foreground and background examples. \"\"\" # overlaps:", "= False class _RelProposalTargetLayer(nn.Module): \"\"\" Assign object detection proposals to ground-truth targets. Produces", "<NAME> and <NAME> # -------------------------------------------------------- import torch import torch.nn as nn import numpy", "# Guard against the case when an image has fewer than max_fg_rois_per_image #", "bounding-box regression targets. 
\"\"\" def __init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS", "Assign object detection proposals to ground-truth targets. Produces proposal classification labels and bounding-box", "9).zero_() for i in range(batch_size): if (gt_boxes[i, :, 21:] > 0).sum() == 0:", "all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against the case when an image has fewer", "def backward(self, top, propagate_down, bottom): \"\"\"This layer does not propagate gradients.\"\"\" pass def", "details] # Written by <NAME> and <NAME> # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized", "i in range(batch_size): # gt_box_pairs_append[i, :, 0] = i # # roi_pairs =", "num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs, fg_rois_per_image,", ":, 21:] > 0).nonzero() n_rel = min(gt_box_pairs[i].size(0), gt_pairs_i.size(0)) gt_box_pairs[i][:n_rel, 0:4] = gt_boxes[i][gt_pairs_i[:n_rel, 0]][:,", "<NAME> # -------------------------------------------------------- import torch import torch.nn as nn import numpy as np", "torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def forward(self, roi_pairs, gt_boxes, num_boxes): batch_size = gt_boxes.size(0) # compute overlap between", "and make an error. # We use numpy rand instead. #rand_num = (torch.rand(bg_rois_per_this_image)", "torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select background RoIs as those within", "pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size): if (gt_boxes[i, :,", "under The MIT License [see LICENSE for details] # Written by <NAME> and", "of RoIs comprising foreground and background examples. 
\"\"\" # overlaps: (rois x gt_boxes)", "[see LICENSE for details] # Written by <NAME> and <NAME> # -------------------------------------------------------- #", "fg_num_rois = 0, this should not happen!\") # The indices that we're selecting", "* bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image =", "keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values from various arrays:", "keeps def backward(self, top, propagate_down, bottom): \"\"\"This layer does not propagate gradients.\"\"\" pass", "overlaps.size(1) num_boxes_per_img = overlaps.size(2) offset = torch.arange(0, batch_size) * gt_box_pairs.size(1) offset = offset.view(-1,", "for i in range(batch_size): fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() #", "gt_box_pairs[:,:,:8] # for i in range(batch_size): # gt_box_pairs_append[i, :, 0] = i #", "torch.from_numpy(rand_num).long().cuda() fg_inds = fg_inds[rand_num] fg_rois_per_this_image = rois_per_image bg_rois_per_this_image = 0 elif bg_num_rois >", "# gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i in range(batch_size): # gt_box_pairs_append[i, :, 0]", "make an error. # We use numpy rand instead. #rand_num = (torch.rand(bg_rois_per_this_image) *", "torch.nn as nn import numpy as np import numpy.random as npr from ..utils.config", "the set of candidate rois # gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9]", "instead. 
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num =", "(torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda() rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds =", "/ num_images) fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)) labels, rois, keeps = self._sample_roi_pairs_pytorch(roi_pairs, gt_box_pairs,", "bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG = False class _RelProposalTargetLayer(nn.Module): \"\"\"", "roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_() # Guard against the case when an image", "RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0 roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds]) roi_pairs_batch[i,:,0] = i return labels_rel_batch, roi_pairs_batch,", "overlap between gt rel pairs and all roi pairs gt_box_pairs = roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER,", "roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i in range(batch_size): #", "not propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes): \"\"\"Generate a", "not happen!\") # The indices that we're selecting (both fg and bg) keep_inds", "__init__(self, nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS)", "(gt_boxes[i, :, 21:] > 0).sum() == 0: # no relation continue gt_pairs_i =", "bg_num_rois = 0 and fg_num_rois = 0, this should not happen!\") # The", "1).type_as(gt_assignment) + 
gt_assignment labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\\ .view(batch_size, -1) fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH", "various arrays: labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for the background RoIs to 0", "if fg_num_rois > 0 and bg_num_rois > 0: # sampling fg fg_rois_per_this_image =", "= gt_boxes[i][gt_pairs_i[:n_rel, 1]][:, :4] gt_box_pairs[i][:n_rel, 8] = gt_boxes[i][gt_pairs_i[:n_rel, 0], 21 + gt_pairs_i[:n_rel, 1]]", "bg) keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values from various", "torch.rand has a bug, it will generate very large number and make an", "self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS) def", "numpy as np import numpy.random as npr from ..utils.config import cfg from bbox_transform", ">= cfg.TRAIN.RELPN_FG_THRESH).view(-1) fg_num_rois = fg_inds.numel() # Select background RoIs as those within [BG_THRESH_LO,", "from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2 import pdb DEBUG = False", "gt_box_pairs_append = roi_pairs.new(batch_size, gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i in", "bottom): \"\"\"This layer does not propagate gradients.\"\"\" pass def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image,", "range(batch_size): if (gt_boxes[i, :, 21:] > 0).sum() == 0: # no relation continue", "nclasses_rel): super(_RelProposalTargetLayer, self).__init__() self._num_classes_rel = nclasses_rel self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS) self.BBOX_NORMALIZE_STDS = 
torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) self.BBOX_INSIDE_WEIGHTS", "and bg) keep_inds = torch.cat([fg_inds, bg_inds], 0) keep_inds_batch[i].copy_(keep_inds) # Select sampled values from", "0] = i # # roi_pairs = torch.cat([roi_pairs, gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous()", "random sample of RoIs comprising foreground and background examples. \"\"\" # overlaps: (rois", "= roi_pairs.new(batch_size, cfg.MAX_ROI_PAIR_NUMBER, 9).zero_() for i in range(batch_size): if (gt_boxes[i, :, 21:] >", "gt_box_pairs.size(1), roi_pairs.size(2)).zero_() # gt_box_pairs_append[:,:,1:9] = gt_box_pairs[:,:,:8] # for i in range(batch_size): # gt_box_pairs_append[i,", "= fg_inds[rand_num[:fg_rois_per_this_image]] # sampling bg bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image # Seems torch.rand", "fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH keep_inds_batch = labels.new(batch_size, rois_per_image).zero_() labels_rel_batch = labels.new(batch_size, rois_per_image).zero_()", "0], 21 + gt_pairs_i[:n_rel, 1]] # Include ground-truth boxes in the set of", "gt_box_pairs_append], 1) roi_pairs = roi_pairs.contiguous() num_images = 1 rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images)", "Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for", "np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] elif fg_num_rois > 0", "np.floor(np.random.rand(rois_per_image) * bg_num_rois) rand_num = torch.from_numpy(rand_num).long().cuda() bg_inds = bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image", "labels_rel_batch[i].copy_(labels[i][keep_inds]) # Clamp relation labels for the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] =", "# Clamp relation labels for the background RoIs to 0 labels_rel_batch[i][fg_rois_per_this_image:] = 0", "torch.from_numpy(rand_num).long().cuda() bg_inds = 
bg_inds[rand_num] bg_rois_per_this_image = rois_per_image fg_rois_per_this_image = 0 else: print(\"relpn: bg_num_rois", "in range(batch_size): # gt_box_pairs_append[i, :, 0] = i # # roi_pairs = torch.cat([roi_pairs," ]
[ "def br(): l=[] n=int(input(\"Enter No of Book to Add: \")) for _ in", "with open(\"bookdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added", "print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for", "Search_Book() def main(): while True: print(\"\\n************************************* Admin Menu ***************************************\") op = int(input(\"\\nEnter Your", "################################ # def al(): # a1=Admin() # a1.adminlogin() ############################# Admin LogIn End ##################################", "def sbookreturn(self): ################# student book return ############### SBook_Return(self) def fbookissue(self): ##################### faculty book", "############## Admin Menu ################### # main(self) def getbook(self): ############ book reg. ################# Add_Book(self,Admin.bdist)", "def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showbook()", "Added Successfully\") ######################## faculti register end ############################# ################################# show book detail ##############################z def", "print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for", "print(\"'{Book code : [no. 
of Copies , Book Name]}'\\n\") print(objdist) ############################### show book", "Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################### show faculty detail", "Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail ############# Display_Student(self) def getfaculty(self): ############## faculty reg.", "def getfaculty(self): ############## faculty reg. ############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail ############", "3: sr() elif op == 4: ss() elif op == 5: fr() elif", "l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are", "Main(self): ############## Admin Menu ################### # main(self) def getbook(self): ############ book reg. #################", "log end ############################## ############################## faculty login start ###################### # def fl(): # f1=Faculty()", "#from adminclass import* #import pickle # from studentclass import* # from facultyclass import*", "Book \\n9 Student Book Return \\n10 Faculty Book Issue\\n11 Faculty Book Return\\n12 To", "op == 9 : sbr() elif op == 10: fbi() elif op ==", "Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################ show student detail", "from facultyclass import* ########################## BOOK regs. start ########################################## def br(): l=[] n=int(input(\"Enter No", "= pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55)", "######################## BOOK regs. End ###################################### ########################### Student reg. 
start #################################### def sr(): l=[]", "def main(): while True: print(\"\\n************************************* Admin Menu ***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1", "faculty book issue ################### a4=Admin() a4.fbookissue() def fbr(): ############### faculty book return ######################", "elif op == 12: fc() elif op == 13: sb() elif op ==", "############################ show faculty detail start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while", "pickle from function import* ############################################################################### ################################################################### class Admin: sdist={} fdist={} bdist={} # def", "Fine\\n13 for Search Book \\n0 for Exit\\n\")) if op == 1: br() elif", "reg. ################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail ############# Display_Student(self) def getfaculty(self): ##############", "while True: try: obj = pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\") break print(\"_\"*55)", "Display_Faculty(self) def adminlogin(self): ############## Admin login ############## if Admin_LogIn(self): main() def sbookissue(self): ##############", "reg. start ################################## def fr(): l=[] n=int(input(\"How many Faculty you want to ADD:", "1: br() elif op == 2: bs() elif op == 3: sr() elif", "_ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK regs. End", "as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\") ###########################", "regs. End #################################### ########################## faculti reg. 
start ################################## def fr(): l=[] n=int(input(\"How many", "as p: objdist=pickle.load(p) print(objdist) ############################ show student detail end ################################# ############################ show faculty", "Display_Book(self) def getstudent(self): ############### student reg. ################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail", "def sr(): l=[] n = int(input(\"Enter No of Student To Add: \")) for", "l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties", "detail end ################################# ############################ show faculty detail start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\")", "return ######################### a3=Admin() a3.sbookreturn() def fbi(): ############### faculty book issue ################### a4=Admin() a4.fbookissue()", "int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6", "Successfully\") ######################## faculti register end ############################# ################################# show book detail ##############################z def bs():", "############# Display_Student(self) def getfaculty(self): ############## faculty reg. 
############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty", "in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti register end #############################", "range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp:", "def fc(): #################### fine calculate ################### a5=Admin() a5.dayfine() def sb(): Search_Book() def main():", "\\n10 Faculty Book Issue\\n11 Faculty Book Return\\n12 To Calculate Fine\\n13 for Search Book", "# f1.facultylogin() ############################ faculty login End ####################### ############################ studentbook Issue ################### def sbi():", "Book Issue\\n11 Faculty Book Return\\n12 To Calculate Fine\\n13 for Search Book \\n0 for", "##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp)", "start ########################################## def br(): l=[] n=int(input(\"Enter No of Book to Add: \")) for", "show student detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while True: try:", "return ###################### a5=Admin() a5.fbookreturn() def fc(): #################### fine calculate ################### a5=Admin() a5.dayfine() def", "elif op == 4: ss() elif op == 5: fr() elif op ==", "for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with", "bdist={} # def Main(self): ############## Admin Menu ################### # main(self) def getbook(self): ############", "open(\"bookdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data", "Faculty you want to ADD: \")) 
for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail", "elif op == 7: sbi() elif op == 8 : rb() elif op", "as p: objdist=pickle.load(p) print(\"Book code with Remaining Copies and Name :\") print(\"'{Book code", "Are Added Successfully\") ######################## BOOK regs. End ###################################### ########################### Student reg. start ####################################", "################################## # def sl(): # s1=Student() # s1.studentlogin() ############################## student log end ##############################", "Reg.\\n6 Faculty Detail\\n7 Student Book Issue \\n8 Remove Book \\n9 Student Book Return", "for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\") ########################### student regs.", "# main(self) def getbook(self): ############ book reg. ################# Add_Book(self,Admin.bdist) def showbook(self): ############### book", "l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\") ########################### student regs. End #################################### ##########################", "Student Book Return \\n10 Faculty Book Issue\\n11 Faculty Book Return\\n12 To Calculate Fine\\n13", "code with Remaining Copies and Name :\") print(\"'{Book code : [no. of Copies", "####################### show student detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while True:", "print(type(obj)) # print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as", "student regs. End #################################### ########################## faculti reg. 
start ################################## def fr(): l=[] n=int(input(\"How", "op == 11: fbr() elif op == 12: fc() elif op == 13:", "############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail ############ Display_Faculty(self) def adminlogin(self): ############## Admin", "def fbookissue(self): ##################### faculty book issue ############## FBook_Issue(self) def fbookreturn(self): ######################## faculty book", "############################ faculty login End ####################### ############################ studentbook Issue ################### def sbi(): a1=Admin() a1.sbookissue()", "try: obj = pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\")", "Admin Menu ***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student", "def fbi(): ############### faculty book issue ################### a4=Admin() a4.fbookissue() def fbr(): ############### faculty", "open(\"bookdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\")", "faculty login End ####################### ############################ studentbook Issue ################### def sbi(): a1=Admin() a1.sbookissue() ####################", "fine calculate ################### a5=Admin() a5.dayfine() def sb(): Search_Book() def main(): while True: print(\"\\n*************************************", "op == 4: ss() elif op == 5: fr() elif op == 6:", "show book detail end ########################### ####################### show student detail ############################## def ss(): with", "book detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while True: try: obj", "open(\"facultydetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added 
Successfully\")", "Display_Student(self) def getfaculty(self): ############## faculty reg. ############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail", "elif op == 3: sr() elif op == 4: ss() elif op ==", "faculty detail end ################################## ################################# Admin LogIn ################################ # def al(): # a1=Admin()", "Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for _ in", "Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student", "No of Book to Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail", "def rb(): a2=Admin() a2.bookremove() def sbr(): ############# student book return ######################### a3=Admin() a3.sbookreturn()", "LogIn ################################ # def al(): # a1=Admin() # a1.adminlogin() ############################# Admin LogIn End", "for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for _ in l:", "Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7", "elif op == 9 : sbr() elif op == 10: fbi() elif op", "== 3: sr() elif op == 4: ss() elif op == 5: fr()", "main() def sbookissue(self): ############## Book Issue ################# SBook_Issue(self) def bookremove(self): ################ book remove", "################################## def fr(): l=[] n=int(input(\"How many Faculty you want to ADD: \")) for", "11: fbr() elif op == 12: fc() elif op == 13: sb() elif", "Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail ############ Display_Faculty(self) def adminlogin(self): ############## Admin login", "student detail end ################################# ############################ show faculty detail start 
########################## def fs(): with", "YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK regs. End ###################################### ########################### Student reg. start", "################ book remove ################## Book_Remove(self) def sbookreturn(self): ################# student book return ############### SBook_Return(self)", "print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################ show student", "obj = pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as", "print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code with Remaining", "def fbookreturn(self): ######################## faculty book return ########### FBook_Return(self) def dayfine(self): #################### fine cal", "Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\") l.append(Admin())", "while True: print(\"\\n************************************* Admin Menu ***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2", "_ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\")", "faculty book return ########### FBook_Return(self) def dayfine(self): #################### fine cal ###################### DayFine_cal(self) ################################################################################################", "book return ########### FBook_Return(self) def dayfine(self): #################### fine cal ###################### DayFine_cal(self) 
################################################################################################ #from", "# def fl(): # f1=Faculty() # f1.facultylogin() ############################ faculty login End ####################### ############################", "########################################## def br(): l=[] n=int(input(\"Enter No of Book to Add: \")) for _", "end ############################## ############################## faculty login start ###################### # def fl(): # f1=Faculty() #", "for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook() with", "fr(): l=[] n=int(input(\"How many Faculty you want to ADD: \")) for _ in", "Book Issue \\n8 Remove Book \\n9 Student Book Return \\n10 Faculty Book Issue\\n11", "print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for", "def showfaculty(self): ############# faculty detail ############ Display_Faculty(self) def adminlogin(self): ############## Admin login ##############", "you want to ADD: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for", "################# student book return ############### SBook_Return(self) def fbookissue(self): ##################### faculty book issue ##############", "l=[] n = int(input(\"Enter No of Student To Add: \")) for _ in", "############################ Student Log start ################################## # def sl(): # s1=Student() # s1.studentlogin() ##############################", "pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\") ########################### student regs. 
End #################################### ########################## faculti", "= int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty", "def sbi(): a1=Admin() a1.sbookissue() #################### book remove ################### def rb(): a2=Admin() a2.bookremove() def", "\\n8 Remove Book \\n9 Student Book Return \\n10 Faculty Book Issue\\n11 Faculty Book", "to Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\")", "def al(): # a1=Admin() # a1.adminlogin() ############################# Admin LogIn End ################################## ############################ Student", "# from studentclass import* # from facultyclass import* ########################## BOOK regs. start ##########################################", "regs. End ###################################### ########################### Student reg. start #################################### def sr(): l=[] n =", "book remove ################### def rb(): a2=Admin() a2.bookremove() def sbr(): ############# student book return", "objdist=pickle.load(p) print(objdist) ############################### show faculty detail end ################################## ################################# Admin LogIn ################################ #", "fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\") ########################### student", "op == 6: fs() elif op == 7: sbi() elif op == 8", "***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student", "f1=Faculty() # f1.facultylogin() ############################ faculty login End ####################### ############################ studentbook Issue ################### def", "5: fr() elif op == 6: fs() elif op == 7: sbi() elif", "Issue \\n8 Remove Book \\n9 Student Book Return \\n10 Faculty Book Issue\\n11 
Faculty", "Added Successfully\") ########################### student regs. End #################################### ########################## faculti reg. start ################################## def", "for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with", "End ####################### ############################ studentbook Issue ################### def sbi(): a1=Admin() a1.sbookissue() #################### book remove", "op == 12: fc() elif op == 13: sb() elif op == 0:", "#################################### def sr(): l=[] n = int(input(\"Enter No of Student To Add: \"))", "end ################################# ############################ show faculty detail start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as", "== 4: ss() elif op == 5: fr() elif op == 6: fs()", "start ################################## def fr(): l=[] n=int(input(\"How many Faculty you want to ADD: \"))", "op == 7: sbi() elif op == 8 : rb() elif op ==", "student reg. 
################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail ############# Display_Student(self) def getfaculty(self):", "def fbr(): ############### faculty book return ###################### a5=Admin() a5.fbookreturn() def fc(): #################### fine", "in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as", "fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti", "###################### DayFine_cal(self) ################################################################################################ #from adminclass import* #import pickle # from studentclass import* #", "2: bs() elif op == 3: sr() elif op == 4: ss() elif", "end ############################# ################################# show book detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp:", "###################################### ########################### Student reg. start #################################### def sr(): l=[] n = int(input(\"Enter No", "show student detail end ################################# ############################ show faculty detail start ########################## def fs():", "l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti register end ############################# #################################", "def getbook(self): ############ book reg. 
################# Add_Book(self,Admin.bdist) def showbook(self): ############### book detail ##########", "\\n0 for Exit\\n\")) if op == 1: br() elif op == 2: bs()", "n=int(input(\"Enter No of Book to Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter", "True: try: obj = pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with", "pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p)", "print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for _", "fl(): # f1=Faculty() # f1.facultylogin() ############################ faculty login End ####################### ############################ studentbook Issue", "Successfully\") ########################### student regs. End #################################### ########################## faculti reg. start ################################## def fr():", "p: objdist=pickle.load(p) print(objdist) ############################ show student detail end ################################# ############################ show faculty detail", "if op == 1: br() elif op == 2: bs() elif op ==", "as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ########################", "########################## BOOK regs. 
start ########################################## def br(): l=[] n=int(input(\"Enter No of Book to", "objdist=pickle.load(p) print(\"Book code with Remaining Copies and Name :\") print(\"'{Book code : [no.", "################### a4=Admin() a4.fbookissue() def fbr(): ############### faculty book return ###################### a5=Admin() a5.fbookreturn() def", "op == 5: fr() elif op == 6: fs() elif op == 7:", "detail end ########################### ####################### show student detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as", "fdist={} bdist={} # def Main(self): ############## Admin Menu ################### # main(self) def getbook(self):", "reg. ################# Add_Book(self,Admin.bdist) def showbook(self): ############### book detail ########## Display_Book(self) def getstudent(self): ###############", "obj.showbook() except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book", "ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) # print(type(obj))", "return ############### SBook_Return(self) def fbookissue(self): ##################### faculty book issue ############## FBook_Issue(self) def fbookreturn(self):", "f1.facultylogin() ############################ faculty login End ####################### ############################ studentbook Issue ################### def sbi(): a1=Admin()", "############################# ################################# show book detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while", "# def al(): # a1=Admin() # a1.adminlogin() ############################# Admin LogIn End ################################## ############################", "{_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK", ":\") print(\"'{Book 
code : [no. of Copies , Book Name]}'\\n\") print(objdist) ############################### show", "Successfully\") ######################## BOOK regs. End ###################################### ########################### Student reg. start #################################### def sr():", "Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student Book Issue \\n8 Remove Book", "start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while True: try: obj =", "fbi(): ############### faculty book issue ################### a4=Admin() a4.fbookissue() def fbr(): ############### faculty book", "for Exit\\n\")) if op == 1: br() elif op == 2: bs() elif", "in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\") ########################### student regs. End ####################################", "# s1=Student() # s1.studentlogin() ############################## student log end ############################## ############################## faculty login start", "with open(\"studentdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) # print(type(obj)) #", "import* ########################## BOOK regs. 
start ########################################## def br(): l=[] n=int(input(\"Enter No of Book", "break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code with Remaining Copies and", "of Student To Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for", "################# Add_Book(self,Admin.bdist) def showbook(self): ############### book detail ########## Display_Book(self) def getstudent(self): ############### student", "fs() elif op == 7: sbi() elif op == 8 : rb() elif", "op == 10: fbi() elif op == 11: fbr() elif op == 12:", "def sl(): # s1=Student() # s1.studentlogin() ############################## student log end ############################## ############################## faculty", "n=int(input(\"How many Faculty you want to ADD: \")) for _ in range(n): print(\"\\n_______________________________________________\")", "Issue ################### def sbi(): a1=Admin() a1.sbookissue() #################### book remove ################### def rb(): a2=Admin()", "########################### Student reg. 
start #################################### def sr(): l=[] n = int(input(\"Enter No of", "= int(input(\"Enter No of Student To Add: \")) for _ in range(n): print(\"\\n_______________________________________________\")", "book issue ################### a4=Admin() a4.fbookissue() def fbr(): ############### faculty book return ###################### a5=Admin()", "Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student Book Issue \\n8 Remove", "l=[] n=int(input(\"How many Faculty you want to ADD: \")) for _ in range(n):", "########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp)", "objdist=pickle.load(p) print(objdist) ############################ show student detail end ################################# ############################ show faculty detail start", "Admin login ############## if Admin_LogIn(self): main() def sbookissue(self): ############## Book Issue ################# SBook_Issue(self)", "except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################", "student detail ############# Display_Student(self) def getfaculty(self): ############## faculty reg. ############### Register_Faculty(self,Admin.fdist) def showfaculty(self):", "fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK", "try: obj = pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\")", "book return ######################### a3=Admin() a3.sbookreturn() def fbi(): ############### faculty book issue ################### a4=Admin()", "elif op == 2: bs() elif op == 3: sr() elif op ==", "open(\"studentdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\")", "faculty reg. 
############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail ############ Display_Faculty(self) def adminlogin(self):", "obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist)", "DayFine_cal(self) ################################################################################################ #from adminclass import* #import pickle # from studentclass import* # from", "open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code with Remaining Copies and Name :\") print(\"'{Book", "Add_Book(self,Admin.bdist) def showbook(self): ############### book detail ########## Display_Book(self) def getstudent(self): ############### student reg.", "detail ############ Display_Faculty(self) def adminlogin(self): ############## Admin login ############## if Admin_LogIn(self): main() def", ": [no. of Copies , Book Name]}'\\n\") print(objdist) ############################### show book detail end", "login start ###################### # def fl(): # f1=Faculty() # f1.facultylogin() ############################ faculty login", "def sbr(): ############# student book return ######################### a3=Admin() a3.sbookreturn() def fbi(): ############### faculty", "True: try: obj = pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with", "Return\\n12 To Calculate Fine\\n13 for Search Book \\n0 for Exit\\n\")) if op ==", "EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################### show", "################################# Admin LogIn ################################ # def al(): # a1=Admin() # a1.adminlogin() ############################# Admin", "BOOK regs. 
start ########################################## def br(): l=[] n=int(input(\"Enter No of Book to Add:", "################# SBook_Issue(self) def bookremove(self): ################ book remove ################## Book_Remove(self) def sbookreturn(self): ################# student", "8 : rb() elif op == 9 : sbr() elif op == 10:", "# s1.studentlogin() ############################## student log end ############################## ############################## faculty login start ###################### #", "4: ss() elif op == 5: fr() elif op == 6: fs() elif", "br(): l=[] n=int(input(\"Enter No of Book to Add: \")) for _ in range(n):", "faculty detail ############ Display_Faculty(self) def adminlogin(self): ############## Admin login ############## if Admin_LogIn(self): main()", "################### a5=Admin() a5.dayfine() def sb(): Search_Book() def main(): while True: print(\"\\n************************************* Admin Menu", "print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code with Remaining Copies and Name", "# a1=Admin() # a1.adminlogin() ############################# Admin LogIn End ################################## ############################ Student Log start", "op == 8 : rb() elif op == 9 : sbr() elif op", "== 9 : sbr() elif op == 10: fbi() elif op == 11:", "Copies , Book Name]}'\\n\") print(objdist) ############################### show book detail end ########################### ####################### show", "adminlogin(self): ############## Admin login ############## if Admin_LogIn(self): main() def sbookissue(self): ############## Book Issue", "def Main(self): ############## Admin Menu ################### # main(self) def getbook(self): ############ book reg.", "in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as", "_ in l: pickle.dump(_,fp) print(\"\\nTHANK 
YOU_!!!!\\nStudents Are Added Successfully\") ########################### student regs. End", "pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p)", "Student reg. start #################################### def sr(): l=[] n = int(input(\"Enter No of Student", "print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti register end ############################# ################################# show book", "try: obj = pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\")", "############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp)", "code : [no. of Copies , Book Name]}'\\n\") print(objdist) ############################### show book detail", "Remove Book \\n9 Student Book Return \\n10 Faculty Book Issue\\n11 Faculty Book Return\\n12", "##################### faculty book issue ############## FBook_Issue(self) def fbookreturn(self): ######################## faculty book return ###########", "fbookreturn(self): ######################## faculty book return ########### FBook_Return(self) def dayfine(self): #################### fine cal ######################", "op = int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5", "################################## ############################ Student Log start ################################## # def sl(): # s1=Student() # s1.studentlogin()", "open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################ show student detail end ################################# ############################ show", "Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp)", "Search 
Book \\n0 for Exit\\n\")) if op == 1: br() elif op ==", "many Faculty you want to ADD: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter", "fine cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass import* #import pickle # from studentclass", "fbi() elif op == 11: fbr() elif op == 12: fc() elif op", "def sbookissue(self): ############## Book Issue ################# SBook_Issue(self) def bookremove(self): ################ book remove ##################", "book remove ################## Book_Remove(self) def sbookreturn(self): ################# student book return ############### SBook_Return(self) def", "== 8 : rb() elif op == 9 : sbr() elif op ==", "as p: objdist=pickle.load(p) print(objdist) ############################### show faculty detail end ################################## ################################# Admin LogIn", "################################## ################################# Admin LogIn ################################ # def al(): # a1=Admin() # a1.adminlogin() #############################", "#import pickle # from studentclass import* # from facultyclass import* ########################## BOOK regs.", "of Book to Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for", "############### student reg. 
################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail ############# Display_Student(self) def", "rb() elif op == 9 : sbr() elif op == 10: fbi() elif", "class Admin: sdist={} fdist={} bdist={} # def Main(self): ############## Admin Menu ################### #", "############## Book Issue ################# SBook_Issue(self) def bookremove(self): ################ book remove ################## Book_Remove(self) def", "fbr(): ############### faculty book return ###################### a5=Admin() a5.fbookreturn() def fc(): #################### fine calculate", "faculty login start ###################### # def fl(): # f1=Faculty() # f1.facultylogin() ############################ faculty", "a2=Admin() a2.bookremove() def sbr(): ############# student book return ######################### a3=Admin() a3.sbookreturn() def fbi():", "calculate ################### a5=Admin() a5.dayfine() def sb(): Search_Book() def main(): while True: print(\"\\n************************************* Admin", "Menu ***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4", "################################# show book detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while True:", "l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks", "Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student Book Issue", "if Admin_LogIn(self): main() def sbookissue(self): ############## Book Issue ################# SBook_Issue(self) def bookremove(self): ################", "Book \\n0 for Exit\\n\")) if op == 1: br() elif op == 2:", "# print(type(obj)) # print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\")", "Menu 
################### # main(self) def getbook(self): ############ book reg. ################# Add_Book(self,Admin.bdist) def showbook(self):", "print(objdist) ############################### show book detail end ########################### ####################### show student detail ############################## def", "as fp: while True: try: obj = pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data Finish\")", "range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp:", "Admin_LogIn(self): main() def sbookissue(self): ############## Book Issue ################# SBook_Issue(self) def bookremove(self): ################ book", "start #################################### def sr(): l=[] n = int(input(\"Enter No of Student To Add:", "############################ show student detail end ################################# ############################ show faculty detail start ########################## def", "Issue ################# SBook_Issue(self) def bookremove(self): ################ book remove ################## Book_Remove(self) def sbookreturn(self): #################", "_ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti register end", "faculty book return ###################### a5=Admin() a5.fbookreturn() def fc(): #################### fine calculate ################### a5=Admin()", "[no. 
of Copies , Book Name]}'\\n\") print(objdist) ############################### show book detail end ###########################", "###################### # def fl(): # f1=Faculty() # f1.facultylogin() ############################ faculty login End #######################", "def fr(): l=[] n=int(input(\"How many Faculty you want to ADD: \")) for _", "dayfine(self): #################### fine cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass import* #import pickle #", "and Name :\") print(\"'{Book code : [no. of Copies , Book Name]}'\\n\") print(objdist)", "################################################################### class Admin: sdist={} fdist={} bdist={} # def Main(self): ############## Admin Menu ###################", "#################### fine cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass import* #import pickle # from", "obj = pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as", "showstudent(self): ############### student detail ############# Display_Student(self) def getfaculty(self): ############## faculty reg. 
############### Register_Faculty(self,Admin.fdist)", "fc(): #################### fine calculate ################### a5=Admin() a5.dayfine() def sb(): Search_Book() def main(): while", "== 10: fbi() elif op == 11: fbr() elif op == 12: fc()", "To Calculate Fine\\n13 for Search Book \\n0 for Exit\\n\")) if op == 1:", "start ###################### # def fl(): # f1=Faculty() # f1.facultylogin() ############################ faculty login End", "main(): while True: print(\"\\n************************************* Admin Menu ***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1 Book", "book issue ############## FBook_Issue(self) def fbookreturn(self): ######################## faculty book return ########### FBook_Return(self) def", "print(f\"Enter Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for _", "################################################################################################ #from adminclass import* #import pickle # from studentclass import* # from facultyclass", "################### def rb(): a2=Admin() a2.bookremove() def sbr(): ############# student book return ######################### a3=Admin()", "fp: while True: try: obj = pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent() except", "Student Log start ################################## # def sl(): # s1=Student() # s1.studentlogin() ############################## student", "n = int(input(\"Enter No of Student To Add: \")) for _ in range(n):", "except EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ###############################", "\\n9 Student Book Return \\n10 Faculty Book Issue\\n11 Faculty Book Return\\n12 To Calculate", "############ book reg. ################# Add_Book(self,Admin.bdist) def showbook(self): ############### book detail ########## Display_Book(self) def", "reg. 
############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail ############ Display_Faculty(self) def adminlogin(self): ##############", "To Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\")", "def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) #", "\")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty()", "# a1.adminlogin() ############################# Admin LogIn End ################################## ############################ Student Log start ################################## #", "== 2: bs() elif op == 3: sr() elif op == 4: ss()", "# from facultyclass import* ########################## BOOK regs. start ########################################## def br(): l=[] n=int(input(\"Enter", "to ADD: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\")", "with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################### show faculty detail end ################################## #################################", "Issue\\n11 Faculty Book Return\\n12 To Calculate Fine\\n13 for Search Book \\n0 for Exit\\n\"))", "Book Issue ################# SBook_Issue(self) def bookremove(self): ################ book remove ################## Book_Remove(self) def sbookreturn(self):", "def fl(): # f1=Faculty() # f1.facultylogin() ############################ faculty login End ####################### ############################ studentbook", "show faculty detail start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while True:", "def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showfaculty()", "End 
#################################### ########################## faculti reg. start ################################## def fr(): l=[] n=int(input(\"How many Faculty", "l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK regs. End ###################################### ###########################", "############# student book return ######################### a3=Admin() a3.sbookreturn() def fbi(): ############### faculty book issue", "############### faculty book return ###################### a5=Admin() a5.fbookreturn() def fc(): #################### fine calculate ###################", "Detail\\n7 Student Book Issue \\n8 Remove Book \\n9 Student Book Return \\n10 Faculty", "{_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK", "6: fs() elif op == 7: sbi() elif op == 8 : rb()", "op == 3: sr() elif op == 4: ss() elif op == 5:", "bookremove(self): ################ book remove ################## Book_Remove(self) def sbookreturn(self): ################# student book return ###############", "############# faculty detail ############ Display_Faculty(self) def adminlogin(self): ############## Admin login ############## if Admin_LogIn(self):", "#################################### ########################## faculti reg. 
start ################################## def fr(): l=[] n=int(input(\"How many Faculty you", "want to ADD: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty:", "Admin LogIn End ################################## ############################ Student Log start ################################## # def sl(): #", "Student To Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student:", "Name]}'\\n\") print(objdist) ############################### show book detail end ########################### ####################### show student detail ##############################", "bs() elif op == 3: sr() elif op == 4: ss() elif op", "YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti register end ############################# ################################# show book detail", "Added Successfully\") ######################## BOOK regs. End ###################################### ########################### Student reg. 
start #################################### def", "######################## faculty book return ########### FBook_Return(self) def dayfine(self): #################### fine cal ###################### DayFine_cal(self)", "while True: try: obj = pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55)", "of Copies , Book Name]}'\\n\") print(objdist) ############################### show book detail end ########################### #######################", "with open(\"studentdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents Are Added", "login End ####################### ############################ studentbook Issue ################### def sbi(): a1=Admin() a1.sbookissue() #################### book", "detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while True: try: obj =", "a4.fbookissue() def fbr(): ############### faculty book return ###################### a5=Admin() a5.fbookreturn() def fc(): ####################", "elif op == 6: fs() elif op == 7: sbi() elif op ==", "######################### a3=Admin() a3.sbookreturn() def fbi(): ############### faculty book issue ################### a4=Admin() a4.fbookissue() def", "# def sl(): # s1=Student() # s1.studentlogin() ############################## student log end ############################## ##############################", "elif op == 8 : rb() elif op == 9 : sbr() elif", "show book detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while True: try:", "pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti register end ############################# ################################# show", "<filename>adminclass.py import pickle from function import* ############################################################################### 
################################################################### class Admin: sdist={} fdist={} bdist={}", "a5=Admin() a5.dayfine() def sb(): Search_Book() def main(): while True: print(\"\\n************************************* Admin Menu ***************************************\")", "########################### ####################### show student detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while", "10: fbi() elif op == 11: fbr() elif op == 12: fc() elif", "== 12: fc() elif op == 13: sb() elif op == 0: break", "print(\"\\n************************************* Admin Menu ***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3", "import pickle from function import* ############################################################################### ################################################################### class Admin: sdist={} fdist={} bdist={} #", "\")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent()", "from studentclass import* # from facultyclass import* ########################## BOOK regs. start ########################################## def", "for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ######################## faculti register", "end ########################### ####################### show student detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp:", "== 5: fr() elif op == 6: fs() elif op == 7: sbi()", "detail ############# Display_Student(self) def getfaculty(self): ############## faculty reg. 
############### Register_Faculty(self,Admin.fdist) def showfaculty(self): #############", "FBook_Return(self) def dayfine(self): #################### fine cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass import* #import", "book detail ########## Display_Book(self) def getstudent(self): ############### student reg. ################ Register_Student(self,Admin.sdist) def showstudent(self):", "as fp: while True: try: obj = pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\")", "faculty book issue ############## FBook_Issue(self) def fbookreturn(self): ######################## faculty book return ########### FBook_Return(self)", "print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK regs. End ###################################### ########################### Student reg.", "True: print(\"\\n************************************* Admin Menu ***************************************\") op = int(input(\"\\nEnter Your Opetion\\n1 Book Register\\n2 Book", "YOU_!!!!\\nStudents Are Added Successfully\") ########################### student regs. End #################################### ########################## faculti reg. start", "getstudent(self): ############### student reg. 
################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail ############# Display_Student(self)", "a1.adminlogin() ############################# Admin LogIn End ################################## ############################ Student Log start ################################## # def", "print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p)", "print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################ show student detail end #################################", "p: objdist=pickle.load(p) print(objdist) ############################### show faculty detail end ################################## ################################# Admin LogIn ################################", "9 : sbr() elif op == 10: fbi() elif op == 11: fbr()", "end ################################## ################################# Admin LogIn ################################ # def al(): # a1=Admin() # a1.adminlogin()", "range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp:", "No of Student To Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail", "getbook(self): ############ book reg. ################# Add_Book(self,Admin.bdist) def showbook(self): ############### book detail ########## Display_Book(self)", "import* # from facultyclass import* ########################## BOOK regs. start ########################################## def br(): l=[]", "fbr() elif op == 12: fc() elif op == 13: sb() elif op", "detail ########## Display_Book(self) def getstudent(self): ############### student reg. 
################ Register_Student(self,Admin.sdist) def showstudent(self): ###############", "student book return ######################### a3=Admin() a3.sbookreturn() def fbi(): ############### faculty book issue ###################", "while True: try: obj = pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent() except EOFError:", "############### SBook_Return(self) def fbookissue(self): ##################### faculty book issue ############## FBook_Issue(self) def fbookreturn(self): ########################", "book reg. ################# Add_Book(self,Admin.bdist) def showbook(self): ############### book detail ########## Display_Book(self) def getstudent(self):", "print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################### show faculty", "pickle # from studentclass import* # from facultyclass import* ########################## BOOK regs. start", "a1.sbookissue() #################### book remove ################### def rb(): a2=Admin() a2.bookremove() def sbr(): ############# student", "sl(): # s1=Student() # s1.studentlogin() ############################## student log end ############################## ############################## faculty login", "############################## student log end ############################## ############################## faculty login start ###################### # def fl():", "print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################### show faculty detail end ##################################", "student detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\") as fp: while True: try: obj", "sbr() elif op == 10: fbi() elif op == 11: fbr() elif op", "fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showfaculty() except", "LogIn End ################################## 
############################ Student Log start ################################## # def sl(): # s1=Student()", "Calculate Fine\\n13 for Search Book \\n0 for Exit\\n\")) if op == 1: br()", "fbookissue(self): ##################### faculty book issue ############## FBook_Issue(self) def fbookreturn(self): ######################## faculty book return", "a3.sbookreturn() def fbi(): ############### faculty book issue ################### a4=Admin() a4.fbookissue() def fbr(): ###############", "ss() elif op == 5: fr() elif op == 6: fs() elif op", "p: objdist=pickle.load(p) print(\"Book code with Remaining Copies and Name :\") print(\"'{Book code :", "Book Return\\n12 To Calculate Fine\\n13 for Search Book \\n0 for Exit\\n\")) if op", "print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for _", "register end ############################# ################################# show book detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as", "except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code", "break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################ show student detail end", "== 6: fs() elif op == 7: sbi() elif op == 8 :", "############################## ############################## faculty login start ###################### # def fl(): # f1=Faculty() # f1.facultylogin()", "ADD: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin())", "def adminlogin(self): ############## Admin login ############## if Admin_LogIn(self): main() def sbookissue(self): ############## Book", "adminclass import* #import pickle # from studentclass import* # from facultyclass import* ##########################", 
"l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are", "with open(\"facultydetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added", "show faculty detail end ################################## ################################# Admin LogIn ################################ # def al(): #", "Faculty Reg.\\n6 Faculty Detail\\n7 Student Book Issue \\n8 Remove Book \\n9 Student Book", "== 7: sbi() elif op == 8 : rb() elif op == 9", "Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin())", "Copies and Name :\") print(\"'{Book code : [no. of Copies , Book Name]}'\\n\")", "def dayfine(self): #################### fine cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass import* #import pickle", "FBook_Issue(self) def fbookreturn(self): ######################## faculty book return ########### FBook_Return(self) def dayfine(self): #################### fine", "_ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\")", "Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student Book", "\")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book: {_+1}\") l.append(Admin()) l[_].getbook()", "pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with", "End ################################## ############################ Student Log start ################################## # def sl(): # s1=Student() #", "Log start 
################################## # def sl(): # s1=Student() # s1.studentlogin() ############################## student log", "############ Display_Faculty(self) def adminlogin(self): ############## Admin login ############## if Admin_LogIn(self): main() def sbookissue(self):", "for Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for _ in l:", "Are Added Successfully\") ########################### student regs. End #################################### ########################## faculti reg. start ##################################", "br() elif op == 2: bs() elif op == 3: sr() elif op", "Book to Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Book:", "Book_Remove(self) def sbookreturn(self): ################# student book return ############### SBook_Return(self) def fbookissue(self): ##################### faculty", "Book Name]}'\\n\") print(objdist) ############################### show book detail end ########################### ####################### show student detail", "7: sbi() elif op == 8 : rb() elif op == 9 :", "Admin LogIn ################################ # def al(): # a1=Admin() # a1.adminlogin() ############################# Admin LogIn", "return ########### FBook_Return(self) def dayfine(self): #################### fine cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass", "studentbook Issue ################### def sbi(): a1=Admin() a1.sbookissue() #################### book remove ################### def rb():", "Exit\\n\")) if op == 1: br() elif op == 2: bs() elif op", "################## Book_Remove(self) def sbookreturn(self): ################# student book return ############### SBook_Return(self) def fbookissue(self): #####################", "fp: while True: try: obj = pickle.load(fp) obj.showbook() except EOFError: 
print(\"\\n___________________Data Finish\") break", "with Remaining Copies and Name :\") print(\"'{Book code : [no. of Copies ,", "################################# ############################ show faculty detail start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp:", "a3=Admin() a3.sbookreturn() def fbi(): ############### faculty book issue ################### a4=Admin() a4.fbookissue() def fbr():", "al(): # a1=Admin() # a1.adminlogin() ############################# Admin LogIn End ################################## ############################ Student Log", "Are Added Successfully\") ######################## faculti register end ############################# ################################# show book detail ##############################z", "Student Book Issue \\n8 Remove Book \\n9 Student Book Return \\n10 Faculty Book", "Book: {_+1}\") l.append(Admin()) l[_].getbook() with open(\"bookdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp)", "detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while True: try: obj =", "obj = pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break", "############### faculty book issue ################### a4=Admin() a4.fbookissue() def fbr(): ############### faculty book return", "######################## faculti register end ############################# ################################# show book detail ##############################z def bs(): with", "faculti reg. 
start ################################## def fr(): l=[] n=int(input(\"How many Faculty you want to", "a5.dayfine() def sb(): Search_Book() def main(): while True: print(\"\\n************************************* Admin Menu ***************************************\") op", "for Search Book \\n0 for Exit\\n\")) if op == 1: br() elif op", "def sb(): Search_Book() def main(): while True: print(\"\\n************************************* Admin Menu ***************************************\") op =", "Your Opetion\\n1 Book Register\\n2 Book Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty", "############## Admin login ############## if Admin_LogIn(self): main() def sbookissue(self): ############## Book Issue #################", "############################ studentbook Issue ################### def sbi(): a1=Admin() a1.sbookissue() #################### book remove ################### def", "def showstudent(self): ############### student detail ############# Display_Student(self) def getfaculty(self): ############## faculty reg. ###############", "with open(\"facultydetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showfaculty() except EOFError:", "a2.bookremove() def sbr(): ############# student book return ######################### a3=Admin() a3.sbookreturn() def fbi(): ###############", "############## FBook_Issue(self) def fbookreturn(self): ######################## faculty book return ########### FBook_Return(self) def dayfine(self): ####################", "sr() elif op == 4: ss() elif op == 5: fr() elif op", "student log end ############################## ############################## faculty login start ###################### # def fl(): #", "getfaculty(self): ############## faculty reg. 
############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail ############ Display_Faculty(self)", "def bookremove(self): ################ book remove ################## Book_Remove(self) def sbookreturn(self): ################# student book return", "Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for _ in", "Deatil\\n3 Student Reg.\\n4 Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student Book Issue \\n8", "############## if Admin_LogIn(self): main() def sbookissue(self): ############## Book Issue ################# SBook_Issue(self) def bookremove(self):", "s1=Student() # s1.studentlogin() ############################## student log end ############################## ############################## faculty login start ######################", "sbr(): ############# student book return ######################### a3=Admin() a3.sbookreturn() def fbi(): ############### faculty book", "in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK regs. End ######################################", "print(objdist) ############################### show faculty detail end ################################## ################################# Admin LogIn ################################ # def", "Admin: sdist={} fdist={} bdist={} # def Main(self): ############## Admin Menu ################### # main(self)", "s1.studentlogin() ############################## student log end ############################## ############################## faculty login start ###################### # def", "a1=Admin() # a1.adminlogin() ############################# Admin LogIn End ################################## ############################ Student Log start ##################################", "########################### student regs. End #################################### ########################## faculti reg. 
start ################################## def fr(): l=[]", "issue ################### a4=Admin() a4.fbookissue() def fbr(): ############### faculty book return ###################### a5=Admin() a5.fbookreturn()", "############### book detail ########## Display_Book(self) def getstudent(self): ############### student reg. ################ Register_Student(self,Admin.sdist) def", "############################## faculty login start ###################### # def fl(): # f1=Faculty() # f1.facultylogin() ############################", "remove ################## Book_Remove(self) def sbookreturn(self): ################# student book return ############### SBook_Return(self) def fbookissue(self):", "print(\"Book code with Remaining Copies and Name :\") print(\"'{Book code : [no. of", "detail end ################################## ################################# Admin LogIn ################################ # def al(): # a1=Admin() #", "faculty detail start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while True: try:", "EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code with", "########### FBook_Return(self) def dayfine(self): #################### fine cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass import*", "in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as", "############### student detail ############# Display_Student(self) def getfaculty(self): ############## faculty reg. ############### Register_Faculty(self,Admin.fdist) def", "sr(): l=[] n = int(input(\"Enter No of Student To Add: \")) for _", "facultyclass import* ########################## BOOK regs. 
start ########################################## def br(): l=[] n=int(input(\"Enter No of", "open(\"studentdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) # print(type(obj)) # print(obj)", "int(input(\"Enter No of Student To Add: \")) for _ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter", "for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK regs.", "Faculty Book Return\\n12 To Calculate Fine\\n13 for Search Book \\n0 for Exit\\n\")) if", "reg. start #################################### def sr(): l=[] n = int(input(\"Enter No of Student To", "a5=Admin() a5.fbookreturn() def fc(): #################### fine calculate ################### a5=Admin() a5.dayfine() def sb(): Search_Book()", "import* #import pickle # from studentclass import* # from facultyclass import* ########################## BOOK", "a4=Admin() a4.fbookissue() def fbr(): ############### faculty book return ###################### a5=Admin() a5.fbookreturn() def fc():", "faculti register end ############################# ################################# show book detail ##############################z def bs(): with open(\"bookdetail.pkl\",\"rb\")", "print(\"\\nTHANK YOU_!!!!\\nStudents Are Added Successfully\") ########################### student regs. 
End #################################### ########################## faculti reg.", "op == 1: br() elif op == 2: bs() elif op == 3:", ", Book Name]}'\\n\") print(objdist) ############################### show book detail end ########################### ####################### show student", "with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code with Remaining Copies and Name :\")", "login ############## if Admin_LogIn(self): main() def sbookissue(self): ############## Book Issue ################# SBook_Issue(self) def", ": rb() elif op == 9 : sbr() elif op == 10: fbi()", "################### def sbi(): a1=Admin() a1.sbookissue() #################### book remove ################### def rb(): a2=Admin() a2.bookremove()", "remove ################### def rb(): a2=Admin() a2.bookremove() def sbr(): ############# student book return #########################", "#################### book remove ################### def rb(): a2=Admin() a2.bookremove() def sbr(): ############# student book", "####################### ############################ studentbook Issue ################### def sbi(): a1=Admin() a1.sbookissue() #################### book remove ###################", "book return ###################### a5=Admin() a5.fbookreturn() def fc(): #################### fine calculate ################### a5=Admin() a5.dayfine()", "a1=Admin() a1.sbookissue() #################### book remove ################### def rb(): a2=Admin() a2.bookremove() def sbr(): #############", "bs(): with open(\"bookdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showbook() except", "###################### a5=Admin() a5.fbookreturn() def fc(): #################### fine calculate ################### a5=Admin() a5.dayfine() def sb():", "Admin Menu ################### # main(self) def getbook(self): ############ book reg. 
################# Add_Book(self,Admin.bdist) def", "book return ############### SBook_Return(self) def fbookissue(self): ##################### faculty book issue ############## FBook_Issue(self) def", "########################## faculti reg. start ################################## def fr(): l=[] n=int(input(\"How many Faculty you want", "with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################ show student detail end ################################# ############################", "as fp: while True: try: obj = pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent()", "sb(): Search_Book() def main(): while True: print(\"\\n************************************* Admin Menu ***************************************\") op = int(input(\"\\nEnter", "print(objdist) ############################ show student detail end ################################# ############################ show faculty detail start ##########################", "for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for _ in l:", "break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################### show faculty detail end", "l=[] n=int(input(\"Enter No of Book to Add: \")) for _ in range(n): print(\"\\n_______________________________________________\")", "sbookissue(self): ############## Book Issue ################# SBook_Issue(self) def bookremove(self): ################ book remove ################## Book_Remove(self)", "sbi() elif op == 8 : rb() elif op == 9 : sbr()", "_ in range(n): print(\"\\n_______________________________________________\") print(f\"Enter Detail for Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\")", "cal ###################### DayFine_cal(self) ################################################################################################ #from adminclass 
import* #import pickle # from studentclass import*", "issue ############## FBook_Issue(self) def fbookreturn(self): ######################## faculty book return ########### FBook_Return(self) def dayfine(self):", "book detail end ########################### ####################### show student detail ############################## def ss(): with open(\"studentdetail.pkl\",\"rb\")", "fp: while True: try: obj = pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\") break", "End ###################################### ########################### Student reg. start #################################### def sr(): l=[] n = int(input(\"Enter", "{_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK", "open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################### show faculty detail end ################################## ################################# Admin", "showfaculty(self): ############# faculty detail ############ Display_Faculty(self) def adminlogin(self): ############## Admin login ############## if", "Remaining Copies and Name :\") print(\"'{Book code : [no. of Copies , Book", "obj.showfaculty() except EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist)", "Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student Book Issue \\n8 Remove Book \\n9 Student", "Book Return \\n10 Faculty Book Issue\\n11 Faculty Book Return\\n12 To Calculate Fine\\n13 for", "sbookreturn(self): ################# student book return ############### SBook_Return(self) def fbookissue(self): ##################### faculty book issue", "Faculty: {_+1}\\n\") l.append(Admin()) l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp)", "studentclass import* # from facultyclass import* ########################## BOOK regs. 
start ########################################## def br():", "############## faculty reg. ############### Register_Faculty(self,Admin.fdist) def showfaculty(self): ############# faculty detail ############ Display_Faculty(self) def", "= pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData Finish\") break print(\"_\"*55) with open(\"fdist.pkl\",\"rb\") as p:", "# print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p:", "EOFError: print(\"\\n______________________Data Finish\") break print(\"_\"*55) with open(\"sdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(objdist) ############################ show", "#################### fine calculate ################### a5=Admin() a5.dayfine() def sb(): Search_Book() def main(): while True:", "rb(): a2=Admin() a2.bookremove() def sbr(): ############# student book return ######################### a3=Admin() a3.sbookreturn() def", "# def Main(self): ############## Admin Menu ################### # main(self) def getbook(self): ############ book", "a5.fbookreturn() def fc(): #################### fine calculate ################### a5=Admin() a5.dayfine() def sb(): Search_Book() def", "################### # main(self) def getbook(self): ############ book reg. ################# Add_Book(self,Admin.bdist) def showbook(self): ###############", ": sbr() elif op == 10: fbi() elif op == 11: fbr() elif", "############################### show book detail end ########################### ####################### show student detail ############################## def ss():", "pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nBooks Are Added Successfully\") ######################## BOOK regs. End ###################################### ########################### Student", "Name :\") print(\"'{Book code : [no. 
of Copies , Book Name]}'\\n\") print(objdist) ###############################", "= pickle.load(fp) obj.showbook() except EOFError: print(\"\\n___________________Data Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p:", "Student Detail\\n5 Faculty Reg.\\n6 Faculty Detail\\n7 Student Book Issue \\n8 Remove Book \\n9", "open(\"facultydetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showfaculty() except EOFError: print(\"\\nData", "fr() elif op == 6: fs() elif op == 7: sbi() elif op", "l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nStudents", "elif op == 10: fbi() elif op == 11: fbr() elif op ==", "def getstudent(self): ############### student reg. ################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail #############", "############################### show faculty detail end ################################## ################################# Admin LogIn ################################ # def al():", "import* ############################################################################### ################################################################### class Admin: sdist={} fdist={} bdist={} # def Main(self): ############## Admin", "regs. start ########################################## def br(): l=[] n=int(input(\"Enter No of Book to Add: \"))", "op == 2: bs() elif op == 3: sr() elif op == 4:", "showbook(self): ############### book detail ########## Display_Book(self) def getstudent(self): ############### student reg. ################ Register_Student(self,Admin.sdist)", "Detail for Student: {_+1}\\n\") l.append(Admin()) l[_].getstudent() with open(\"studentdetail.pkl\",\"wb\") as fp: for _ in", "########## Display_Book(self) def getstudent(self): ############### student reg. 
################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student", "# f1=Faculty() # f1.facultylogin() ############################ faculty login End ####################### ############################ studentbook Issue ###################", "== 11: fbr() elif op == 12: fc() elif op == 13: sb()", "main(self) def getbook(self): ############ book reg. ################# Add_Book(self,Admin.bdist) def showbook(self): ############### book detail", "SBook_Return(self) def fbookissue(self): ##################### faculty book issue ############## FBook_Issue(self) def fbookreturn(self): ######################## faculty", "Faculty Book Issue\\n11 Faculty Book Return\\n12 To Calculate Fine\\n13 for Search Book \\n0", "elif op == 5: fr() elif op == 6: fs() elif op ==", "student book return ############### SBook_Return(self) def fbookissue(self): ##################### faculty book issue ############## FBook_Issue(self)", "with open(\"bookdetail.pkl\",\"rb\") as fp: while True: try: obj = pickle.load(fp) obj.showbook() except EOFError:", "Faculty Detail\\n7 Student Book Issue \\n8 Remove Book \\n9 Student Book Return \\n10", "as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are Added Successfully\") ########################", "True: try: obj = pickle.load(fp) # print(type(obj)) # print(obj) obj.showstudent() except EOFError: print(\"\\n______________________Data", "sdist={} fdist={} bdist={} # def Main(self): ############## Admin Menu ################### # main(self) def", "Finish\") break print(\"_\"*55) with open(\"bdist.pkl\",\"rb\") as p: objdist=pickle.load(p) print(\"Book code with Remaining Copies", "elif op == 11: fbr() elif op == 12: fc() elif op ==", "############################################################################### ################################################################### class Admin: sdist={} fdist={} bdist={} # def Main(self): ############## Admin Menu", "from function import* 
############################################################################### ################################################################### class Admin: sdist={} fdist={} bdist={} # def Main(self):", "== 1: br() elif op == 2: bs() elif op == 3: sr()", "def showbook(self): ############### book detail ########## Display_Book(self) def getstudent(self): ############### student reg. ################", "function import* ############################################################################### ################################################################### class Admin: sdist={} fdist={} bdist={} # def Main(self): ##############", "sbi(): a1=Admin() a1.sbookissue() #################### book remove ################### def rb(): a2=Admin() a2.bookremove() def sbr():", "BOOK regs. End ###################################### ########################### Student reg. start #################################### def sr(): l=[] n", "################ Register_Student(self,Admin.sdist) def showstudent(self): ############### student detail ############# Display_Student(self) def getfaculty(self): ############## faculty", "############################# Admin LogIn End ################################## ############################ Student Log start ################################## # def sl():", "detail start ########################## def fs(): with open(\"facultydetail.pkl\",\"rb\") as fp: while True: try: obj", "start ################################## # def sl(): # s1=Student() # s1.studentlogin() ############################## student log end", "SBook_Issue(self) def bookremove(self): ################ book remove ################## Book_Remove(self) def sbookreturn(self): ################# student book", "Return \\n10 Faculty Book Issue\\n11 Faculty Book Return\\n12 To Calculate Fine\\n13 for Search", "l[_].getfaculty() with open(\"facultydetail.pkl\",\"wb\") as fp: for _ in l: pickle.dump(_,fp) print(\"\\nTHANK YOU_!!!!\\nFaculties Are" ]
[ "\"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\",", "name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\", ), path(", "login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path(", "path from . import views app_name = \"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()),", "login_required from django.urls import path from . 
import views app_name = \"edd.campaign\" urlpatterns", "= [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()),", "login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\", ), path( \"c/<str:slug>/page/<int:page>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail-paged\", ),", "\"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\", ), path( \"c/<str:slug>/page/<int:page>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail-paged\",", "import login_required from django.urls import path from . import views app_name = \"edd.campaign\"", "name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\",", "django.contrib.auth.decorators import login_required from django.urls import path from . import views app_name =", "path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\", ), path( \"c/<str:slug>/page/<int:page>/\", login_required(views.CampaignDetailView.as_view()),", ". import views app_name = \"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path(", "django.urls import path from . 
import views app_name = \"edd.campaign\" urlpatterns = [", "\"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path(", "import views app_name = \"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\",", "name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\", ), path( \"c/<str:slug>/page/<int:page>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail-paged\", ), ]", "= \"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ),", "[ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\",", "views app_name = \"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()),", "path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ),", "app_name = \"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", 
login_required(views.CampaignIndexView.as_view()), name=\"index-paged\",", "import path from . import views app_name = \"edd.campaign\" urlpatterns = [ path(\"campaign/\",", "from django.urls import path from . import views app_name = \"edd.campaign\" urlpatterns =", "path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()),", "from django.contrib.auth.decorators import login_required from django.urls import path from . import views app_name", "login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\", ),", "), path( \"c/<str:slug>/\", login_required(views.CampaignDetailView.as_view()), name=\"detail\", ), path( \"c/<str:slug>/permissions/\", login_required(views.CampaignPermissionView.as_view()), name=\"permission\", ), path( \"c/<str:slug>/page/<int:page>/\",", "from . import views app_name = \"edd.campaign\" urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"),", "urlpatterns = [ path(\"campaign/\", login_required(views.CampaignIndexView.as_view()), name=\"index\"), path( \"campaign/<int:page>/\", login_required(views.CampaignIndexView.as_view()), name=\"index-paged\", ), path( \"c/<str:slug>/\"," ]
[ "} return context @staticmethod def _get_courses(): courses = [] seen_courses = set() for", "[] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page is not None:", ") key = name if key in seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME:", "( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs):", ") return courses @staticmethod def _get_instructors(): instructors = [] seen_instructors = set() for", "if not course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key = \"{dept}", "= MarkdownPage.objects.filter(path=page_path).first() if page is not None: page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name", "from hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, )", "self._get_instructors(), } return context @staticmethod def _get_courses(): courses = [] seen_courses = set()", "= name if key in seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME: name, }", "Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod def _get_courses(): courses = [] seen_courses =", "instructors.append( { Attr.NAME: name, } ) return instructors @staticmethod def _get_pages(): pages =", "in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name", ") key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in seen_courses: continue", "return courses @staticmethod def 
_get_instructors(): instructors = [] seen_instructors = set() for instructor", "seen_instructors = set() for instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr =", "def _get_courses(): courses = [] seen_courses = set() for course in Course.objects.all(): if", "[] seen_courses = set() for course in Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr", "= \"course_surveys/index.html\" def get_context_data(self, **kwargs): context = { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS:", "for course in Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\",", "{number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in seen_courses: continue seen_courses.add(key) courses.append( { Attr.DEPT:", "key in seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME: name, } ) return instructors", "Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod def _get_courses(): courses", "= \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in seen_courses: continue seen_courses.add(key) courses.append(", "set() for course in Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\",", "@staticmethod def _get_pages(): pages = [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first()", "for instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", 
\"-icsr_semester__year_section\",", "key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in seen_courses: continue seen_courses.add(key)", "_get_pages(): pages = [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page", ") return instructors @staticmethod def _get_pages(): pages = [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS:", "instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", )", "{ Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod def _get_courses():", "if not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name}", "from django.views.generic import TemplateView from hknweb.markdown_pages.models import MarkdownPage from hknweb.academics.models import Course, Instructor", "context @staticmethod def _get_courses(): courses = [] seen_courses = set() for course in", "seen_instructors.add(key) instructors.append( { Attr.NAME: name, } ) return instructors @staticmethod def _get_pages(): pages", "django.views.generic import TemplateView from hknweb.markdown_pages.models import MarkdownPage from hknweb.academics.models import Course, Instructor from", "def _get_pages(): pages = [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if", "page is not None: page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :]", "continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", 
) key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number,", "= { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod def", "hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class", "course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr,", "= instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key", ") if key in seen_courses: continue seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number,", "<reponame>erictang000/hknweb<filename>hknweb/course_surveys/views.py from django.views.generic import TemplateView from hknweb.markdown_pages.models import MarkdownPage from hknweb.academics.models import Course,", "not None: page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( {", "COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page is not None: page_name = page.name if", ":] pages.append( { Attr.NAME: page_name, Attr.PATH: \"/pages/\" + page_path, } ) return pages", "= [] seen_instructors = set() for instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue", "first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name if key in seen_instructors: continue seen_instructors.add(key) instructors.append(", "if key 
in seen_courses: continue seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, }", "seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME: name, } ) return instructors @staticmethod def", "= \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name if key in seen_instructors:", "name if key in seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME: name, } )", "courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return courses @staticmethod def _get_instructors():", "@staticmethod def _get_instructors(): instructors = [] seen_instructors = set() for instructor in Instructor.objects.all():", "last_name=most_recent_icsr.last_name, ) key = name if key in seen_instructors: continue seen_instructors.add(key) instructors.append( {", "= course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if", "template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs): context = { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(),", "key in seen_courses: continue seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } )", "not course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key = \"{dept} {number}\".format(", "not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format(", "continue most_recent_icsr = 
instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name,", "class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs): context = { Attr.PAGES: self._get_pages(),", "import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self,", "} ) return instructors @staticmethod def _get_pages(): pages = [] for page_path in", "set() for instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\",", "_get_instructors(): instructors = [] seen_instructors = set() for instructor in Instructor.objects.all(): if not", "in seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME: name, } ) return instructors @staticmethod", "instructors = [] seen_instructors = set() for instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists():", "page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name, Attr.PATH: \"/pages/\"", "if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name, Attr.PATH: \"/pages/\" +", "= page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name, Attr.PATH:", "courses @staticmethod def _get_instructors(): instructors = [] seen_instructors = set() for instructor in", "from hknweb.markdown_pages.models import MarkdownPage from hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants import (", 
"Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name =", "is not None: page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append(", "\"-icsr_semester__year_section\", ) key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in seen_courses:", "\"icsr_semester__year\", \"-icsr_semester__year_section\", ) key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in", "return instructors @staticmethod def _get_pages(): pages = [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page", "dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in seen_courses: continue seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr,", "page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name, Attr.PATH: \"/pages/\" + page_path, }", "def get_context_data(self, **kwargs): context = { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), }", "context = { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod", "instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key =", "COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs): context = {", "MarkdownPage.objects.filter(path=page_path).first() if page is not None: page_name 
= page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name =", "return context @staticmethod def _get_courses(): courses = [] seen_courses = set() for course", "page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page is not None: page_name =", "\"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name", ") name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name if key", "IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs): context = { Attr.PAGES: self._get_pages(), Attr.COURSES:", "Instructor from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name =", "seen_courses = set() for course in Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr =", "Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key =", "[] seen_instructors = set() for instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr", "instructors @staticmethod def _get_pages(): pages = [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page =", "courses = [] seen_courses = set() for course in Course.objects.all(): if not course.icsr_course.exists():", "{ Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return courses @staticmethod def _get_instructors(): instructors", "page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name, Attr.PATH: \"/pages/\" + page_path, } ) return", 
"\"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name if key in seen_instructors: continue", "Course, Instructor from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name", "most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return courses @staticmethod def _get_instructors(): instructors = []", "= [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page is not", "} ) return courses @staticmethod def _get_instructors(): instructors = [] seen_instructors = set()", "most_recent_icsr.course_number, } ) return courses @staticmethod def _get_instructors(): instructors = [] seen_instructors =", "_get_courses(): courses = [] seen_courses = set() for course in Course.objects.all(): if not", "hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def", "page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name, Attr.PATH: \"/pages/\" + page_path,", "pages = [] for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page is", "hknweb.markdown_pages.models import MarkdownPage from hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants import ( Attr,", "**kwargs): context = { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context", "COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs): context =", "name, } ) 
return instructors @staticmethod def _get_pages(): pages = [] for page_path", "= set() for instructor in Instructor.objects.all(): if not instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest(", "self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod def _get_courses(): courses =", "name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name if key in", "seen_courses: continue seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return courses", "= set() for course in Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest(", "in seen_courses: continue seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return", "\"course_surveys/index.html\" def get_context_data(self, **kwargs): context = { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(),", "MarkdownPage from hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS,", "{last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name if key in seen_instructors: continue seen_instructors.add(key)", "Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod def _get_courses(): courses = []", "get_context_data(self, **kwargs): context = { Attr.PAGES: self._get_pages(), Attr.COURSES: self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return", "number=most_recent_icsr.course_number, ) if key in seen_courses: continue 
seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER:", "import Course, Instructor from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView):", "in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page is not None: page_name = page.name", "def _get_instructors(): instructors = [] seen_instructors = set() for instructor in Instructor.objects.all(): if", "TemplateView from hknweb.markdown_pages.models import MarkdownPage from hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants import", "Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return courses @staticmethod def _get_instructors(): instructors =", "import TemplateView from hknweb.markdown_pages.models import MarkdownPage from hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants", "if key in seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME: name, } ) return", "Attr.NUMBER: most_recent_icsr.course_number, } ) return courses @staticmethod def _get_instructors(): instructors = [] seen_instructors", "import MarkdownPage from hknweb.academics.models import Course, Instructor from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX,", "from hknweb.course_surveys.constants import ( Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name = \"course_surveys/index.html\"", "key = name if key in seen_instructors: continue seen_instructors.add(key) instructors.append( { Attr.NAME: name,", "course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key", "continue 
seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return courses @staticmethod", "page = MarkdownPage.objects.filter(path=page_path).first() if page is not None: page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX):", "in Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key", "most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, )", "= [] seen_courses = set() for course in Course.objects.all(): if not course.icsr_course.exists(): continue", "instructor.icsr_instructor.exists(): continue most_recent_icsr = instructor.icsr_instructor.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name,", "self._get_courses(), Attr.INSTRUCTORS: self._get_instructors(), } return context @staticmethod def _get_courses(): courses = [] seen_courses", "Attr, COURSE_SURVEY_PREFIX, COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS, ) class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs): context", "for page_path in COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS: page = MarkdownPage.objects.filter(path=page_path).first() if page is not None: page_name", "None: page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME:", "if page is not None: page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX)", "course in Course.objects.all(): if not course.icsr_course.exists(): continue most_recent_icsr = 
course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", )", "{ Attr.NAME: name, } ) return instructors @staticmethod def _get_pages(): pages = []", "page_name = page.name if page_name.startswith(COURSE_SURVEY_PREFIX): page_name = page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name,", "most_recent_icsr = course.icsr_course.latest( \"icsr_semester__year\", \"-icsr_semester__year_section\", ) key = \"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, )", "@staticmethod def _get_courses(): courses = [] seen_courses = set() for course in Course.objects.all():", "continue seen_instructors.add(key) instructors.append( { Attr.NAME: name, } ) return instructors @staticmethod def _get_pages():", "\"-icsr_semester__year_section\", ) name = \"{first_name} {last_name}\".format( first_name=most_recent_icsr.first_name, last_name=most_recent_icsr.last_name, ) key = name if", "Attr.NAME: name, } ) return instructors @staticmethod def _get_pages(): pages = [] for", "\"{dept} {number}\".format( dept=most_recent_icsr.icsr_department.abbr, number=most_recent_icsr.course_number, ) if key in seen_courses: continue seen_courses.add(key) courses.append( {", "= page_name[len(COURSE_SURVEY_PREFIX) :] pages.append( { Attr.NAME: page_name, Attr.PATH: \"/pages/\" + page_path, } )", ") class IndexView(TemplateView): template_name = \"course_surveys/index.html\" def get_context_data(self, **kwargs): context = { Attr.PAGES:", "seen_courses.add(key) courses.append( { Attr.DEPT: most_recent_icsr.icsr_department.abbr, Attr.NUMBER: most_recent_icsr.course_number, } ) return courses @staticmethod def" ]
[ "PYRAMID_HEIGHT+1): print(WALL + \"*\" * line + \" \" * (PYRAMID_HEIGHT - line)", "+ \"*\" * line + \" \" * (PYRAMID_HEIGHT - line) + WALL)", "codes/triangle.py # Lines count, changable. PYRAMID_HEIGHT = 6 WALL = \"|\" for line", "Lines count, changable. PYRAMID_HEIGHT = 6 WALL = \"|\" for line in range(1,", "changable. PYRAMID_HEIGHT = 6 WALL = \"|\" for line in range(1, PYRAMID_HEIGHT+1): print(WALL", "print(WALL + \"*\" * line + \" \" * (PYRAMID_HEIGHT - line) +", "count, changable. PYRAMID_HEIGHT = 6 WALL = \"|\" for line in range(1, PYRAMID_HEIGHT+1):", "= \"|\" for line in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" * line +", "PYRAMID_HEIGHT = 6 WALL = \"|\" for line in range(1, PYRAMID_HEIGHT+1): print(WALL +", "\"|\" for line in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" * line + \"", "6 WALL = \"|\" for line in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" *", "= 6 WALL = \"|\" for line in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\"", "# Lines count, changable. PYRAMID_HEIGHT = 6 WALL = \"|\" for line in", "in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" * line + \" \" * (PYRAMID_HEIGHT", "for line in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" * line + \" \"", "WALL = \"|\" for line in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" * line", "line in range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" * line + \" \" *", "range(1, PYRAMID_HEIGHT+1): print(WALL + \"*\" * line + \" \" * (PYRAMID_HEIGHT -", "<filename>python codes/triangle.py # Lines count, changable. PYRAMID_HEIGHT = 6 WALL = \"|\" for" ]
[ "@property def health(self): return self._health def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property def", "return self._high_score def score_up(self, kills): \"\"\" Adds user scores based on the number", "= 0 self._level = 1 self._health = self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender", "kills): \"\"\" Adds user scores based on the number of kills and the", "= settings self._scoreboard = scoreboard # High score never resets throughout the game", "return self._level def level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed", "def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property def bullet_speed(self):", "self._settings.enemy_y_speed * self._game_speed @property def bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed @property def", "bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed @property def _game_speed(self): return self._settings.speedup_rate ** self._level", "self._level @property def ammo(self): return self._ammo @ammo.setter def ammo(self, ammo): if ammo !=", "# Update high score if self._score > self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property", "Scoreboard): self._settings = settings self._scoreboard = scoreboard # High score never resets throughout", "High score never resets throughout the game self._high_score = 0 self._set_initial_values() def reset(self):", "Rerender all displaying statistics after it has been changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health,", "0 self._level = 1 self._health = self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender all", "\\ self._settings.enemy_y_speed * self._game_speed 
@property def bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed @property", "@property def _game_speed(self): return self._settings.speedup_rate ** self._level @property def ammo(self): return self._ammo @ammo.setter", "gaming statistics \"\"\" def __init__(self, settings: Settings, scoreboard: Scoreboard): self._settings = settings self._scoreboard", "self._scoreboard = scoreboard # High score never resets throughout the game self._high_score =", "self._game_speed @property def _game_speed(self): return self._settings.speedup_rate ** self._level @property def ammo(self): return self._ammo", "self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property", "self._level * 2 self._scoreboard.render_score(self._score) # Update high score if self._score > self._high_score: self._high_score", "game self._high_score = 0 self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self): self._score = 0", "Adds user scores based on the number of kills and the level \"\"\"", "self._health -= 1 self._scoreboard.render_health(self._health) @property def level(self): return self._level def level_up(self): self._level +=", "Update high score if self._score > self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def", "Settings from peach_invasion.ui.scoreboard import Scoreboard class Stats: \"\"\" Track gaming statistics \"\"\" def", "= 0 self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self): self._score = 0 self._level =", "self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property def score(self): return self._score @property def high_score(self):", "2 self._scoreboard.render_score(self._score) # Update high score if 
self._score > self._high_score: self._high_score = self._score", "-= 1 self._scoreboard.render_health(self._health) @property def level(self): return self._level def level_up(self): self._level += 1", "health(self): return self._health def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property def level(self): return", "self._high_score def score_up(self, kills): \"\"\" Adds user scores based on the number of", "import Scoreboard class Stats: \"\"\" Track gaming statistics \"\"\" def __init__(self, settings: Settings,", "self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender all displaying statistics after it has been", "> self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health def player_lost_life(self):", "level \"\"\" self._score += kills * self._level * 2 self._scoreboard.render_score(self._score) # Update high", "* self._game_speed @property def bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed @property def _game_speed(self):", "_game_speed(self): return self._settings.speedup_rate ** self._level @property def ammo(self): return self._ammo @ammo.setter def ammo(self,", "@property def high_score(self): return self._high_score def score_up(self, kills): \"\"\" Adds user scores based", "* self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property def bullet_speed(self): return 0, -self._settings.bullet_speed *", "def __init__(self, settings: Settings, scoreboard: Scoreboard): self._settings = settings self._scoreboard = scoreboard #", "def bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed @property def _game_speed(self): return self._settings.speedup_rate **", "def player_speed(self): return self._settings.player_speed * self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed,", 
"after it has been changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property def score(self):", "kills * self._level * 2 self._scoreboard.render_score(self._score) # Update high score if self._score >", "the number of kills and the level \"\"\" self._score += kills * self._level", "scoreboard # High score never resets throughout the game self._high_score = 0 self._set_initial_values()", "Scoreboard class Stats: \"\"\" Track gaming statistics \"\"\" def __init__(self, settings: Settings, scoreboard:", "self._set_initial_values() def _set_initial_values(self): self._score = 0 self._level = 1 self._health = self._settings.health_limit self._ammo", "def level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed * self._game_speed", "def high_score(self): return self._high_score def score_up(self, kills): \"\"\" Adds user scores based on", "@property def level(self): return self._level def level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property def", "enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property def bullet_speed(self): return", "__init__(self, settings: Settings, scoreboard: Scoreboard): self._settings = settings self._scoreboard = scoreboard # High", "self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property", "number of kills and the level \"\"\" self._score += kills * self._level *", "@property def ammo(self): return self._ammo @ammo.setter def ammo(self, ammo): if ammo != self._ammo:", "score never resets throughout the game self._high_score = 0 self._set_initial_values() def reset(self): self._set_initial_values()", "displaying statistics after it has been changed 
self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property", "ammo(self): return self._ammo @ammo.setter def ammo(self, ammo): if ammo != self._ammo: self._scoreboard.render_ammo(ammo) self._ammo", "@property def score(self): return self._score @property def high_score(self): return self._high_score def score_up(self, kills):", "return self._health def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property def level(self): return self._level", "on the number of kills and the level \"\"\" self._score += kills *", "* 2 self._scoreboard.render_score(self._score) # Update high score if self._score > self._high_score: self._high_score =", "been changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property def score(self): return self._score @property", "self._score @property def high_score(self): return self._high_score def score_up(self, kills): \"\"\" Adds user scores", "and the level \"\"\" self._score += kills * self._level * 2 self._scoreboard.render_score(self._score) #", "self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health)", "-self._settings.bullet_speed * self._game_speed @property def _game_speed(self): return self._settings.speedup_rate ** self._level @property def ammo(self):", "high score if self._score > self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def health(self):", "def score_up(self, kills): \"\"\" Adds user scores based on the number of kills", "= scoreboard # High score never resets throughout the game self._high_score = 0", "self._level, self._health, self._ammo) @property def score(self): return self._score @property def high_score(self): return self._high_score", "** self._level @property 
def ammo(self): return self._ammo @ammo.setter def ammo(self, ammo): if ammo", "1 self._scoreboard.render_health(self._health) @property def level(self): return self._level def level_up(self): self._level += 1 self._scoreboard.render_level(self._level)", "all displaying statistics after it has been changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo)", "= self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health def player_lost_life(self): self._health -= 1", "self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property def bullet_speed(self): return 0, -self._settings.bullet_speed", "def reset(self): self._set_initial_values() def _set_initial_values(self): self._score = 0 self._level = 1 self._health =", "self._high_score, self._level, self._health, self._ammo) @property def score(self): return self._score @property def high_score(self): return", "self._ammo) @property def score(self): return self._score @property def high_score(self): return self._high_score def score_up(self,", "self._ammo @ammo.setter def ammo(self, ammo): if ammo != self._ammo: self._scoreboard.render_ammo(ammo) self._ammo = ammo", "Settings, scoreboard: Scoreboard): self._settings = settings self._scoreboard = scoreboard # High score never", "@property def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property def", "score(self): return self._score @property def high_score(self): return self._high_score def score_up(self, kills): \"\"\" Adds", "* self._game_speed @property def _game_speed(self): return self._settings.speedup_rate ** self._level @property def ammo(self): return", "def _game_speed(self): return self._settings.speedup_rate ** self._level @property def ammo(self): return self._ammo @ammo.setter def", "of kills and the level \"\"\" self._score += 
kills * self._level * 2", "self._settings.player_speed * self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed *", "score_up(self, kills): \"\"\" Adds user scores based on the number of kills and", "return self._settings.player_speed * self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed", "self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health def player_lost_life(self): self._health", "self._score += kills * self._level * 2 self._scoreboard.render_score(self._score) # Update high score if", "= self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender all displaying statistics after it has", "resets throughout the game self._high_score = 0 self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self):", "def score(self): return self._score @property def high_score(self): return self._high_score def score_up(self, kills): \"\"\"", "peach_invasion.settings import Settings from peach_invasion.ui.scoreboard import Scoreboard class Stats: \"\"\" Track gaming statistics", "self._health, self._ammo) @property def score(self): return self._score @property def high_score(self): return self._high_score def", "kills and the level \"\"\" self._score += kills * self._level * 2 self._scoreboard.render_score(self._score)", "self._health = self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender all displaying statistics after it", "from peach_invasion.settings import Settings from peach_invasion.ui.scoreboard import Scoreboard class Stats: \"\"\" Track gaming", "player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property def level(self): return self._level def level_up(self): self._level", "return 
self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property def bullet_speed(self): return 0,", "self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed @property def bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed", "high_score(self): return self._high_score def score_up(self, kills): \"\"\" Adds user scores based on the", "self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self): self._score = 0 self._level = 1 self._health", "self._score > self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health def", "def level(self): return self._level def level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property def player_speed(self):", "def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property def level(self): return self._level def level_up(self):", "statistics \"\"\" def __init__(self, settings: Settings, scoreboard: Scoreboard): self._settings = settings self._scoreboard =", "\"\"\" self._score += kills * self._level * 2 self._scoreboard.render_score(self._score) # Update high score", "* self._level * 2 self._scoreboard.render_score(self._score) # Update high score if self._score > self._high_score:", "\"\"\" def __init__(self, settings: Settings, scoreboard: Scoreboard): self._settings = settings self._scoreboard = scoreboard", "settings: Settings, scoreboard: Scoreboard): self._settings = settings self._scoreboard = scoreboard # High score", "level(self): return self._level def level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property def player_speed(self): return", "= 1 self._health = self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender all displaying statistics", "self._score = 0 self._level = 1 self._health = 
self._settings.health_limit self._ammo = self._settings.ammo_limit #", "it has been changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property def score(self): return", "reset(self): self._set_initial_values() def _set_initial_values(self): self._score = 0 self._level = 1 self._health = self._settings.health_limit", "+= kills * self._level * 2 self._scoreboard.render_score(self._score) # Update high score if self._score", "self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health def player_lost_life(self): self._health -=", "def ammo(self): return self._ammo @ammo.setter def ammo(self, ammo): if ammo != self._ammo: self._scoreboard.render_ammo(ammo)", "\"\"\" Adds user scores based on the number of kills and the level", "self._high_score = 0 self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self): self._score = 0 self._level", "0, -self._settings.bullet_speed * self._game_speed @property def _game_speed(self): return self._settings.speedup_rate ** self._level @property def", "1 self._health = self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender all displaying statistics after", "throughout the game self._high_score = 0 self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self): self._score", "based on the number of kills and the level \"\"\" self._score += kills", "changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property def score(self): return self._score @property def", "\"\"\" Track gaming statistics \"\"\" def __init__(self, settings: Settings, scoreboard: Scoreboard): self._settings =", "if self._score > self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return self._health", "from 
peach_invasion.ui.scoreboard import Scoreboard class Stats: \"\"\" Track gaming statistics \"\"\" def __init__(self,", "self._level def level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed *", "player_speed(self): return self._settings.player_speed * self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\", "return self._ammo @ammo.setter def ammo(self, ammo): if ammo != self._ammo: self._scoreboard.render_ammo(ammo) self._ammo =", "def health(self): return self._health def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property def level(self):", "return self._settings.speedup_rate ** self._level @property def ammo(self): return self._ammo @ammo.setter def ammo(self, ammo):", "self._scoreboard.render_health(self._health) @property def level(self): return self._level def level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property", "self._level += 1 self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed * self._game_speed @property def", "@property def bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed @property def _game_speed(self): return self._settings.speedup_rate", "class Stats: \"\"\" Track gaming statistics \"\"\" def __init__(self, settings: Settings, scoreboard: Scoreboard):", "self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed * self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed", "self._settings.speedup_rate ** self._level @property def ammo(self): return self._ammo @ammo.setter def ammo(self, ammo): if", "1 self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed * self._game_speed @property def enemy_speed(self): 
return", "_set_initial_values(self): self._score = 0 self._level = 1 self._health = self._settings.health_limit self._ammo = self._settings.ammo_limit", "self._game_speed @property def bullet_speed(self): return 0, -self._settings.bullet_speed * self._game_speed @property def _game_speed(self): return", "user scores based on the number of kills and the level \"\"\" self._score", "self._scoreboard.render_score(self._score) # Update high score if self._score > self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score)", "0 self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self): self._score = 0 self._level = 1", "never resets throughout the game self._high_score = 0 self._set_initial_values() def reset(self): self._set_initial_values() def", "self._settings = settings self._scoreboard = scoreboard # High score never resets throughout the", "import Settings from peach_invasion.ui.scoreboard import Scoreboard class Stats: \"\"\" Track gaming statistics \"\"\"", "scoreboard: Scoreboard): self._settings = settings self._scoreboard = scoreboard # High score never resets", "self._level = 1 self._health = self._settings.health_limit self._ammo = self._settings.ammo_limit # Rerender all displaying", "# High score never resets throughout the game self._high_score = 0 self._set_initial_values() def", "statistics after it has been changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property def", "score if self._score > self._high_score: self._high_score = self._score self._scoreboard.render_high_score(self._high_score) @property def health(self): return", "the level \"\"\" self._score += kills * self._level * 2 self._scoreboard.render_score(self._score) # Update", "self._health def player_lost_life(self): self._health -= 1 self._scoreboard.render_health(self._health) @property def level(self): return self._level def", "Stats: \"\"\" 
Track gaming statistics \"\"\" def __init__(self, settings: Settings, scoreboard: Scoreboard): self._settings", "peach_invasion.ui.scoreboard import Scoreboard class Stats: \"\"\" Track gaming statistics \"\"\" def __init__(self, settings:", "self._settings.ammo_limit # Rerender all displaying statistics after it has been changed self._scoreboard.render_all(self._score, self._high_score,", "return 0, -self._settings.bullet_speed * self._game_speed @property def _game_speed(self): return self._settings.speedup_rate ** self._level @property", "# Rerender all displaying statistics after it has been changed self._scoreboard.render_all(self._score, self._high_score, self._level,", "scores based on the number of kills and the level \"\"\" self._score +=", "def _set_initial_values(self): self._score = 0 self._level = 1 self._health = self._settings.health_limit self._ammo =", "level_up(self): self._level += 1 self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed * self._game_speed @property", "the game self._high_score = 0 self._set_initial_values() def reset(self): self._set_initial_values() def _set_initial_values(self): self._score =", "* self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed * self._game_speed, \\ self._settings.enemy_y_speed * self._game_speed", "@property def player_speed(self): return self._settings.player_speed * self._game_speed @property def enemy_speed(self): return self._settings.enemy_x_speed *", "has been changed self._scoreboard.render_all(self._score, self._high_score, self._level, self._health, self._ammo) @property def score(self): return self._score", "settings self._scoreboard = scoreboard # High score never resets throughout the game self._high_score", "return self._score @property def high_score(self): return self._high_score def score_up(self, kills): \"\"\" Adds user", "Track gaming statistics \"\"\" def __init__(self, settings: Settings, 
scoreboard: Scoreboard): self._settings = settings", "self._ammo = self._settings.ammo_limit # Rerender all displaying statistics after it has been changed", "= self._settings.ammo_limit # Rerender all displaying statistics after it has been changed self._scoreboard.render_all(self._score,", "+= 1 self._scoreboard.render_level(self._level) @property def player_speed(self): return self._settings.player_speed * self._game_speed @property def enemy_speed(self):" ]
[ "inplace=True) # encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))", "# !pip install h5py pyyaml from __future__ import absolute_import, division, print_function, unicode_literals import", "= 100 # # The patience parameter is the amount of epochs to", "\"last_expr\" pd.options.display.max_columns = 50 # %matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle # create", "{:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') # weights = model.get_weights() #", "string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa = func(time_sa) # encode airlines to their numbers", "# # define the model # model = build_model(train_dataset) # # train the", "each carrier if carr_ == carrier: df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv')", "# !pip install tensorflow==2.0.0 # # Use some functions from tensorflow_docs # !pip", "Use some functions from tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs # !pip install", "ensure Python compatibility. 
import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.patches", "-q git+https://github.com/tensorflow/docs # !pip install h5py pyyaml from __future__ import absolute_import, division, print_function,", "df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map", "for improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data, train_labels, #", "save it for each carrier global train_stats df = pd.read_csv('carriers/carrier' + str(carrier) +", "list(airlines['IATA_CODE']) # print(carriers) global train_stats def norm(x): global train_stats return (x - train_stats['mean'])", "# getting the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' +", "train_stats return (x - train_stats['mean']) / train_stats['std'] def ret_stats(): return train_stats def build_model(train_ds):", "GridSpec from sklearn import metrics, linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from", "pd import seaborn as sns import tensorflow as tf from tensorflow import keras", "# fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL)", "\"dist\": dist # } # test_input, model = processInput(input_) # from google.colab import", "linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder from", "return model def do_create_models(): for carrier in carriers: # create a model and", "+ '.h5') # weights = model.get_weights() # fpkl = open('model-' + str(carrier) +", "+ str(carrier) + '.csv') # defining the train and test labels train_labels =", "The patience parameter is the amount of epochs to check for improvement #", 
"-*- coding: utf-8 -*- \"\"\"Model.ipynb Automatically generated by Colaboratory. Original file is located", "df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode", "= df.drop(train_dataset.index) # # getting the stats # train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\")", "str(carrier) + '.csv') # defining the train and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY')", "Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # #", "3600 + x.minute * 60 + x.second dayOfWeek = 6 airline = 'AA'", "= input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ =", "norm(train_dataset) # normed_test_data = norm(test_dataset) # # define the model # model =", "# # normalize the data # normed_train_data = norm(train_dataset) # normed_test_data = norm(test_dataset)", "norm(test_dataset) # # define the model # model = build_model(train_dataset) # # train", "input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\":", "'%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string): if pd.isnull(time_string):", "= norm(df) model = keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return df, model #", "= build_model(train_dataset) # # train the model # EPOCHS = 100 # #", "normalize the data # normed_train_data = norm(train_dataset) # normed_test_data = norm(test_dataset) # #", "loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set Mean Abs Error:", "airlines to their numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'],", "= 
LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_ in", "x.hour * 3600 + x.minute * 60 + x.second dayOfWeek = 6 airline", "# print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train and test dataset", "train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() # # defining the train and test labels", "= input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\":", "return df, model # input_ = { # \"dayOfWeek\": dayOfWeek, # \"carrier\": airline,", "pd.options.display.max_columns = 50 # %matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle # create and", "return time_ def func(x): return x.hour * 3600 + x.minute * 60 +", "+ 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_,", "carrier: df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) #", "encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_ in carriers: # create", "# train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() # # defining the train and test", "check for improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data, train_labels,", "magic to ensure Python compatibility. 
import matplotlib as mpl import matplotlib.pyplot as plt", "df.drop(train_dataset.index) # getting the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats'", "weights = model.get_weights() # # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl',", "fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier)) origin = input_[\"origin\"] ddelay = input_[\"ddelay\"]", "seaborn as sns import tensorflow as tf from tensorflow import keras import tensorflow_docs", "from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"]", "def ret_stats(): return train_stats def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64,", "# create the train and test dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index)", "https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn for pairplot # !pip install -q seaborn", "# train the model # EPOCHS = 100 # # The patience parameter", "# # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae))", "conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime def", "train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') # defining the train and test labels train_labels", "== 2400: time_string = 0 time_string = \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return", "def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, 
activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1)", "} # test_input, model = processInput(input_) # from google.colab import drive # drive.mount('/content/drive')", "in carriers: # create a model and save it for each carrier global", "return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string):", "\"sa\": sa, # \"dist\": dist # } # test_input, model = processInput(input_) #", "!pip install h5py pyyaml from __future__ import absolute_import, division, print_function, unicode_literals import pathlib", "create the train and test dataset # train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset =", "from scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor =", "open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) #", "* 3600 + x.minute * 60 + x.second dayOfWeek = 6 airline =", "calculating the loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing", "# define the model # model = build_model(train_dataset) # # train the model", "plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # from IPython.core.interactiveshell import", "= norm(train_dataset) normed_test_data = norm(test_dataset) # # define the model # model =", "+ '-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier)) origin", "str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin encoder =", "and test dataset train_dataset = df.sample(frac=0.8,random_state=0) 
test_dataset = df.drop(train_dataset.index) # getting the stats", "+ str(carrier) + '-weights.pkl', 'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set", "encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map)", "'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier)) origin = input_[\"origin\"]", "# !pip install -q git+https://github.com/tensorflow/docs # !pip install h5py pyyaml from __future__ import", "parameter is the amount of epochs to check for improvement # early_stop =", "\"DISTANCE\": dist, \"weekday\": weekday } df = pd.DataFrame([input_]) df = norm(df) model =", "print_function, unicode_literals import pathlib import numpy as np import pandas as pd import", "import numpy as np import pandas as pd import seaborn as sns import", "weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\":", "labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize the", "'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs Error: {:5.2f}", "model.evaluate(normed_test_data, test_labels, verbose=2) # # weights = model.get_weights() # # fpkl = open('drive/My", "and save it for each carrier global train_stats df = pd.read_csv('carriers/carrier' + str(carrier)", "carr_ == carrier: df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv') df.drop(['Unnamed: 0'], axis=1,", "Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') # weights =", "input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist,", "\"ddelay\": ddelay, # \"sa\": 
sa, # \"dist\": dist # } # test_input, model", "Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') print('OK ' +", "the model # model = build_model(train_dataset) # # train the model # EPOCHS", "unicode_literals import pathlib import numpy as np import pandas as pd import seaborn", "= datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x): return x.hour * 3600 + x.minute", "from tensorflow import keras import tensorflow_docs as tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling #", "= model.get_weights() # fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights,", "# # train the model # EPOCHS = 100 # # The patience", "out IPython magic to ensure Python compatibility. import matplotlib as mpl import matplotlib.pyplot", "encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and", "= keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return df, model # input_ = {", "train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize the data # normed_train_data =", "defining the train and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') #", "model.save('models/model-' + str(carrier) + '.h5') print('OK ' + str(carrier)) # let's create the", "print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train and test dataset #", "do_create_models(): for carrier in carriers: # create a model and save it for", "df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) # getting the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats", "pd.read_csv('carriers/carrier' + str(carr_) + 
'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin", "import drive # drive.mount('/content/drive') # !ls # test_predictions_input = model.predict(test_input).flatten() # print(\"The delay", "git+https://github.com/tensorflow/docs # !pip install h5py pyyaml from __future__ import absolute_import, division, print_function, unicode_literals", "%matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle # create and save all the models", "\"weekday\": weekday } df = pd.DataFrame([input_]) df = norm(df) model = keras.models.load_model('models/model-' +", "Use seaborn for pairplot # !pip install -q seaborn # !pip install tensorflow==2.0.0", "encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create", "time_string = \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x): return x.hour", "\"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x): return x.hour * 3600", "= pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder = LabelEncoder()", "as sns import tensorflow as tf from tensorflow import keras import tensorflow_docs as", "pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin", "train_stats def norm(x): global train_stats return (x - train_stats['mean']) / train_stats['std'] def ret_stats():", "# -*- coding: utf-8 -*- \"\"\"Model.ipynb Automatically generated by Colaboratory. 
Original file is", "'.csv') # defining the train and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels =", "= keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split = 0.2,", "def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string): if pd.isnull(time_string): return np.nan", "LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train", "'%H:%M:%S') import datetime def string_to_time(time_string): if pd.isnull(time_string): return np.nan else: if time_string ==", "time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x): return x.hour * 3600 +", "compatibility. import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.patches as patches", "import tensorflow_docs.modeling # Commented out IPython magic to ensure Python compatibility. 
import matplotlib", "the train and test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY')", "LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # #", "from tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs # !pip install h5py pyyaml from", "dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) # getting the stats train_stats =", "numpy as np import pandas as pd import seaborn as sns import tensorflow", "print(stats) def processInput(input_): global train_stats processed = [] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa =", "callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating the loss # loss, mae, mse = model.evaluate(normed_test_data,", "string_to_time(time_string): if pd.isnull(time_string): return np.nan else: if time_string == 2400: time_string = 0", "= encoded_data_map[origin] dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd,", "for pairplot # !pip install -q seaborn # !pip install tensorflow==2.0.0 # #", "0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating the loss # loss, mae,", "Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') # weights = model.get_weights()", "the loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set", "= train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') # defining", "import ConnectionPatch from collections import OrderedDict from matplotlib.gridspec import GridSpec from sklearn import", "model.compile(loss='mse', 
optimizer=optimizer, metrics=['mae', 'mse']) return model def do_create_models(): for carrier in carriers: #", "import seaborn as sns import tensorflow as tf from tensorflow import keras import", "define the model # model = build_model(train_dataset) # # train the model #", "= string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa = func(time_sa) # encode airlines to their", "datetime def string_to_time(time_string): if pd.isnull(time_string): return np.nan else: if time_string == 2400: time_string", "input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"]", "\"carrier\": airline, # \"origin\": origin, # \"sd\": sd, # \"ddelay\": ddelay, # \"sa\":", "model and save it for each carrier global train_stats df = pd.read_csv('carriers/carrier' +", "for carrier in carriers: # create a model and save it for each", "input pipeline from datetime import datetime def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def", "import matplotlib.patches as patches from matplotlib.patches import ConnectionPatch from collections import OrderedDict from", "'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT'])", "cross_val_predict from scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor", "# create a model and save it for each carrier if carr_ ==", "test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data", "encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train and test dataset # train_dataset = df.sample(frac=0.8,random_state=0)", "test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize the data # normed_train_data = norm(train_dataset) #", "from datetime import datetime 
def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return", "# model.save('models/model-' + str(carrier) + '.h5') # weights = model.get_weights() # fpkl =", "LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from scipy.optimize import curve_fit import", "%H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string): if pd.isnull(time_string): return", "import tensorflow as tf from tensorflow import keras import tensorflow_docs as tfdocs import", "# normalize the data normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) # # define", "return train_stats def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64,", "# EPOCHS = 100 # # The patience parameter is the amount of", "patience=10) # early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split = 0.2, verbose=0, #", "# getting the stats # train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats =", "ddelay, # \"sa\": sa, # \"dist\": dist # } # test_input, model =", "tfdocs.modeling.EpochDots()]) # # calculating the loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels,", "800 dist = 1200 do_create_models() # global train_stats # stats = ret_stats() #", "time_string == 2400: time_string = 0 time_string = \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4]))", "encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the", "IPython magic to ensure Python compatibility. 
import matplotlib as mpl import matplotlib.pyplot as", "utf-8 -*- \"\"\"Model.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I", "dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train and", "= {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\":", "# # getting the stats # train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats", "train the model # EPOCHS = 100 # # The patience parameter is", "# input_ = { # \"dayOfWeek\": dayOfWeek, # \"carrier\": airline, # \"origin\": origin,", "# # create the train and test dataset # train_dataset = df.sample(frac=0.8,random_state=0) #", "matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.patches import ConnectionPatch from collections", "model.get_weights() # # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb') #", "and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the data", "!ls # test_predictions_input = model.predict(test_input).flatten() # print(\"The delay is: \", test_predictions_input[0], \" minutes\")", "weights = model.get_weights() # fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb') #", "dayOfWeek, # \"carrier\": airline, # \"origin\": origin, # \"sd\": sd, # \"ddelay\": ddelay,", "to ensure Python compatibility. 
import matplotlib as mpl import matplotlib.pyplot as plt import", "dist, \"weekday\": weekday } df = pd.DataFrame([input_]) df = norm(df) model = keras.models.load_model('models/model-'", "import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.patches as patches from", "the model # EPOCHS = 100 # # The patience parameter is the", "int(time_string[2:4])) return time_ def func(x): return x.hour * 3600 + x.minute * 60", "pandas as pd import seaborn as sns import tensorflow as tf from tensorflow", "sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import", "train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data = norm(train_dataset)", "# # Use some functions from tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs #", "fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb') # # pickle.dump(weights, fpkl,", "dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and test dataset train_dataset", "is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn for pairplot # !pip", "sns import tensorflow as tf from tensorflow import keras import tensorflow_docs as tfdocs", "= pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the", "calculating the loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # #", "tf from tensorflow import keras import tensorflow_docs as tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling", "= keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, 
activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer =", "the amount of epochs to check for improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)", "seaborn for pairplot # !pip install -q seaborn # !pip install tensorflow==2.0.0 #", "# pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) #", "model and save it for each carrier if carr_ == carrier: df =", "+ str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map", "train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') # defining the train and", "print('OK ' + str(carrier)) # let's create the input pipeline from datetime import", "test_labels, verbose=2) # # weights = model.get_weights() # # fpkl = open('drive/My Drive/pickle_models/model-'", "the train and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize", "import pathlib import numpy as np import pandas as pd import seaborn as", "= test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) #", "df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder =", "train_test_split, cross_val_score, cross_val_predict from scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight')", "verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating the loss # loss, mae, mse", "fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) #", "the data # normed_train_data = norm(train_dataset) # 
normed_test_data = norm(test_dataset) # # define", "df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train and test dataset # train_dataset", "= processInput(input_) # from google.colab import drive # drive.mount('/content/drive') # !ls # test_predictions_input", "# epochs=EPOCHS, validation_split = 0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating the", "= 1200 do_create_models() # global train_stats # stats = ret_stats() # print(stats) def", "as patches from matplotlib.patches import ConnectionPatch from collections import OrderedDict from matplotlib.gridspec import", "df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier", "encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create", "ddelay, \"DISTANCE\": dist, \"weekday\": weekday } df = pd.DataFrame([input_]) df = norm(df) model", "# create a model and save it for each carrier global train_stats df", "tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])", "time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday } df = pd.DataFrame([input_])", "# \"sa\": sa, # \"dist\": dist # } # test_input, model = processInput(input_)", "time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa = func(time_sa) #", "processInput(input_) # from google.colab import drive # drive.mount('/content/drive') # !ls # test_predictions_input =", "axis=1, 
inplace=True) # encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_,", "train and test dataset # train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) #", "sd = 200 ddelay = -10 sa = 800 dist = 1200 do_create_models()", "import LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from scipy.optimize import curve_fit", "'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))", "pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers) global train_stats def norm(x): global train_stats return", "== carrier: df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True)", "mpl import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.patches import ConnectionPatch", "= dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and test dataset", "# calculating the loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) #", "# \"dist\": dist # } # test_input, model = processInput(input_) # from google.colab", "df, model # input_ = { # \"dayOfWeek\": dayOfWeek, # \"carrier\": airline, #", "= train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() # # defining the train", "drive # drive.mount('/content/drive') # !ls # test_predictions_input = model.predict(test_input).flatten() # print(\"The delay is:", "datetime def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import", "def do_create_models(): for 
carrier in carriers: # create a model and save it", "Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') print('OK '", "50 # %matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle # create and save all", "InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50 # %matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle", "str(carrier) + '.h5') print('OK ' + str(carrier)) # let's create the input pipeline", "'.h5') # weights = model.get_weights() # fpkl = open('model-' + str(carrier) + '-weights.pkl',", "from google.colab import drive # drive.mount('/content/drive') # !ls # test_predictions_input = model.predict(test_input).flatten() #", "True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # from IPython.core.interactiveshell import InteractiveShell #", "- train_stats['mean']) / train_stats['std'] def ret_stats(): return train_stats def build_model(train_ds): model = keras.Sequential([", "def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime", "datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string): if", "is the amount of epochs to check for improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',", "train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') # defining the", "'.h5') print('OK ' + str(carrier)) # let's create the input pipeline from datetime", "time_sa = func(time_sa) # encode airlines to their numbers df = pd.read_csv('carriers/carrier' +", "str(carrier) +'.h5') print(\"OK\") return df, model # input_ = { # \"dayOfWeek\": dayOfWeek,", "tensorflow==2.0.0 # # Use some 
functions from tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs", "ret_stats(): return train_stats def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'),", "train_stats # stats = ret_stats() # print(stats) def processInput(input_): global train_stats processed =", "import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1)", "\"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday } df =", "+ str(carrier)) origin = input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist =", "tensorflow_docs.modeling # Commented out IPython magic to ensure Python compatibility. import matplotlib as", "= open('model-' + str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK", "tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model def do_create_models(): for carrier in carriers:", "pairplot # !pip install -q seaborn # !pip install tensorflow==2.0.0 # # Use", "else: if time_string == 2400: time_string = 0 time_string = \"{0:04d}\".format(int(time_string)) time_ =", "= input_[\"carrier\"] for carr_ in carriers: # create a model and save it", "= \"last_expr\" pd.options.display.max_columns = 50 # %matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle #", "train_labels, # epochs=EPOCHS, validation_split = 0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating", "' + str(carrier)) # let's create the input pipeline from datetime import datetime", "data # normed_train_data = norm(train_dataset) # normed_test_data = norm(test_dataset) # # define the", "/ train_stats['std'] 
def ret_stats(): return train_stats def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu',", "activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return", "norm(df) model = keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return df, model # input_", "# print(stats) def processInput(input_): global train_stats processed = [] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa", "import datetime def string_to_time(time_string): if pd.isnull(time_string): return np.nan else: if time_string == 2400:", "!pip install tensorflow==2.0.0 # # Use some functions from tensorflow_docs # !pip install", "x.second dayOfWeek = 6 airline = 'AA' origin = 'LAX' dest = 'SEA'", "conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string): if pd.isnull(time_string): return np.nan else:", "defining the train and test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels =", "tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model", "sa, # \"dist\": dist # } # test_input, model = processInput(input_) # from", "matplotlib.patches import ConnectionPatch from collections import OrderedDict from matplotlib.gridspec import GridSpec from sklearn", "norm(x): global train_stats return (x - train_stats['mean']) / train_stats['std'] def ret_stats(): return train_stats", "+ str(carr_) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin encoder", "# drive.mount('/content/drive') # !ls # test_predictions_input = model.predict(test_input).flatten() # print(\"The delay is: \",", "of epochs to check for improvement # early_stop 
= keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history", "PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score,", "= True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # from IPython.core.interactiveshell import InteractiveShell", "= \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x): return x.hour *", "model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-'", "by Colaboratory. Original file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn", "as tf from tensorflow import keras import tensorflow_docs as tfdocs import tensorflow_docs.plots import", "all the models airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers) global train_stats", "dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa,", "# } # test_input, model = processInput(input_) # from google.colab import drive #", "= open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL)", "= tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model def do_create_models(): for carrier in", "# global train_stats # stats = ret_stats() # print(stats) def processInput(input_): global train_stats", "the data normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) # # define the model", "mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set Mean Abs Error: {:5.2f}", 
"minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') print('OK ' + str(carrier)) # let's", "getting the stats # train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose()", "# train_stats = train_stats.transpose() # # defining the train and test labels #", "model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer", "str(carrier) + '-weights.pkl', 'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean", "\"\"\" # # Use seaborn for pairplot # !pip install -q seaborn #", "dayOfWeek = 6 airline = 'AA' origin = 'LAX' dest = 'SEA' sd", "stats # train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() # #", "normed_train_data = norm(train_dataset) # normed_test_data = norm(test_dataset) # # define the model #", "airline, # \"origin\": origin, # \"sd\": sd, # \"ddelay\": ddelay, # \"sa\": sa,", "encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train and test", ": time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday }", "# %matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle # create and save all the", "model = processInput(input_) # from google.colab import drive # drive.mount('/content/drive') # !ls #", "encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] =", "mse = model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae))", 
"protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier)", "origin = input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist = input_[\"dist\"] weekday", "matplotlib.patches as patches from matplotlib.patches import ConnectionPatch from collections import OrderedDict from matplotlib.gridspec", "drive.mount('/content/drive') # !ls # test_predictions_input = model.predict(test_input).flatten() # print(\"The delay is: \", test_predictions_input[0],", "absolute_import, division, print_function, unicode_literals import pathlib import numpy as np import pandas as", "plt import matplotlib.patches as patches from matplotlib.patches import ConnectionPatch from collections import OrderedDict", "model def do_create_models(): for carrier in carriers: # create a model and save", "protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier)) origin = input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_", "as pd import seaborn as sns import tensorflow as tf from tensorflow import", "test_dataset = df.drop(train_dataset.index) # # getting the stats # train_stats = train_dataset.describe() #", "install h5py pyyaml from __future__ import absolute_import, division, print_function, unicode_literals import pathlib import", "train_dataset = df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) # getting the stats train_stats = train_dataset.describe()", "str(carr_) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin encoder =", "LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_ in carriers:", "60 + x.second dayOfWeek = 6 airline = 'AA' origin = 'LAX' dest", "encode airlines to their numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed:", 
"verbose=2) # # weights = model.get_weights() # # fpkl = open('drive/My Drive/pickle_models/model-' +", "+ x.second dayOfWeek = 6 airline = 'AA' origin = 'LAX' dest =", "# test_dataset = df.drop(train_dataset.index) # # getting the stats # train_stats = train_dataset.describe()", "Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing", "return x.hour * 3600 + x.minute * 60 + x.second dayOfWeek = 6", "# fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb') # # pickle.dump(weights,", "return (x - train_stats['mean']) / train_stats['std'] def ret_stats(): return train_stats def build_model(train_ds): model", "OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from scipy.optimize import curve_fit import warnings", "= model.get_weights() # # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb')", "early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split =", "= [] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa =", "= func(time_sa) # encode airlines to their numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"])", "# \"carrier\": airline, # \"origin\": origin, # \"sd\": sd, # \"ddelay\": ddelay, #", "df.drop(train_dataset.index) # # getting the stats # train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") #", "+ str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin encoder", "df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) # # getting the stats # train_stats =", "Original file is located at 
https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn for pairplot", "= input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay,", "it for each carrier if carr_ == carrier: df = pd.read_csv('carriers/carrier' + str(carr_)", "# \"ddelay\": ddelay, # \"sa\": sa, # \"dist\": dist # } # test_input,", "{ # \"dayOfWeek\": dayOfWeek, # \"carrier\": airline, # \"origin\": origin, # \"sd\": sd,", "import metrics, linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import LabelEncoder,", "matplotlib.gridspec import GridSpec from sklearn import metrics, linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler,", "dataset # train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) # # getting the", "test dataset # train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) # # getting", "model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split = 0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # #", "create and save all the models airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) #", "# train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) # # getting the stats", "= pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers) global train_stats def norm(x): global train_stats", "global train_stats def norm(x): global train_stats return (x - train_stats['mean']) / train_stats['std'] def", "and test dataset # train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) # #", "import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder from 
sklearn.model_selection import train_test_split,", "# Use some functions from tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs # !pip", "str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map =", "input_ = { # \"dayOfWeek\": dayOfWeek, # \"carrier\": airline, # \"origin\": origin, #", "\"\"\"Model.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" #", "cross_val_score, cross_val_predict from scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch',", "= { # \"dayOfWeek\": dayOfWeek, # \"carrier\": airline, # \"origin\": origin, # \"sd\":", "str(carrier)) # let's create the input pipeline from datetime import datetime def conv_to_datetime(str_):", "axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"]", "str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier))", "encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT'])", "activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model def", "# import pickle # create and save all the models airlines = pd.read_csv('airlines.csv')", "airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers) global train_stats def norm(x): global", "# encode the origin encoder = 
LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT']", "test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) # #", "install -q git+https://github.com/tensorflow/docs # !pip install h5py pyyaml from __future__ import absolute_import, division,", "= ret_stats() # print(stats) def processInput(input_): global train_stats processed = [] time_sd =", "optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model def do_create_models(): for carrier", "func(x): return x.hour * 3600 + x.minute * 60 + x.second dayOfWeek =", "model = keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return df, model # input_ =", "= 50 # %matplotlib inline warnings.filterwarnings(\"ignore\") # import pickle # create and save", "# normed_train_data = norm(train_dataset) # normed_test_data = norm(test_dataset) # # define the model", "train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') #", "= train_stats.transpose() # # defining the train and test labels # train_labels =", "= norm(train_dataset) # normed_test_data = norm(test_dataset) # # define the model # model", "normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) # # define the model # model", "= 'dimgray', linewidth=1) # from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns", "data normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) # # define the model #", "# test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize the data # normed_train_data = norm(train_dataset)", "let's create the input pipeline 
from datetime import datetime def conv_to_datetime(str_): return datetime.strptime(str_,", "fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' +", "model # model = build_model(train_dataset) # # train the model # EPOCHS =", "= model.evaluate(normed_test_data, test_labels, verbose=2) # # weights = model.get_weights() # # fpkl =", "the train and test dataset # train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index)", "from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50 # %matplotlib", "create the train and test dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) #", "metrics, linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder", "save all the models airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers) global", "= train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data = norm(train_dataset) normed_test_data", "= 'AA' origin = 'LAX' dest = 'SEA' sd = 200 ddelay =", "test dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) # getting the stats train_stats", "+ str(carrier) +'.h5') print(\"OK\") return df, model # input_ = { # \"dayOfWeek\":", "train_stats.transpose() # # defining the train and test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY')", "carrier if carr_ == carrier: df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv') df.drop(['Unnamed:", "import pandas as pd import seaborn as sns import tensorflow as tf from", "import keras import tensorflow_docs as tfdocs import tensorflow_docs.plots import 
tensorflow_docs.modeling # Commented out", "train_stats def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'),", "# train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() # # defining", "import tensorflow_docs as tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling # Commented out IPython magic", "as tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling # Commented out IPython magic to ensure", "df = pd.DataFrame([input_]) df = norm(df) model = keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\")", "getting the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier)", "# print(carriers) global train_stats def norm(x): global train_stats return (x - train_stats['mean']) /", "\"origin\": origin, # \"sd\": sd, # \"ddelay\": ddelay, # \"sa\": sa, # \"dist\":", "time_ def func(x): return x.hour * 3600 + x.minute * 60 + x.second", "= 800 dist = 1200 do_create_models() # global train_stats # stats = ret_stats()", "# Use seaborn for pairplot # !pip install -q seaborn # !pip install", "encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_ in carriers: # create a model and", "Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') print('OK ' + str(carrier))", "models airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers) global train_stats def norm(x):", "{:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') print('OK ' + str(carrier)) #", "2400: time_string = 0 time_string = \"{0:04d}\".format(int(time_string)) time_ = 
datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_", "tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs # !pip install h5py pyyaml from __future__", "train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data = norm(train_dataset) normed_test_data =", "[] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa = func(time_sa)", "build_model(train_dataset) # # train the model # EPOCHS = 100 # # The", "generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use", "test_dataset.pop('ARRIVAL_DELAY') # # normalize the data # normed_train_data = norm(train_dataset) # normed_test_data =", "Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') # weights", "and save it for each carrier if carr_ == carrier: df = pd.read_csv('carriers/carrier'", "= encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train and test dataset # train_dataset =", "# # The patience parameter is the amount of epochs to check for", "sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from scipy.optimize import", "train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() # # defining the train and", "to check for improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data,", "]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model def do_create_models(): for", "= 'SEA' sd = 200 ddelay = -10 sa = 800 dist =", "epochs=EPOCHS, validation_split = 0.2, verbose=0, # callbacks=[early_stop, 
tfdocs.modeling.EpochDots()]) # # calculating the loss", "minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') # weights = model.get_weights() # fpkl", "+ str(carrier)) # let's create the input pipeline from datetime import datetime def", "early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split = 0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()])", "file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn for pairplot #", "numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder", "from collections import OrderedDict from matplotlib.gridspec import GridSpec from sklearn import metrics, linear_model", "the stats # train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() #", "model = build_model(train_dataset) # # train the model # EPOCHS = 100 #", "= encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and test dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset", "train_stats df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) #", "linewidth=1) # from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50", "encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and test dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset =", "100 # # The patience parameter is the amount of epochs to check", "= model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) #", "func(time_sa) # encode airlines to their numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) +", "# test_input, model = processInput(input_) # 
from google.colab import drive # drive.mount('/content/drive') #", "# !ls # test_predictions_input = model.predict(test_input).flatten() # print(\"The delay is: \", test_predictions_input[0], \"", "-*- \"\"\"Model.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\"", "datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string): if pd.isnull(time_string): return np.nan else: if time_string", "import absolute_import, division, print_function, unicode_literals import pathlib import numpy as np import pandas", "df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and test dataset train_dataset = df.sample(frac=0.8,random_state=0)", "= 'LAX' dest = 'SEA' sd = 200 ddelay = -10 sa =", "tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model def do_create_models():", "'dimgray', linewidth=1) # from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns =", "for each carrier global train_stats df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed:", "# defining the train and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY')", "origin = 'LAX' dest = 'SEA' sd = 200 ddelay = -10 sa", "each carrier global train_stats df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed: 0'],", "Python compatibility. 
import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.patches as", "keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001)", "print('OK ' + str(carrier)) origin = input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin]", "loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set Mean", "+ 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the origin encoder = LabelEncoder()", "mse = model.evaluate(normed_test_data, test_labels, verbose=2) # # weights = model.get_weights() # # fpkl", "origin, # \"sd\": sd, # \"ddelay\": ddelay, # \"sa\": sa, # \"dist\": dist", "origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) #", "= pd.DataFrame([input_]) df = norm(df) model = keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return", "= norm(test_dataset) # # define the model # model = build_model(train_dataset) # #", "+ str(carrier) + '.h5') print('OK ' + str(carrier)) # let's create the input", "{\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday", "= df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) # getting the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\")", "+ str(carrier) + '.h5') # weights = model.get_weights() # fpkl = open('model-' +", "# print('OK ' + str(carrier)) origin = input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ =", "dist = 1200 do_create_models() # global train_stats # stats = 
ret_stats() # print(stats)", "set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') print('OK", "functions from tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs # !pip install h5py pyyaml", "str(carrier)) origin = input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist = input_[\"dist\"]", "from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection", "if pd.isnull(time_string): return np.nan else: if time_string == 2400: time_string = 0 time_string", "train and test dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) # getting the", "edgecolor = 'dimgray', linewidth=1) # from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\"", "encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_ in carriers: #", "the loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # # weights", "keras import tensorflow_docs as tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling # Commented out IPython", "# callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating the loss # loss, mae, mse =", "# weights = model.get_weights() # # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) +", "the train and test dataset train_dataset = df.sample(frac=0.8,random_state=0) test_dataset = df.drop(train_dataset.index) # getting", "} df = pd.DataFrame([input_]) df = norm(df) model = keras.models.load_model('models/model-' + str(carrier) +'.h5')", "# loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # print(\"Testing set Mean Abs", "-10 sa = 800 dist = 1200 do_create_models() # global 
train_stats # stats", "origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] =", "pickle # create and save all the models airlines = pd.read_csv('airlines.csv') carriers =", "!pip install -q seaborn # !pip install tensorflow==2.0.0 # # Use some functions", "and test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') # #", "# model = build_model(train_dataset) # # train the model # EPOCHS = 100", "scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray',", "curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) #", "keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split = 0.2, verbose=0,", "the input pipeline from datetime import datetime def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S')", "def processInput(input_): global train_stats processed = [] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"]))", "# create the train and test dataset # train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset", "input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_,", "InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50 # %matplotlib inline warnings.filterwarnings(\"ignore\") #", "'LAX' dest = 'SEA' sd = 200 ddelay = -10 sa = 800", "import OrderedDict from matplotlib.gridspec import GridSpec from sklearn import metrics, linear_model from 
sklearn.preprocessing", "print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5')", "sklearn import metrics, linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing import", "= func(time_sd) time_sa = func(time_sa) # encode airlines to their numbers df =", "carriers: # create a model and save it for each carrier if carr_", "tensorflow_docs as tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling # Commented out IPython magic to", "(x - train_stats['mean']) / train_stats['std'] def ret_stats(): return train_stats def build_model(train_ds): model =", "for each carrier if carr_ == carrier: df = pd.read_csv('carriers/carrier' + str(carr_) +", "from matplotlib.patches import ConnectionPatch from collections import OrderedDict from matplotlib.gridspec import GridSpec from", "do_create_models() # global train_stats # stats = ret_stats() # print(stats) def processInput(input_): global", "pipeline from datetime import datetime def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_):", "= 0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating the loss # loss,", "for carr_ in carriers: # create a model and save it for each", "verbose=2) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier)", "# train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize the data", "pathlib import numpy as np import pandas as pd import seaborn as sns", "df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode", "# The patience parameter is the amount of epochs to check for improvement", "build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', 
input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ])", "# !pip install -q seaborn # !pip install tensorflow==2.0.0 # # Use some", "processInput(input_): global train_stats processed = [] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd", "# # weights = model.get_weights() # # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier)", "IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50 # %matplotlib inline", "inline warnings.filterwarnings(\"ignore\") # import pickle # create and save all the models airlines", "norm(train_dataset) normed_test_data = norm(test_dataset) # # define the model # model = build_model(train_dataset)", "mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity =", "'-weights.pkl', 'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs Error:", "# early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split = 0.2, verbose=0, # callbacks=[early_stop,", "import GridSpec from sklearn import metrics, linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize", "x.minute * 60 + x.second dayOfWeek = 6 airline = 'AA' origin =", "train_stats processed = [] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd)", "division, print_function, unicode_literals import pathlib import numpy as np import pandas as pd", "'AA' origin = 'LAX' dest = 'SEA' sd = 200 ddelay = -10", "their numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True)", 
"warnings.filterwarnings(\"ignore\") # import pickle # create and save all the models airlines =", "= train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize the data # normed_train_data", "encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and test dataset train_dataset =", "tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling # Commented out IPython magic to ensure Python", "import pickle # create and save all the models airlines = pd.read_csv('airlines.csv') carriers", "coding: utf-8 -*- \"\"\"Model.ipynb Automatically generated by Colaboratory. Original file is located at", "improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS,", "= 200 ddelay = -10 sa = 800 dist = 1200 do_create_models() #", "the models airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers) global train_stats def", "time_sd = func(time_sd) time_sa = func(time_sa) # encode airlines to their numbers df", "optimizer=optimizer, metrics=['mae', 'mse']) return model def do_create_models(): for carrier in carriers: # create", "import datetime def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_, '%H:%M:%S')", "test_input, model = processInput(input_) # from google.colab import drive # drive.mount('/content/drive') # !ls", "__future__ import absolute_import, division, print_function, unicode_literals import pathlib import numpy as np import", "df = norm(df) model = keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return df, model", "import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50 # %matplotlib inline warnings.filterwarnings(\"ignore\")", "create 
a model and save it for each carrier global train_stats df =", "1200 do_create_models() # global train_stats # stats = ret_stats() # print(stats) def processInput(input_):", "# encode airlines to their numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv')", "train_stats['std'] def ret_stats(): return train_stats def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]),", "metrics=['mae', 'mse']) return model def do_create_models(): for carrier in carriers: # create a", "amount of epochs to check for improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) #", "# pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier)) origin = input_[\"origin\"] ddelay", "carriers: # create a model and save it for each carrier global train_stats", "model.save('models/model-' + str(carrier) + '.h5') # weights = model.get_weights() # fpkl = open('model-'", "patches from matplotlib.patches import ConnectionPatch from collections import OrderedDict from matplotlib.gridspec import GridSpec", "ddelay = -10 sa = 800 dist = 1200 do_create_models() # global train_stats", "labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data =", "= dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # # create the train", "the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT'])", "and save all the models airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE']) # print(carriers)", "-q seaborn # !pip install tensorflow==2.0.0 # # Use some functions from 
tensorflow_docs", "# InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50 # %matplotlib inline warnings.filterwarnings(\"ignore\") # import", "model # EPOCHS = 100 # # The patience parameter is the amount", "some functions from tensorflow_docs # !pip install -q git+https://github.com/tensorflow/docs # !pip install h5py", "# stats = ret_stats() # print(stats) def processInput(input_): global train_stats processed = []", "ret_stats() # print(stats) def processInput(input_): global train_stats processed = [] time_sd = string_to_time(np.int64(input_[\"sd\"]))", "from __future__ import absolute_import, division, print_function, unicode_literals import pathlib import numpy as np", "= LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) #", "sd, # \"ddelay\": ddelay, # \"sa\": sa, # \"dist\": dist # } #", "tensorflow import keras import tensorflow_docs as tfdocs import tensorflow_docs.plots import tensorflow_docs.modeling # Commented", "' + str(carrier)) origin = input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist", "encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the train and test", "carrier = input_[\"carrier\"] for carr_ in carriers: # create a model and save", "= 0 time_string = \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x):", "\"dayOfWeek\": dayOfWeek, # \"carrier\": airline, # \"origin\": origin, # \"sd\": sd, # \"ddelay\":", "input_[\"carrier\"] for carr_ in carriers: # create a model and save it for", "tensorflow as tf from tensorflow import keras import tensorflow_docs as tfdocs import tensorflow_docs.plots", "open('model-' + 
str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK '", "ConnectionPatch from collections import OrderedDict from matplotlib.gridspec import GridSpec from sklearn import metrics,", "import tensorflow_docs.plots import tensorflow_docs.modeling # Commented out IPython magic to ensure Python compatibility.", "in carriers: # create a model and save it for each carrier if", "from sklearn import metrics, linear_model from sklearn.preprocessing import PolynomialFeatures, StandardScaler, normalize from sklearn.preprocessing", "def string_to_time(time_string): if pd.isnull(time_string): return np.nan else: if time_string == 2400: time_string =", "airline = 'AA' origin = 'LAX' dest = 'SEA' sd = 200 ddelay", "# print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) +", "+ '.csv') # defining the train and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels", "test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the data normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset)", "# loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # # weights = model.get_weights()", "create the input pipeline from datetime import datetime def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d", "model # input_ = { # \"dayOfWeek\": dayOfWeek, # \"carrier\": airline, # \"origin\":", "+ str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' +", "sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] =", "Colaboratory. 
Original file is located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn for", "# # calculating the loss # loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)", "train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') # defining the train and test labels", "200 ddelay = -10 sa = 800 dist = 1200 do_create_models() # global", "train and test labels train_labels = train_dataset.pop('ARRIVAL_DELAY') test_labels = test_dataset.pop('ARRIVAL_DELAY') # normalize the", "located at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn for pairplot # !pip install", "normed_test_data = norm(test_dataset) # # define the model # model = build_model(train_dataset) #", "pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier)) origin = input_[\"origin\"] ddelay =", "return np.nan else: if time_string == 2400: time_string = 0 time_string = \"{0:04d}\".format(int(time_string))", "it for each carrier global train_stats df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv')", "def func(x): return x.hour * 3600 + x.minute * 60 + x.second dayOfWeek", "+'.h5') print(\"OK\") return df, model # input_ = { # \"dayOfWeek\": dayOfWeek, #", "# \"sd\": sd, # \"ddelay\": ddelay, # \"sa\": sa, # \"dist\": dist #", "stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv')", "# create and save all the models airlines = pd.read_csv('airlines.csv') carriers = list(airlines['IATA_CODE'])", "create a model and save it for each carrier if carr_ == carrier:", "StandardScaler, normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict", "carr_ in carriers: # create a model and save it for each 
carrier", "train and test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') #", "save it for each carrier if carr_ == carrier: df = pd.read_csv('carriers/carrier' +", "= input_[\"origin\"] ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist = input_[\"dist\"] weekday =", "from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from scipy.optimize", "train_dataset = df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) # # getting the stats #", "train_stats['mean']) / train_stats['std'] def ret_stats(): return train_stats def build_model(train_ds): model = keras.Sequential([ tf.keras.layers.Dense(64,", "pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT'])", "a model and save it for each carrier global train_stats df = pd.read_csv('carriers/carrier'", "seaborn # !pip install tensorflow==2.0.0 # # Use some functions from tensorflow_docs #", "np.nan else: if time_string == 2400: time_string = 0 time_string = \"{0:04d}\".format(int(time_string)) time_", "pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-'", "as np import pandas as pd import seaborn as sns import tensorflow as", "normalize from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from", "# # defining the train and test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') #", "carriers = list(airlines['IATA_CODE']) # print(carriers) global train_stats def norm(x): global train_stats return (x", "+ '.h5') print('OK ' + str(carrier)) # let's create the input pipeline from", "set Mean Abs 
Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' + str(carrier) + '.h5') #", "'mse']) return model def do_create_models(): for carrier in carriers: # create a model", "\"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday } df = pd.DataFrame([input_]) df = norm(df)", "# normalize the data # normed_train_data = norm(train_dataset) # normed_test_data = norm(test_dataset) #", "= train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') # defining the train and test", "0'], axis=1, inplace=True) # encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map =", "= df.drop(train_dataset.index) # getting the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose()", "\"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday } df = pd.DataFrame([input_]) df", "as plt import matplotlib.patches as patches from matplotlib.patches import ConnectionPatch from collections import", "if time_string == 2400: time_string = 0 time_string = \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]),", "string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa = func(time_sa) # encode airlines", "np import pandas as pd import seaborn as sns import tensorflow as tf", "0'], axis=1, inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier =", "print(\"OK\") return df, model # input_ = { # \"dayOfWeek\": dayOfWeek, # \"carrier\":", "ddelay = input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_", "import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.patches import ConnectionPatch from", "train_labels = 
train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize the data #", "origin_ = encoded_data_map[origin] dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" :", "func(time_sd) time_sa = func(time_sa) # encode airlines to their numbers df = pd.read_csv('carriers/carrier'", "= test_dataset.pop('ARRIVAL_DELAY') # # normalize the data # normed_train_data = norm(train_dataset) # normed_test_data", "# # fpkl = open('drive/My Drive/pickle_models/model-' + str(carrier) + '-weights.pkl', 'wb') # #", "# weights = model.get_weights() # fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb')", "input_[\"ddelay\"] origin_ = encoded_data_map[origin] dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\"", "tensorflow_docs.plots import tensorflow_docs.modeling # Commented out IPython magic to ensure Python compatibility. import", "datetime import datetime def conv_to_datetime(str_): return datetime.strptime(str_, '%Y-%m-%d %H:%M:%S') def conv_to_time(str_): return datetime.strptime(str_,", "# normed_test_data = norm(test_dataset) # # define the model # model = build_model(train_dataset)", "inplace=True) encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for", "!pip install -q git+https://github.com/tensorflow/docs # !pip install h5py pyyaml from __future__ import absolute_import,", "6 airline = 'AA' origin = 'LAX' dest = 'SEA' sd = 200", "global train_stats processed = [] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd =", "= -10 sa = 800 dist = 1200 do_create_models() # global train_stats #", "pd.isnull(time_string): return np.nan else: if time_string == 2400: time_string = 0 time_string =", "datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x): return x.hour 
* 3600 + x.minute *", "# from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = \"last_expr\" pd.options.display.max_columns = 50 #", "def norm(x): global train_stats return (x - train_stats['mean']) / train_stats['std'] def ret_stats(): return", "pd.DataFrame([input_]) df = norm(df) model = keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return df,", "warnings plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # from IPython.core.interactiveshell", "from matplotlib.gridspec import GridSpec from sklearn import metrics, linear_model from sklearn.preprocessing import PolynomialFeatures,", "'SEA' sd = 200 ddelay = -10 sa = 800 dist = 1200", "train_stats = train_dataset.describe() # train_stats.pop(\"ARRIVAL_DELAY\") # train_stats = train_stats.transpose() # # defining the", "# from google.colab import drive # drive.mount('/content/drive') # !ls # test_predictions_input = model.predict(test_input).flatten()", "# \"origin\": origin, # \"sd\": sd, # \"ddelay\": ddelay, # \"sa\": sa, #", "patience parameter is the amount of epochs to check for improvement # early_stop", "train_stats = train_stats.transpose() # # defining the train and test labels # train_labels", "pyyaml from __future__ import absolute_import, division, print_function, unicode_literals import pathlib import numpy as", "# # Use seaborn for pairplot # !pip install -q seaborn # !pip", "\"sd\": sd, # \"ddelay\": ddelay, # \"sa\": sa, # \"dist\": dist # }", "mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # # weights = model.get_weights() # #", "input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae',", "loss # loss, mae, mse = 
model.evaluate(normed_test_data, test_labels, verbose=2) # # weights =", "global train_stats # stats = ret_stats() # print(stats) def processInput(input_): global train_stats processed", "at https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I \"\"\" # # Use seaborn for pairplot # !pip install -q", "a model and save it for each carrier if carr_ == carrier: df", "# Commented out IPython magic to ensure Python compatibility. import matplotlib as mpl", "= LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) df['ORIGIN_AIRPORT'] = encoder.fit_transform(df['ORIGIN_AIRPORT']) # create the", "+ x.minute * 60 + x.second dayOfWeek = 6 airline = 'AA' origin", "origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday } df = pd.DataFrame([input_]) df =", "the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) +", "= df.sample(frac=0.8,random_state=0) # test_dataset = df.drop(train_dataset.index) # # getting the stats # train_stats", "plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity", "encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_", "tf.keras.layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse',", "* 60 + x.second dayOfWeek = 6 airline = 'AA' origin = 'LAX'", "normalize the data normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) # # 
define the", "keras.models.load_model('models/model-' + str(carrier) +'.h5') print(\"OK\") return df, model # input_ = { #", "'-weights.pkl', 'wb') # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print('OK ' + str(carrier)) origin =", "# let's create the input pipeline from datetime import datetime def conv_to_datetime(str_): return", "= pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True) # encode the", "encoded_data_map[origin] dist = input_[\"dist\"] weekday = input_[\"dayOfWeek\"] input_ = {\"time_insec_dep\" : time_sd, \"time_insec_arr\":", "weekday } df = pd.DataFrame([input_]) df = norm(df) model = keras.models.load_model('models/model-' + str(carrier)", "stats = ret_stats() # print(stats) def processInput(input_): global train_stats processed = [] time_sd", "global train_stats return (x - train_stats['mean']) / train_stats['std'] def ret_stats(): return train_stats def", "activation='relu', input_shape=[len(train_ds.keys())]), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer,", "0 time_string = \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def func(x): return", "dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_ in carriers: # create a model", "EPOCHS = 100 # # The patience parameter is the amount of epochs", "sa = 800 dist = 1200 do_create_models() # global train_stats # stats =", "loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) # # weights = model.get_weights() #", "the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) # print(encoded_data_map) df['ORIGIN_AIRPORT']", "processed = 
[] time_sd = string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa", "google.colab import drive # drive.mount('/content/drive') # !ls # test_predictions_input = model.predict(test_input).flatten() # print(\"The", "global train_stats df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1, inplace=True)", "= list(airlines['IATA_CODE']) # print(carriers) global train_stats def norm(x): global train_stats return (x -", "= model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split = 0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) #", "time_string = 0 time_string = \"{0:04d}\".format(int(time_string)) time_ = datetime.time(int(time_string[0:2]), int(time_string[2:4])) return time_ def", "epochs to check for improvement # early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history =", "carrier global train_stats df = pd.read_csv('carriers/carrier' + str(carrier) + 'data.csv') df.drop(['Unnamed: 0'], axis=1,", "validation_split = 0.2, verbose=0, # callbacks=[early_stop, tfdocs.modeling.EpochDots()]) # # calculating the loss #", "= 6 airline = 'AA' origin = 'LAX' dest = 'SEA' sd =", "# encode the origin encoder = LabelEncoder() encoder.fit(df['ORIGIN_AIRPORT']) encoded_data_map = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) #", "h5py pyyaml from __future__ import absolute_import, division, print_function, unicode_literals import pathlib import numpy", "return datetime.strptime(str_, '%H:%M:%S') import datetime def string_to_time(time_string): if pd.isnull(time_string): return np.nan else: if", "= string_to_time(np.int64(input_[\"sd\"])) time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa = func(time_sa) # encode", "as mpl import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.patches import", "import warnings 
plt.rcParams[\"patch.force_edgecolor\"] = True plt.style.use('fivethirtyeight') mpl.rc('patch', edgecolor = 'dimgray', linewidth=1) # from", "train_stats.pop(\"ARRIVAL_DELAY\") train_stats = train_stats.transpose() train_stats.to_csv('stats/train_stats' + str(carrier) + '.csv') # defining the train", "carrier in carriers: # create a model and save it for each carrier", "collections import OrderedDict from matplotlib.gridspec import GridSpec from sklearn import metrics, linear_model from", "OrderedDict from matplotlib.gridspec import GridSpec from sklearn import metrics, linear_model from sklearn.preprocessing import", "+ '-weights.pkl', 'wb') # # pickle.dump(weights, fpkl, protocol=pickle.HIGHEST_PROTOCOL) # print(\"Testing set Mean Abs", "dist # } # test_input, model = processInput(input_) # from google.colab import drive", "test_labels, verbose=2) # print(\"Testing set Mean Abs Error: {:5.2f} minutes\".format(mae)) # model.save('models/model-' +", "install tensorflow==2.0.0 # # Use some functions from tensorflow_docs # !pip install -q", "time_sa = string_to_time(np.int64(input_[\"sa\"])) time_sd = func(time_sd) time_sa = func(time_sa) # encode airlines to", "matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.patches", "time_sd, \"time_insec_arr\": time_sa, \"ORIGIN_AIRPORT\": origin_, \"DEPARTURE_DELAY\": ddelay, \"DISTANCE\": dist, \"weekday\": weekday } df", "if carr_ == carrier: df = pd.read_csv('carriers/carrier' + str(carr_) + 'data.csv') df.drop(['Unnamed: 0'],", "# defining the train and test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels", "print(carriers) global train_stats def norm(x): global train_stats return (x - train_stats['mean']) / train_stats['std']", "to their numbers df = pd.read_csv('carriers/carrier' + str(input_[\"carrier\"]) + 'data.csv') df.drop(['Unnamed: 0'], axis=1,", "str(carrier) + '.h5') # weights = model.get_weights() # fpkl = open('model-' + 
str(carrier)", "test_dataset = df.drop(train_dataset.index) # getting the stats train_stats = train_dataset.describe() train_stats.pop(\"ARRIVAL_DELAY\") train_stats =", "Commented out IPython magic to ensure Python compatibility. import matplotlib as mpl import", "install -q seaborn # !pip install tensorflow==2.0.0 # # Use some functions from", "import train_test_split, cross_val_score, cross_val_predict from scipy.optimize import curve_fit import warnings plt.rcParams[\"patch.force_edgecolor\"] = True", "# model.save('models/model-' + str(carrier) + '.h5') print('OK ' + str(carrier)) # let's create", "model.get_weights() # fpkl = open('model-' + str(carrier) + '-weights.pkl', 'wb') # pickle.dump(weights, fpkl,", "# \"dayOfWeek\": dayOfWeek, # \"carrier\": airline, # \"origin\": origin, # \"sd\": sd, #", "= dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) carrier = input_[\"carrier\"] for carr_ in carriers: # create a", "dest = 'SEA' sd = 200 ddelay = -10 sa = 800 dist", "test labels # train_labels = train_dataset.pop('ARRIVAL_DELAY') # test_labels = test_dataset.pop('ARRIVAL_DELAY') # # normalize", "# early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) # early_history = model.fit(normed_train_data, train_labels, # epochs=EPOCHS, validation_split" ]
[ "], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)),", "models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)),", "primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel(", "models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images',", "'0022_uploadedimage'), ] operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True,", "django.db import migrations, models import django.db.models.deletion import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial", "('wagtailimages', '0022_uploadedimage'), ] operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True,", 
"bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False,", "verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page',", "options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True,", "class Migration(migrations.Migration): initial = True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ]", "to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, },", "primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+',", "migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')),", "False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False,", 
"models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ),", "related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={ 'ordering': ['sort_order'], 'abstract': False, },", "wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True,", "migrations, models import django.db.models.deletion import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial = True", "max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={ 'ordering': ['sort_order'],", "migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption',", "Migration(migrations.Migration): initial = True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations", "('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True,", "models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), 
migrations.CreateModel( name='VideoPageGalleryImage', fields=[", "on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image',", "migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ],", "import wagtail.core.fields class Migration(migrations.Migration): initial = True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages',", "bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date',", "from django.db import migrations, models import django.db.models.deletion import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration):", "serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False,", "import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial = True dependencies = [ ('wagtailcore',", "fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, 
serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)),", "Django 3.1.3 on 2020-12-03 07:17 from django.db import migrations, models import django.db.models.deletion import", "primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract':", "on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ],", "), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post", "django.db.models.deletion import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial = True dependencies = [", "'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "wagtail.core.fields class Migration(migrations.Migration): initial = True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'),", "[ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr',", "fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), 
('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract':", "# Generated by Django 3.1.3 on 2020-12-03 07:17 from django.db import migrations, models", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={ 'ordering': ['sort_order'], 'abstract': False,", "serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage',", "('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={", "models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={ 'ordering':", "), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),", "dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations = [ migrations.CreateModel( name='VideoIndexPage',", "'0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, 
on_delete=django.db.models.deletion.CASCADE,", "editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')),", "date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel(", "models import django.db.models.deletion import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial = True dependencies", "by Django 3.1.3 on 2020-12-03 07:17 from django.db import migrations, models import django.db.models.deletion", "to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[", "('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={ 'ordering': ['sort_order'], 'abstract':", "name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True,", "serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')),", "('page_ptr', 
models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False,", "True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations = [ migrations.CreateModel(", "= [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro',", "import migrations, models import django.db.models.deletion import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial =", "('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr',", "'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True,", "}, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),", "('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',),", "('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', 
models.CharField(max_length=250)), ('body',", "[ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)),", "initial = True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations =", "Generated by Django 3.1.3 on 2020-12-03 07:17 from django.db import migrations, models import", "modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial = True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'),", "}, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),", "name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', 
wagtail.core.fields.RichTextField(blank=True)), ], options={", "('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage',", "wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True,", "07:17 from django.db import migrations, models import django.db.models.deletion import modelcluster.fields import wagtail.core.fields class", "] operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False,", "models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, },", "('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id',", "null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ],", "import django.db.models.deletion import modelcluster.fields import wagtail.core.fields class Migration(migrations.Migration): initial = True dependencies =", "<reponame>Sinchard/myvideo # Generated by Django 3.1.3 on 2020-12-03 07:17 from django.db import migrations,", "on 2020-12-03 07:17 from django.db import 
migrations, models import django.db.models.deletion import modelcluster.fields import", "= [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations = [ migrations.CreateModel( name='VideoIndexPage', fields=[", "parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('intro', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ),", "= True dependencies = [ ('wagtailcore', '0059_apply_collection_ordering'), ('wagtailimages', '0022_uploadedimage'), ] operations = [", "3.1.3 on 2020-12-03 07:17 from django.db import migrations, models import django.db.models.deletion import modelcluster.fields", "], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "name='VideoPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro',", "2020-12-03 07:17 from django.db import migrations, models import django.db.models.deletion import modelcluster.fields import wagtail.core.fields", "parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={", "to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={ 'ordering': ['sort_order'], 'abstract': False, }, ),", "('sort_order', models.IntegerField(blank=True, editable=False, null=True)), ('caption', models.CharField(blank=True, max_length=250)), ('image', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.image')), ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE,", "False, }, bases=('wagtailcore.page',), ), migrations.CreateModel( name='VideoPageGalleryImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order',", "('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='video.videopage')), ], options={ 'ordering': ['sort_order'], 'abstract': False, }, ), ]" ]
[ "i =='land_attack': self.Strategy_build += self.land_attack elif i =='red': self.red_flag = 1 self.length=self.length-1 else:", "# print('FILE WRITE ERROR') # file.close() # else: # print('FILE WRITE SUCCESS') #", "='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR", "# self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i", "IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt", "WRITE ERROR') file.close() else: print('FILE WRITE SUCCESS') file.close() ''' if __name__ == '__main__':", "='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS", "=basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList)", "self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src", ":: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp", "self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if i == 'rst_attack': self.Strategy_build+= self.rst_attack", "+ self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic = basic else: basic =", "i =='smuf_attack': self.Strategy_build += 
self.smuf_attack elif i =='land_attack': self.Strategy_build += self.land_attack elif i", "+ self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic += self.IpPrintR", "self.smuf_attack ='src host '+IpBrodCast+' and icmp,' self.land_attack = 'dst '+Ip+' and src '+Ip+'", "port = '' for i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for", "icmp,' self.land_attack = 'dst '+Ip+' and src '+Ip+' and syn,' # def ChangePort(self,newPort):", "= 'dst '+Ip+' and src '+Ip+' and syn,' # def ChangePort(self,newPort): # self.Control", "port += 'ic[' + str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n' port", "='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack = 'rst,' self.echo_attack ='dst udp port 7", "+ self.Ip_strip basic+=self.IpPrintR self.basic = basic else: basic = self.Control + self.Out_red +", "or 19,' self.smuf_attack ='src host '+IpBrodCast+' and icmp,' self.land_attack = 'dst '+Ip+' and", "i in range(len(IpPassList)): port += 'ic[' + str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] +", "else: print('STRATEGY ERROR') if IpBanList: for i in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList:", "if IpPassList: for i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier = '->ic ::", "='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq", "' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: basic =self.Control + self.Out_default +", "ChangePort(self,newPort): # self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) 
for", "- '+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment ='-> fr ::", "self.smuf_attack elif i =='land_attack': self.Strategy_build += self.land_attack elif i =='red': self.red_flag = 1", "if i == 'rst_attack': self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build += self.echo_attack elif", "ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp", ":: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog", "print('FILE WRITE ERROR') file.close() else: print('FILE WRITE SUCCESS') file.close() ''' if __name__ ==", "= 1 self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList: for i in IpBanList: self.Strategy_build+='src", "WRITE SUCCESS') file.close() ''' if __name__ == '__main__': witer = ConfigWriter(22222,'192.168.3.128','192.168.3.129','192.168.3.255','ens34','00:0c:29:44:f4:4c') witer.NewConfig(999,('smuf_attack','land_attack','red'),('10.1.1.2','10.1.1.3'),'',1124) '''这里的参数我调试的时候乱改的'''", "-> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.passLog", "='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl ::", "ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier", "=='echo_attack': self.Strategy_build += self.echo_attack elif i =='smuf_attack': self.Strategy_build += self.smuf_attack elif i =='land_attack':", "define import * ''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class ConfigWriter(object): def", "Strategy: if i == 'rst_attack': 
self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build += self.echo_attack", "arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip", "=len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if i == 'rst_attack': self.Strategy_build+= self.rst_attack elif i", "# file.write(config) # except IOError: # print('FILE WRITE ERROR') # file.close() # else:", "''' def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE ERROR')", "+ final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: basic =self.Control", "basic =self.Control + self.Out_default + self.dropLog + self.passLog + self.Classifier + self.arpr +", "= '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy + IpBanList port =", "='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment", "def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if i == 'rst_attack':", "IpPassList: for i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier = '->ic :: IPClassifier(", "'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog", "print('STRATEGY ERROR') if IpBanList: for i in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for", "IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack", "* ''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class 
ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic", "=='smuf_attack': self.Strategy_build += self.smuf_attack elif i =='land_attack': self.Strategy_build += self.land_attack elif i =='red':", "self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList: for i in IpBanList:", "config =self.basic+self.Ip_Classfier+self.port # try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except", "+ self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic = basic else: basic = self.Control +", ":: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip =", "# try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except IOError: #", "== 'rst_attack': self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build += self.echo_attack elif i =='smuf_attack':", "in Strategy: if i == 'rst_attack': self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build +=", "='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag", "self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl", "file = open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE ERROR') file.close() else: print('FILE WRITE", "print('FILE WRITE SUCCESS') # file.close() return config ''' def ConfigDefine(self,conf,id): try: file =", "'w',encoding='UTF-8') # file.write(config) # except IOError: # print('FILE WRITE ERROR') # file.close() #", "IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack = 'rst,' self.echo_attack", "= '' for i in range(self.length): port 
+='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for i", ":: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto", "self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr", "count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr :: ARPResponder('+Ip+'", "self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy", "+= self.smuf_attack elif i =='land_attack': self.Strategy_build += self.land_attack elif i =='red': self.red_flag =", "'rst_attack': self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build += self.echo_attack elif i =='smuf_attack': self.Strategy_build", "host '+IpBrodCast+' and icmp,' self.land_attack = 'dst '+Ip+' and src '+Ip+' and syn,'", "self.Strategy_build+='src '+i+',' if IpPassList: for i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier =", "self.echo_attack ='dst udp port 7 or 19,' self.smuf_attack ='src host '+IpBrodCast+' and icmp,'", "1 self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList: for i in IpBanList: self.Strategy_build+='src '+i+','", "+ self.Classifier + self.arpr + self.arpq + self.Ip_strip basic += self.IpPrintR self.basic =basic", "ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src", "self.arpr + self.arpq + self.Ip_strip basic += self.IpPrintR self.basic =basic self.port = port", "def ChangePort(self,newPort): # self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def 
strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList)", "'' for i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for i in", "ERROR') file.close() else: print('FILE WRITE SUCCESS') file.close() ''' if __name__ == '__main__': witer", "and syn,' # def ChangePort(self,newPort): # self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list):", "file.write(config) # except IOError: # print('FILE WRITE ERROR') # file.close() # else: #", "ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr", "='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack = 'rst,'", "def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out", ":: DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut", "self.basic = basic else: basic = self.Control + self.Out_red + self.dropLog + self.passLog", "for i in Strategy: if i == 'rst_attack': self.Strategy_build+= self.rst_attack elif i =='echo_attack':", "self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic +=", "elif i =='land_attack': self.Strategy_build += self.land_attack elif i =='red': self.red_flag = 1 self.length=self.length-1", "self.Ip_strip basic += self.IpPrintR self.basic =basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control", "in range(len(IpPassList)): port += 'ic[' + str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] + 
'", "+ self.Ip_strip basic += self.IpPrintR self.basic =basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id):", "def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE ERROR') file.close()", "+= self.land_attack elif i =='red': self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY ERROR') if", "range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for i in range(len(IpPassList)): port += 'ic['", ":: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment ='->", "self.rst_attack = 'rst,' self.echo_attack ='dst udp port 7 or 19,' self.smuf_attack ='src host", "final_list = Strategy + IpBanList port = '' for i in range(self.length): port", "'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out", "str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag ==", "self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic", "from define import * ''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class ConfigWriter(object):", "ip_len ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n'", "open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except IOError: # print('FILE WRITE ERROR') # file.close()", "basic = self.Control + self.Out_red + self.dropLog + self.passLog + self.Classifier + self.arpr", "= 
open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except IOError: # print('FILE WRITE ERROR') #", "self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter", "syn,' # def ChangePort(self,newPort): # self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build=''", "= 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern", "''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default", "''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n'", "+='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: basic =self.Control + self.Out_default + self.dropLog + self.passLog", "self.red_flag == 0: basic =self.Control + self.Out_default + self.dropLog + self.passLog + self.Classifier", "and src '+Ip+' and syn,' # def ChangePort(self,newPort): # self.Control = 'CONTROL ::", "'ic[' + str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if", "strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if i == 'rst_attack': self.Strategy_build+=", "file.close() # else: # print('FILE WRITE SUCCESS') # file.close() return 
config ''' def", "+ self.Out_default + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq +", "self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send", ":: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806", "IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy + IpBanList port = '' for i", "IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier", "print('FILE WRITE ERROR') # file.close() # else: # print('FILE WRITE SUCCESS') # file.close()", "+ self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic", "+ IpBanList port = '' for i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if", "WRITE ERROR') # file.close() # else: # print('FILE WRITE SUCCESS') # file.close() return", "self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic = basic else: basic = self.Control", "WRITE SUCCESS') # file.close() return config ''' def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w')", "+ self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic =", "port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for i in range(len(IpPassList)): port += 'ic[' +", "ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr ::", "self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack =", "i in Strategy: if i == 'rst_attack': self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build", "self.basic 
=basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n'", "+ self.Classifier + self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic = basic else:", "'+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM", "self.rst_attack elif i =='echo_attack': self.Strategy_build += self.echo_attack elif i =='smuf_attack': self.Strategy_build += self.smuf_attack", "CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+'", "='dst udp port 7 or 19,' self.smuf_attack ='src host '+IpBrodCast+' and icmp,' self.land_attack", "0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='->", "ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config)", "self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config", "=self.Control + self.Out_default + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq", "-> ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS", "+='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for 
i in range(len(IpPassList)): port += 'ic[' + str(i)", "self.Strategy_build += self.smuf_attack elif i =='land_attack': self.Strategy_build += self.land_attack elif i =='red': self.red_flag", "= 'rst,' self.echo_attack ='dst udp port 7 or 19,' self.smuf_attack ='src host '+IpBrodCast+'", "self.land_attack = 'dst '+Ip+' and src '+Ip+' and syn,' # def ChangePort(self,newPort): #", "self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build += self.echo_attack elif i =='smuf_attack': self.Strategy_build +=", "elif i =='echo_attack': self.Strategy_build += self.echo_attack elif i =='smuf_attack': self.Strategy_build += self.smuf_attack elif", "IOError: # print('FILE WRITE ERROR') # file.close() # else: # print('FILE WRITE SUCCESS')", "self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) #", "'+i+',' #IpClassfier self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy +", "timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n'", "ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr", "detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt ::", "'->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy + IpBanList port = ''", "self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic += self.IpPrintR self.basic", "self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList: for i in IpBanList: self.Strategy_build+='src '+i+',' if", "in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for i in 
range(len(IpPassList)): port +=", "i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n'", "return config ''' def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE", "IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list =", "#basic self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024)", "# file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except IOError: # print('FILE WRITE", "self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog ::", "= 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy:", "#strategy self.rst_attack = 'rst,' self.echo_attack ='dst udp port 7 or 19,' self.smuf_attack ='src", "-> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='->", "re from define import * ''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class", "in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier", "self.red_flag =0 #strategy self.rst_attack = 'rst,' self.echo_attack ='dst udp port 7 or 19,'", "detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack = 'rst,' self.echo_attack ='dst udp", "# except IOError: # print('FILE WRITE ERROR') # file.close() # else: # print('FILE", "20/0001,12/0806 20/0002,12/0800)\\n' 
self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq ::", "for i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for i in range(len(IpPassList)):", "'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern -", "class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red", "+ self.arpr + self.arpq + self.Ip_strip basic += self.IpPrintR self.basic =basic self.port =", "'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8')", "'land_attack' 'red' ''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out :: Queue(1024)", "7 or 19,' self.smuf_attack ='src host '+IpBrodCast+' and icmp,' self.land_attack = 'dst '+Ip+'", "'-)\\n' final_list = Strategy + IpBanList port = '' for i in range(self.length):", "range(len(IpPassList)): port += 'ic[' + str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n'", "+ ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: basic =self.Control + self.Out_default", "ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n->", "port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: 
basic =self.Control + self.Out_default + self.dropLog +", "self.Strategy_build += self.echo_attack elif i =='smuf_attack': self.Strategy_build += self.smuf_attack elif i =='land_attack': self.Strategy_build", "'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len", ":: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if i", "# file.close() # else: # print('FILE WRITE SUCCESS') # file.close() return config '''", "+ self.arpq + self.Ip_strip basic += self.IpPrintR self.basic =basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素'''", "except IOError: print('FILE WRITE ERROR') file.close() else: print('FILE WRITE SUCCESS') file.close() ''' if", "= open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE ERROR') file.close() else: print('FILE WRITE SUCCESS')", "basic else: basic = self.Control + self.Out_red + self.dropLog + self.passLog + self.Classifier", "#IpClassfier self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy + IpBanList", "DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='->", "port 7 or 19,' self.smuf_attack ='src host '+IpBrodCast+' and icmp,' self.land_attack = 'dst", "'rst,' self.echo_attack ='dst udp port 7 or 19,' self.smuf_attack ='src host '+IpBrodCast+' and", "+ self.Out_red + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq +", "self.arpq + self.Ip_strip basic += self.IpPrintR self.basic =basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def", "ERROR') # file.close() # else: # print('FILE WRITE SUCCESS') # file.close() return config", 
"=self.basic+self.Ip_Classfier+self.port # try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except IOError:", "= basic else: basic = self.Control + self.Out_red + self.dropLog + self.passLog +", "ip_src ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len", "self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n'", "+= 'ic[' + str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n'", "'smuf_attack' 'land_attack' 'red' ''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out ::", "i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList: for i in range(len(IpPassList)): port", "passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: basic =self.Control + self.Out_default + self.dropLog", "self.Out_red + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip", "<filename>ConfigRouter.py import re from define import * ''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red'", ":: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] ->", "'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if", "0: basic =self.Control + self.Out_default + self.dropLog + 
self.passLog + self.Classifier + self.arpr", "IpBanList: for i in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for i in IpPassList:", "config ''' def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE", "file.close() else: print('FILE WRITE SUCCESS') file.close() ''' if __name__ == '__main__': witer =", "for i in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for i in IpPassList: self.Strategy_build+='src", ":: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy + IpBanList port = '' for", "ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806", "'+IpBrodCast+' and icmp,' self.land_attack = 'dst '+Ip+' and src '+Ip+' and syn,' #", "self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic = basic", "self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n'", "self.Classifier + self.arpr + self.arpq + self.Ip_strip basic += self.IpPrintR self.basic =basic self.port", "self.Control + self.Out_red + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq", "self.Out_default + self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip", "[1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv", "file.write(conf) except IOError: print('FILE WRITE ERROR') file.close() else: print('FILE WRITE SUCCESS') file.close() '''", "ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto", "in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier 
self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list", "except IOError: # print('FILE WRITE ERROR') # file.close() # else: # print('FILE WRITE", "droped]\")\\n->Discard\\n' if IpPassList: for i in range(len(IpPassList)): port += 'ic[' + str(i) +", "20/0002,12/0800)\\n' self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n'", "__init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out ::", "self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n'", "try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except IOError: # print('FILE", "final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: basic =self.Control +", "']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0: basic", "self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic = basic else: basic = self.Control + self.Out_red", "NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: # file", "'red' ''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out :: Queue(1024) ->", "if IpPassList: for i in range(len(IpPassList)): port += 'ic[' + str(i) + ']->dropLog\\n->Print(\"['", "ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' 
self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n'", "'dst '+Ip+' and src '+Ip+' and syn,' # def ChangePort(self,newPort): # self.Control =", "import re from define import * ''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' '''", "self.IpPrintR self.basic =basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL ::", "self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if i == 'rst_attack': self.Strategy_build+= self.rst_attack elif", "= 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: # file = open('click_'+str(id)+'.click',", "# file.close() return config ''' def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf) except", "fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0", "= Strategy + IpBanList port = '' for i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+'", "self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in", "IpPassList: for i in range(len(IpPassList)): port += 'ic[' + str(i) + ']->dropLog\\n->Print(\"[' +", "0 0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS", "i =='echo_attack': self.Strategy_build += self.echo_attack elif i =='smuf_attack': self.Strategy_build += self.smuf_attack elif i", "elif i =='red': self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList: for", 
"ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE ERROR') file.close() else:", "if IpBanList: for i in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for i in", "+ self.dropLog + self.passLog + self.Classifier + self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR", "ERROR') if IpBanList: for i in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for i", "basic += self.IpPrintR self.basic =basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control =", "'+i+',' if IpPassList: for i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier = '->ic", "self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+ '-)\\n' final_list = Strategy + IpBanList port", "= 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst", "port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port #", "='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')'", "= port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port", "='src host '+IpBrodCast+' and icmp,' self.land_attack = 'dst '+Ip+' and src '+Ip+' and", "# else: # print('FILE WRITE 
SUCCESS') # file.close() return config ''' def ConfigDefine(self,conf,id):", "dt :: DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n'", "for i in IpPassList: self.Strategy_build+='src '+i+',' #IpClassfier self.Ip_Classfier = '->ic :: IPClassifier( '+self.Strategy_build+", "else: basic = self.Control + self.Out_red + self.dropLog + self.passLog + self.Classifier +", "+ ']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag == 0:", "self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack = 'rst,' self.echo_attack ='dst udp port", "elif i =='smuf_attack': self.Strategy_build += self.smuf_attack elif i =='land_attack': self.Strategy_build += self.land_attack elif", "self.arpq ='cl[1] -> [1]arpq :: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n'", "= self.Control + self.Out_red + self.dropLog + self.passLog + self.Classifier + self.arpr +", "'''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try:", "# def ChangePort(self,newPort): # self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length", "self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: # file =", "=0 #strategy self.rst_attack = 'rst,' self.echo_attack ='dst udp port 7 or 19,' 
self.smuf_attack", "Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog ::", "i =='red': self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList: for i", "self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n'", "if self.red_flag == 0: basic =self.Control + self.Out_default + self.dropLog + self.passLog +", "else: print('FILE WRITE SUCCESS') file.close() ''' if __name__ == '__main__': witer = ConfigWriter(22222,'192.168.3.128','192.168.3.129','192.168.3.255','ens34','00:0c:29:44:f4:4c')", ":: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy", ":: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: # file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') #", "self.echo_attack elif i =='smuf_attack': self.Strategy_build += self.smuf_attack elif i =='land_attack': self.Strategy_build += self.land_attack", "ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog", "SUCCESS') # file.close() return config ''' def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf)", "ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')-> cl :: Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='->", "for i in range(len(IpPassList)): port += 'ic[' + str(i) + ']->dropLog\\n->Print(\"[' + final_list[i]", "udp port 7 or 19,' self.smuf_attack ='src host '+IpBrodCast+' and icmp,' self.land_attack =", "IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL ='->", "cl :: 
Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1]", "basic+=self.IpPrintR self.basic = basic else: basic = self.Control + self.Out_red + self.dropLog +", "timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst", "self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) ->", "try: file = open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE ERROR') file.close() else: print('FILE", "== 0: basic =self.Control + self.Out_default + self.dropLog + self.passLog + self.Classifier +", "+= self.IpPrintR self.basic =basic self.port = port '''添加了白名单(IpPassList),在学姐论文中看到好像队列的输入端口可以有多个,我是依据这一基础改的,具体的可以看一下队列元素''' def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL", "Strategy + IpBanList port = '' for i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n'", "# print('FILE WRITE SUCCESS') # file.close() return config ''' def ConfigDefine(self,conf,id): try: file", "19,' self.smuf_attack ='src host '+IpBrodCast+' and icmp,' self.land_attack = 'dst '+Ip+' and src", "='-> dt :: DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n' self.IpPrintS ='-> IPPrint(\"send IP", "print('FILE WRITE SUCCESS') file.close() ''' if __name__ == '__main__': witer = ConfigWriter(22222,'192.168.3.128','192.168.3.129','192.168.3.255','ens34','00:0c:29:44:f4:4c') witer.NewConfig(999,('smuf_attack','land_attack','red'),('10.1.1.2','10.1.1.3'),'',1124)", "self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw ::", "'+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment ='-> fr :: IPFragmenter(300)\\n'", 
"='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw", ":: ARPQuerier('+Ip+','+Mac+')\\n->out;\\n' self.Set_IPAddr ='SetIPAddress('+IpDst+')' self.Ip_strip = 'cl[2]->Strip(14)\\n-> CheckIPHeader(CHECKSUM false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP", "import * ''' 'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac):", "file = open('click_'+str(id)+'.click', 'w',encoding='UTF-8') # file.write(config) # except IOError: # print('FILE WRITE ERROR')", "'+self.Strategy_build+ '-)\\n' final_list = Strategy + IpBanList port = '' for i in", "self.Strategy_build += self.land_attack elif i =='red': self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY ERROR')", "self.Ip_strip basic+=self.IpPrintR self.basic = basic else: basic = self.Control + self.Out_red + self.dropLog", "open('click_'+id+'.click','w') file.write(conf) except IOError: print('FILE WRITE ERROR') file.close() else: print('FILE WRITE SUCCESS') file.close()", "IP detail\")\\n' self.IpOut ='-> arpq;\\n' self.red_flag =0 #strategy self.rst_attack = 'rst,' self.echo_attack ='dst", "self.land_attack elif i =='red': self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList:", "src '+Ip+' and syn,' # def ChangePort(self,newPort): # self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n'", "+ str(i) + ']->dropLog\\n->Print(\"[' + final_list[i] + ' passed]\")\\n->out\\n' port +='ic['+str(self.length)+']->'+self.IpRewriter+self.DecIpTTL+self.IpFragment+self.IpPrintS+'->passLog'+self.IpOut+'\\n' if self.red_flag", "count)\\n' self.passLog ='passLog :: ToIPSummaryDump(/root/log/passlog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n' self.Classifier ='FromDevice('+GateWay+')->", 
"ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default = 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red =", "='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL", "ControlSocket(tcp,'+newPort+')\\n' def strategy_init(self,Strategy:list,IpBanList:list,IpPassList:list): self.Strategy_build='' self.length =len(Strategy)+len(IpBanList)+len(IpPassList) for i in Strategy: if i ==", "IPAddrPairRewriter(pattern - '+IpDst+' 0 0)\\n' self.DecIpTTL ='-> dt :: DecIPTTL\\n' self.IpFragment ='-> fr", "def NewConfig(self,controlPort,Strategy,IpBanList,IpPassList,id): self.Control = 'CONTROL :: ControlSocket(tcp,'+str(controlPort)+')\\n' self.strategy_init(Strategy,IpBanList,IpPassList) config =self.basic+self.Ip_Classfier+self.port # try: #", "else: # print('FILE WRITE SUCCESS') # file.close() return config ''' def ConfigDefine(self,conf,id): try:", "'+Ip+' and syn,' # def ChangePort(self,newPort): # self.Control = 'CONTROL :: ControlSocket(tcp,'+newPort+')\\n' def", "'rst_attack' 'echo_attack' 'smuf_attack' 'land_attack' 'red' ''' class ConfigWriter(object): def __init__(self,ControlPort,Ip,IpDst,IpBrodCast,GateWay,Mac): #basic self.Out_default =", "self.Classifier + self.arpr + self.arpq + self.Ip_strip basic+=self.IpPrintR self.basic = basic else: basic", "'+Ip+' and src '+Ip+' and syn,' # def ChangePort(self,newPort): # self.Control = 'CONTROL", "and icmp,' self.land_attack = 'dst '+Ip+' and src '+Ip+' and syn,' # def", "+= self.echo_attack elif i =='smuf_attack': self.Strategy_build += self.smuf_attack elif i =='land_attack': self.Strategy_build +=", "RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n' self.dropLog ='dropLog :: ToIPSummaryDump(/root/log/droplog,CONTENTS timestamp ip_src ip_dst ip_len ip_proto count)\\n'", "i in IpBanList: self.Strategy_build+='src '+i+',' if IpPassList: for i in IpPassList: 
self.Strategy_build+='src '+i+','", "false)\\n->CheckLength(65535)\\n' self.IpPrintR ='-> IPPrint(\"recv IP detail\")\\n' self.IpRewriter ='rw :: IPAddrPairRewriter(pattern - '+IpDst+' 0", "=='red': self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY ERROR') if IpBanList: for i in", "IOError: print('FILE WRITE ERROR') file.close() else: print('FILE WRITE SUCCESS') file.close() ''' if __name__", "Classifier(12/0806 20/0001,12/0806 20/0002,12/0800)\\n' self.arpr ='-> arpr :: ARPResponder('+Ip+' '+Mac+')\\n->out;\\n' self.arpq ='cl[1] -> [1]arpq", "=='land_attack': self.Strategy_build += self.land_attack elif i =='red': self.red_flag = 1 self.length=self.length-1 else: print('STRATEGY", "= 'out :: Queue(1024) -> ToDevice('+GateWay+')\\n' self.Out_red = 'out :: RED(768,1024,0.02)->Queue(1024) -> ToDevice('+GateWay+')\\n'", "file.close() return config ''' def ConfigDefine(self,conf,id): try: file = open('click_'+id+'.click','w') file.write(conf) except IOError:", "IpBanList port = '' for i in range(self.length): port +='ic['+str(i)+']->dropLog\\n->Print(\"['+final_list[i]+' droped]\")\\n->Discard\\n' if IpPassList:", "arpq;\\n' self.red_flag =0 #strategy self.rst_attack = 'rst,' self.echo_attack ='dst udp port 7 or", "i == 'rst_attack': self.Strategy_build+= self.rst_attack elif i =='echo_attack': self.Strategy_build += self.echo_attack elif i" ]
[ "get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only", "reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1) while True: try: for post in subreddit.hot(limit=25):", "not in commented and any(x in post.title.lower() for x in keywords) or post", "round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\")", "f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins = rndm_sleep / 60;", "f: l.append(line.rstrip()) return l except FileNotFoundError: with open('comment.db', 'w') as f: pass return", "for x in keywords) or post not in commented and keywords[1] in post.link_flair_text):", "return l except FileNotFoundError: with open('comment.db', 'w') as f: pass return [] def", "sleep(1) while True: try: for post in subreddit.hot(limit=25): if (post not in commented", "return [] def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit =", "{str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6", "except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours\") sleep(21600) if", "for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for", "line in f: l.append(line.rstrip()) return l except FileNotFoundError: with open('comment.db', 'w') as f:", "in commented and keywords[1] in post.link_flair_text): commented.append(post) with open('comment.db', 'a') 
as f: f.write(f\"{str(post)}\\n\")", "rndm_sleep / 60; to_mins = round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except:", "1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}]", "if (post not in commented and any(x in post.title.lower() for x in keywords)", "* import random def load_file(file): try: l = [] with open(file, 'r') as", "REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only = False commented =", "from utils.api import API from time import sleep from config import * import", "minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours\")", "to_mins = rndm_sleep / 60; to_mins = round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\")", "f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins = rndm_sleep /", "for post in subreddit.hot(limit=25): if (post not in commented and any(x in post.title.lower()", "in subreddit.hot(limit=25): if (post not in commented and any(x in post.title.lower() for x", "True: try: for post in subreddit.hot(limit=25): if (post not in commented and any(x", "print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins = rndm_sleep / 60; to_mins = round(to_mins,", "open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins", "API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only = False commented", "while True: try: for post in 
subreddit.hot(limit=25): if (post not in commented and", "occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours\") sleep(21600) if __name__ ==", "account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only = False commented = load_file(\"comment.db\") subreddit =", "x in keywords) or post not in commented and keywords[1] in post.link_flair_text): commented.append(post)", "in post.title.lower() for x in keywords) or post not in commented and keywords[1]", "= load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1) while True: try:", "commented.append(post) with open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300,", "load_file(file): try: l = [] with open(file, 'r') as f: for line in", "with open(file, 'r') as f: for line in f: l.append(line.rstrip()) return l except", "time import sleep from config import * import random def load_file(file): try: l", "keywords[1] in post.link_flair_text): commented.append(post) with open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}')", "f: for line in f: l.append(line.rstrip()) return l except FileNotFoundError: with open('comment.db', 'w')", "subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1) while True: try: for post", "from time import sleep from config import * import random def load_file(file): try:", "import random def load_file(file): try: l = [] with open(file, 'r') as f:", "commented and keywords[1] in post.link_flair_text): commented.append(post) with open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS)", "l = [] with open(file, 'r') as f: for line in f: l.append(line.rstrip())", "or post not in commented and keywords[1] in 
post.link_flair_text): commented.append(post) with open('comment.db', 'a')", "False commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1) while", "l except FileNotFoundError: with open('comment.db', 'w') as f: pass return [] def get_nft():", "post in subreddit.hot(limit=25): if (post not in commented and any(x in post.title.lower() for", "config import * import random def load_file(file): try: l = [] with open(file,", "datetime from utils.api import API from time import sleep from config import *", "print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz", "reddit = account.authorize() account.authorized(reddit) reddit.read_only = False commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\")", "with open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600);", "retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours\") sleep(21600) if __name__ == '__main__':", "try: for post in subreddit.hot(limit=25): if (post not in commented and any(x in", "subreddit.hot(limit=25): if (post not in commented and any(x in post.title.lower() for x in", "f: pass return [] def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check()", "post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins = rndm_sleep / 60; to_mins", "'r') as f: for line in f: l.append(line.rstrip()) return l except FileNotFoundError: with", "/ 60; to_mins = round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error", "print(\"Error occurred, retrying.\") sleep(500) 
print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours\") sleep(21600) if __name__", "API from time import sleep from config import * import random def load_file(file):", "= rndm_sleep / 60; to_mins = round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep)", "post.link_flair_text): commented.append(post) with open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep =", "\"address\"] sleep(1) while True: try: for post in subreddit.hot(limit=25): if (post not in", "utils.api import API from time import sleep from config import * import random", "any(x in post.title.lower() for x in keywords) or post not in commented and", "account.authorized(reddit) reddit.read_only = False commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\",", "= API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only = False", "reddit.read_only = False commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"]", "open('comment.db', 'w') as f: pass return [] def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET,", "random def load_file(file): try: l = [] with open(file, 'r') as f: for", "in commented and any(x in post.title.lower() for x in keywords) or post not", "try: l = [] with open(file, 'r') as f: for line in f:", "keywords) or post not in commented and keywords[1] in post.link_flair_text): commented.append(post) with open('comment.db',", "REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only = False commented = load_file(\"comment.db\") subreddit", "= [\"wallet\", \"address\"] sleep(1) while True: try: for post in subreddit.hot(limit=25): if (post", 
"FileNotFoundError: with open('comment.db', 'w') as f: pass return [] def get_nft(): account =", "'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins =", "commented and any(x in post.title.lower() for x in keywords) or post not in", "keywords = [\"wallet\", \"address\"] sleep(1) while True: try: for post in subreddit.hot(limit=25): if", "and keywords[1] in post.link_flair_text): commented.append(post) with open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote()", "sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500) print(\"+\") print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours\") sleep(21600)", "= random.randint(300, 600); to_mins = rndm_sleep / 60; to_mins = round(to_mins, 1) print(f\"zZz", "sleep from config import * import random def load_file(file): try: l = []", "post.title.lower() for x in keywords) or post not in commented and keywords[1] in", "datetime import datetime from utils.api import API from time import sleep from config", "commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1) while True:", "for line in f: l.append(line.rstrip()) return l except FileNotFoundError: with open('comment.db', 'w') as", "= False commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1)", "60; to_mins = round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred,", "600); to_mins = rndm_sleep / 60; to_mins = round(to_mins, 1) print(f\"zZz for {str(to_mins)}", "not in commented and keywords[1] in post.link_flair_text): commented.append(post) with open('comment.db', 'a') as f:", "= [] with open(file, 'r') as f: for line in f: l.append(line.rstrip()) return", "sleep(500) print(\"+\") 
print(f\"[{datetime.now().replace(microsecond=0)}] zZz for 6 hours\") sleep(21600) if __name__ == '__main__': get_nft()", "as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins = rndm_sleep", "account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only =", "load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1) while True: try: for", "account.authorize() account.authorized(reddit) reddit.read_only = False commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords =", "as f: pass return [] def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD)", "rndm_sleep = random.randint(300, 600); to_mins = rndm_sleep / 60; to_mins = round(to_mins, 1)", "from config import * import random def load_file(file): try: l = [] with", "open(file, 'r') as f: for line in f: l.append(line.rstrip()) return l except FileNotFoundError:", "def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit)", "random.randint(300, 600); to_mins = rndm_sleep / 60; to_mins = round(to_mins, 1) print(f\"zZz for", "def load_file(file): try: l = [] with open(file, 'r') as f: for line", "as f: for line in f: l.append(line.rstrip()) return l except FileNotFoundError: with open('comment.db',", "post.upvote() print(f'{post.title}') rndm_sleep = random.randint(300, 600); to_mins = rndm_sleep / 60; to_mins =", "'w') as f: pass return [] def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME,", "import * import random def load_file(file): try: l = [] with open(file, 'r')", "import sleep from config 
import * import random def load_file(file): try: l =", "in f: l.append(line.rstrip()) return l except FileNotFoundError: with open('comment.db', 'w') as f: pass", "[] with open(file, 'r') as f: for line in f: l.append(line.rstrip()) return l", "in keywords) or post not in commented and keywords[1] in post.link_flair_text): commented.append(post) with", "to_mins = round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\")", "= round(to_mins, 1) print(f\"zZz for {str(to_mins)} minutes\") sleep(rndm_sleep) except: print(\"Error occurred, retrying.\") sleep(500)", "from datetime import datetime from utils.api import API from time import sleep from", "with open('comment.db', 'w') as f: pass return [] def get_nft(): account = API(REDDIT_CLIENT_ID,", "= account.authorize() account.authorized(reddit) reddit.read_only = False commented = load_file(\"comment.db\") subreddit = reddit.subreddit(\"NFTsMarketplace\") keywords", "import datetime from utils.api import API from time import sleep from config import", "[\"wallet\", \"address\"] sleep(1) while True: try: for post in subreddit.hot(limit=25): if (post not", "import API from time import sleep from config import * import random def", "= reddit.subreddit(\"NFTsMarketplace\") keywords = [\"wallet\", \"address\"] sleep(1) while True: try: for post in", "except FileNotFoundError: with open('comment.db', 'w') as f: pass return [] def get_nft(): account", "pass return [] def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit", "in post.link_flair_text): commented.append(post) with open('comment.db', 'a') as f: f.write(f\"{str(post)}\\n\") post.reply(ETH_ADDRESS) post.upvote() print(f'{post.title}') rndm_sleep", "[] def get_nft(): account = API(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET, REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize()", 
"l.append(line.rstrip()) return l except FileNotFoundError: with open('comment.db', 'w') as f: pass return []", "and any(x in post.title.lower() for x in keywords) or post not in commented", "post not in commented and keywords[1] in post.link_flair_text): commented.append(post) with open('comment.db', 'a') as", "(post not in commented and any(x in post.title.lower() for x in keywords) or", "REDDIT_USERNAME, REDDIT_PASSWORD) account.shadowban_check() reddit = account.authorize() account.authorized(reddit) reddit.read_only = False commented = load_file(\"comment.db\")" ]
[ "lang == 'en': if(string[0] == 'Share'): return '0' new_string = string[0].rstrip(' Comments') while", "in recursive_items(d): flat_d[key] = value #returns timestamp in localtime conversion from linux timestamp", "''' Get most nested key:value pair of nested dict ''' for key, value", "def id_strip(post_id): import json d = json.loads(post_id[::-1][0]) #nested dict of features return str(d['top_level_post_id'])", "# num of reactions likes = scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field() love", "of the post reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions likes", "1K others else: return newstring else: return string def url_strip(url): fullurl = url[0]", ") # num of reactions likes = scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field()", "# #Philip and 1K others else: return newstring else: return string def url_strip(url):", "e ') + newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring =", "json d = json.loads(date[0]) #nested dict of features flat_d = dict() #only retain", "return newstring #Pamela, Luigi e altri 4 else: return string friends = newstring.count('", "= scrapy.Field() # num of shares url = scrapy.Field( output_processor=url_strip ) post_id =", "url[0] #catchin '&id=' is enough to identify the post i = fullurl.find('&id=') if", "newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring # #Mark and", "your scraped items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from", "enough to identify the post i = fullurl.find('&id=') if i != -1: return", "flat_d[key] = value #returns timestamp in localtime conversion from linux timestamp UTC return", "scrapy.Field() grrr = scrapy.Field() share = scrapy.Field() # num of shares url =", "scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post reactions = 
scrapy.Field( output_processor=reactions_strip", "string[0] #19,298,873 if len(newstring.split()) == 1: while newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')]", "= newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring) + friends # #Philip and 1K", "!= -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos i = fullurl.find('/photos/') if", "= scrapy.Field() url = scrapy.Field() #ahah = scrapy.Field() #love = scrapy.Field() #wow =", "+ new_string[new_string.rfind(',')+1:] return new_string else: return string def reactions_strip(string,loader_context): lang = loader_context['lang'] if", "if i != -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos i =", "coding: utf-8 -*- # Define here the models for your scraped items #", "= scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field() class", "== 'it': newstring = string[0] #19.298.873 if len(newstring.split()) == 1: while newstring.rfind('.') !=", "scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field() url = scrapy.Field() #ahah = scrapy.Field() #love", "parse_date(date): import json d = json.loads(date[0]) #nested dict of features flat_d = dict()", "+ newstring[newstring.rfind(',')+1:] # return int(newstring) + friends # #Philip and 1K others else:", "newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e altri 4 else: return string friends =", "else: return string friends = newstring.count(' e ') + newstring.count(',') newstring = newstring.split()[::-1][0]", "-1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif lang ==", "type(value) is dict: yield from recursive_items(value) else: yield (key, value) for key, value", "+ newstring[newstring.rfind(',')+1:] return newstring # #Mark and other 254,134 # elif newstring.split()[::-1][1].isdigit(): 
#", "') + newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')]", "output_processor=Join(separator=u'') ) # full text of the post comments = scrapy.Field( output_processor=comments_strip )", "newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring # #Mark and other 254,134 #", "#catch albums i = fullurl.find('/albums/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0]", "dict of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field() date = scrapy.Field()", "newstring = string[0] #19,298,873 if len(newstring.split()) == 1: while newstring.rfind(',') != -1: newstring", "int(newstring) + friends # #Philip and 1K others else: return newstring else: return", "str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d = json.loads(post_id[::-1][0]) #nested dict of features return", "scrapy.Field() url = scrapy.Field() #ahah = scrapy.Field() #love = scrapy.Field() #wow = scrapy.Field()", "= scrapy.Field() #sigh = scrapy.Field() #grrr = scrapy.Field() #share = scrapy.Field() # num", "= string[0].rstrip(' Comments') while new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return", "string def url_strip(url): fullurl = url[0] #catchin '&id=' is enough to identify the", "post_id = scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field() class CommentsItem(scrapy.Item): source = scrapy.Field()", "def comments_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': if string[0].rfind('Commenta') != -1:", "recursive_items(dictionary): ''' Get most nested key:value pair of nested dict ''' for key,", "scrapy.Field( # when was the post published output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'')", "output_processor=reactions_strip 
) # num of reactions likes = scrapy.Field( output_processor=reactions_strip ) source_url =", "source = scrapy.Field() date = scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') ) # full", "other 254,134 # elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and ') + newstring.count(',')", "scrapy.Field() wow = scrapy.Field() sigh = scrapy.Field() grrr = scrapy.Field() share = scrapy.Field()", "most nested key:value pair of nested dict ''' for key, value in dictionary.items():", "output_processor=reactions_strip ) ahah = scrapy.Field() love = scrapy.Field() wow = scrapy.Field() sigh =", "else: #catch albums i = fullurl.find('/albums/') if i != -1: return fullurl[:i+8] +", "4 else: return string friends = newstring.count(' e ') + newstring.count(',') newstring =", "json.loads(date[0]) #nested dict of features flat_d = dict() #only retain 'leaves' of d", "scrapy.Field() date = scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of", "string[0].rstrip(' Comments') while new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string", "value #returns timestamp in localtime conversion from linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def", "the post i = fullurl.find('&id=') if i != -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0]", "published output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the", "else: return string[0].rstrip(' commenti') elif lang == 'en': if(string[0] == 'Share'): return '0'", "newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e altri 4 else: return string", "scrapy.Field() #love = scrapy.Field() #wow = scrapy.Field() #sigh = scrapy.Field() #grrr = scrapy.Field()", ") # full text of the post reactions = scrapy.Field( output_processor=reactions_strip ) #", 
"#wow = scrapy.Field() #sigh = scrapy.Field() #grrr = scrapy.Field() #share = scrapy.Field() #", "newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif lang == 'en': newstring = string[0] #19,298,873", "while newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring # #Mark", "sigh = scrapy.Field() grrr = scrapy.Field() share = scrapy.Field() # num of shares", "shares url = scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip ) shared_from =", "fullurl.find('&id=') if i != -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos i", "= newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif lang == 'en': newstring", "reactions likes = scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field() url = scrapy.Field() #ahah", "= scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions", "'leaves' of d tree def recursive_items(dictionary): ''' Get most nested key:value pair of", "date = scrapy.Field( # when was the post published output_processor=parse_date ) text =", "friends elif lang == 'en': newstring = string[0] #19,298,873 if len(newstring.split()) == 1:", ") source_url = scrapy.Field() url = scrapy.Field() #ahah = scrapy.Field() #love = scrapy.Field()", "recursive_items(value) else: yield (key, value) for key, value in recursive_items(d): flat_d[key] = value", "of d tree def recursive_items(dictionary): ''' Get most nested key:value pair of nested", "class FbcrawlItem(scrapy.Item): source = scrapy.Field() date = scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') )", "of shares url = scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip ) shared_from", "#only retain 'leaves' of d tree def recursive_items(dictionary): ''' 
Get most nested key:value", "loader_context['lang'] if lang == 'it': newstring = string[0] #19.298.873 if len(newstring.split()) == 1:", "date = scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the", "d tree def recursive_items(dictionary): ''' Get most nested key:value pair of nested dict", "# Define here the models for your scraped items # # See documentation", "key:value pair of nested dict ''' for key, value in dictionary.items(): if type(value)", "+ newstring.count(',') # newstring = newstring.split()[::-1][1] # while newstring.rfind(',') != -1: # newstring", "if(string[0] == 'Share'): return '0' new_string = string[0].rstrip(' Comments') while new_string.rfind(',') != -1:", "import datetime, timedelta def comments_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': if", "return int(newstring) + friends # #Philip and 1K others else: return newstring else:", "-1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos i = fullurl.find('/photos/') if i", "d = json.loads(date[0]) #nested dict of features flat_d = dict() #only retain 'leaves'", "= scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field() love = scrapy.Field() wow = scrapy.Field()", "= scrapy.Field() share = scrapy.Field() # num of shares url = scrapy.Field( output_processor=url_strip", "!= -1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring # #Mark and other", "= newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring)", "= scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post reactions = scrapy.Field(", "#catch photos i = fullurl.find('/photos/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0]", "else: return fullurl def parse_date(date): import json d = json.loads(date[0]) #nested dict of", "newstring = 
newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return", "-1: return else: return string[0].rstrip(' commenti') elif lang == 'en': if(string[0] == 'Share'):", "d = json.loads(post_id[::-1][0]) #nested dict of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source =", "datetime import datetime, timedelta def comments_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it':", "newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring) + friends # #Philip and", "return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d = json.loads(post_id[::-1][0]) #nested dict of features", "= scrapy.Field( # when was the post published output_processor=parse_date ) text = scrapy.Field(", "string friends = newstring.count(' e ') + newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.')", ") reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions likes = scrapy.Field(", "string[0].rfind('Commenta') != -1: return else: return string[0].rstrip(' commenti') elif lang == 'en': if(string[0]", "post reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions likes = scrapy.Field(", "== 'Share'): return '0' new_string = string[0].rstrip(' Comments') while new_string.rfind(',') != -1: new_string", "else: return string def reactions_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': newstring", "utf-8 -*- # Define here the models for your scraped items # #", "grrr = scrapy.Field() share = scrapy.Field() # num of shares url = scrapy.Field(", "if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date):", "while new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')] + 
new_string[new_string.rfind(',')+1:] return new_string else: return", "# #Mark and other 254,134 # elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and", "linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d = json.loads(post_id[::-1][0]) #nested", "== 1: while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring", "url = scrapy.Field() #ahah = scrapy.Field() #love = scrapy.Field() #wow = scrapy.Field() #sigh", "= scrapy.Field() grrr = scrapy.Field() share = scrapy.Field() # num of shares url", "output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field() class CommentsItem(scrapy.Item): source", "def reactions_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': newstring = string[0] #19.298.873", "= scrapy.Field() date = scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') ) # full text", "-1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e altri 4", "url = scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field()", "value in recursive_items(d): flat_d[key] = value #returns timestamp in localtime conversion from linux", "newstring else: return string def url_strip(url): fullurl = url[0] #catchin '&id=' is enough", "= scrapy.Field() #ahah = scrapy.Field() #love = scrapy.Field() #wow = scrapy.Field() #sigh =", "num of reactions likes = scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field() love =", "import TakeFirst, Join, MapCompose from datetime import datetime, timedelta def comments_strip(string,loader_context): lang =", "full text of the post reactions = scrapy.Field( output_processor=reactions_strip ) # num of", ") # full text of the post comments = 
scrapy.Field( output_processor=comments_strip ) reactions", "= scrapy.Field() love = scrapy.Field() wow = scrapy.Field() sigh = scrapy.Field() grrr =", "from recursive_items(value) else: yield (key, value) for key, value in recursive_items(d): flat_d[key] =", "!= -1: new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string else: return string def", "likes = scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field() url = scrapy.Field() #ahah =", "reactions likes = scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field() love = scrapy.Field() wow", "new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string else: return string", "if len(newstring.split()) == 1: while newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:]", "= dict() #only retain 'leaves' of d tree def recursive_items(dictionary): ''' Get most", "num of reactions likes = scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field() url =", "lang = loader_context['lang'] if lang == 'it': if string[0].rfind('Commenta') != -1: return else:", "in localtime conversion from linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json", "of features flat_d = dict() #only retain 'leaves' of d tree def recursive_items(dictionary):", "== 1: while newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring", "#love = scrapy.Field() #wow = scrapy.Field() #sigh = scrapy.Field() #grrr = scrapy.Field() #share", "!= -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums i = fullurl.find('/albums/') if", "= fullurl.find('&id=') if i != -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos", "while newstring.rfind(',') != -1: # 
newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring)", "# num of reactions likes = scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field() url", "of reactions likes = scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field() love = scrapy.Field()", "newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:]", "of the post comments = scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip )", "# return int(newstring) + friends # #Philip and 1K others else: return newstring", "flat_d = dict() #only retain 'leaves' of d tree def recursive_items(dictionary): ''' Get", "identify the post i = fullurl.find('&id=') if i != -1: return fullurl[:i+4] +", "for key, value in dictionary.items(): if type(value) is dict: yield from recursive_items(value) else:", "See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors import TakeFirst, Join, MapCompose", "i = fullurl.find('/photos/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch", "+ fullurl[i+8:].split('/?')[0] else: #catch albums i = fullurl.find('/albums/') if i != -1: return", "text of the post reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions", "if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums i =", "elif lang == 'en': newstring = string[0] #19,298,873 if len(newstring.split()) == 1: while", "return new_string else: return string def reactions_strip(string,loader_context): lang = loader_context['lang'] if lang ==", "is dict: yield from recursive_items(value) else: yield (key, value) for key, value in", "for key, value in recursive_items(d): flat_d[key] = value #returns timestamp in localtime conversion", "= 
newstring.count(' and ') + newstring.count(',') # newstring = newstring.split()[::-1][1] # while newstring.rfind(',')", "= scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post comments = scrapy.Field(", "#Pamela, Luigi e altri 4 else: return string friends = newstring.count(' e ')", "the post published output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'') ) # full text", "CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( # when was the post", "shared_from = scrapy.Field() class CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( #", "'en': newstring = string[0] #19,298,873 if len(newstring.split()) == 1: while newstring.rfind(',') != -1:", "# elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and ') + newstring.count(',') # newstring", "fullurl[i+4:].split('&')[0] else: #catch photos i = fullurl.find('/photos/') if i != -1: return fullurl[:i+8]", "i != -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos i = fullurl.find('/photos/')", "share = scrapy.Field() # num of shares url = scrapy.Field( output_processor=url_strip ) post_id", "!= -1: return else: return string[0].rstrip(' commenti') elif lang == 'en': if(string[0] ==", "newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and ') + newstring.count(',') # newstring = newstring.split()[::-1][1]", "= scrapy.Field() class CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( # when", "class CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( # when was the", "conversion from linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d =", "Get most nested key:value pair of nested dict ''' for key, value in", "output_processor=id_strip ) shared_from = scrapy.Field() class 
CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date =", "Luigi e altri 4 else: return string friends = newstring.count(' e ') +", "#nested dict of features flat_d = dict() #only retain 'leaves' of d tree", "= fullurl.find('/albums/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl", "loader_context['lang'] if lang == 'it': if string[0].rfind('Commenta') != -1: return else: return string[0].rstrip('", "def url_strip(url): fullurl = url[0] #catchin '&id=' is enough to identify the post", "items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors import", "''' for key, value in dictionary.items(): if type(value) is dict: yield from recursive_items(value)", "-1: new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string else: return string def reactions_strip(string,loader_context):", "return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field() date = scrapy.Field() text = scrapy.Field(", "#sigh = scrapy.Field() #grrr = scrapy.Field() #share = scrapy.Field() # num of shares", "timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d = json.loads(post_id[::-1][0]) #nested dict", "scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field() class CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date", "len(newstring.split()) == 1: while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return", "# num of shares url = scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip", "Define here the models for your scraped items # # See documentation in:", "int(newstring) + friends elif lang == 'en': newstring = string[0] #19,298,873 if len(newstring.split())", "from linux timestamp UTC 
return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d = json.loads(post_id[::-1][0])", "= url[0] #catchin '&id=' is enough to identify the post i = fullurl.find('&id=')", "#19,298,873 if len(newstring.split()) == 1: while newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')] +", "#Mark and other 254,134 # elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and ')", "return string def url_strip(url): fullurl = url[0] #catchin '&id=' is enough to identify", "datetime, timedelta def comments_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': if string[0].rfind('Commenta')", "(key, value) for key, value in recursive_items(d): flat_d[key] = value #returns timestamp in", "the post reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions likes =", "fullurl.find('/albums/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl def", "return string def reactions_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': newstring =", "new_string = string[0].rstrip(' Comments') while new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:]", "retain 'leaves' of d tree def recursive_items(dictionary): ''' Get most nested key:value pair", "post i = fullurl.find('&id=') if i != -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else:", "import json d = json.loads(post_id[::-1][0]) #nested dict of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item):", "scraped items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors", "#Philip and 1K others else: return newstring else: return string def url_strip(url): fullurl", "and other 254,134 # elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and ') +", "!= -1: newstring = 
newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif lang", "fullurl.find('/photos/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums i", "albums i = fullurl.find('/albums/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else:", "FbcrawlItem(scrapy.Item): source = scrapy.Field() date = scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') ) #", "import json d = json.loads(date[0]) #nested dict of features flat_d = dict() #only", "str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field() date = scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'')", "len(newstring.split()) == 1: while newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return", "full text of the post comments = scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field(", "+ newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e altri 4 else: return string friends", "was the post published output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'') ) # full", "== 'it': if string[0].rfind('Commenta') != -1: return else: return string[0].rstrip(' commenti') elif lang", "= scrapy.Field() sigh = scrapy.Field() grrr = scrapy.Field() share = scrapy.Field() # num", "documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors import TakeFirst, Join, MapCompose from", "in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors import TakeFirst, Join, MapCompose from datetime", "dict ''' for key, value in dictionary.items(): if type(value) is dict: yield from", "# -*- coding: utf-8 -*- # Define here the models for your scraped", "scrapy from scrapy.loader.processors import TakeFirst, Join, MapCompose from datetime import datetime, 
timedelta def", "nested dict ''' for key, value in dictionary.items(): if type(value) is dict: yield", "scrapy.loader.processors import TakeFirst, Join, MapCompose from datetime import datetime, timedelta def comments_strip(string,loader_context): lang", "id_strip(post_id): import json d = json.loads(post_id[::-1][0]) #nested dict of features return str(d['top_level_post_id']) class", "= scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( # when was the post published output_processor=parse_date", "+ newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] +", "to identify the post i = fullurl.find('&id=') if i != -1: return fullurl[:i+4]", "output_processor=reactions_strip ) # num of reactions likes = scrapy.Field( output_processor=reactions_strip ) ahah =", "1: while newstring.rfind(',') != -1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring #", "post published output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of", "# See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors import TakeFirst, Join,", "when was the post published output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'') ) #", "scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field() love = scrapy.Field() wow = scrapy.Field() sigh", "+ friends # #Philip and 1K others else: return newstring else: return string", "lang == 'it': if string[0].rfind('Commenta') != -1: return else: return string[0].rstrip(' commenti') elif", "TakeFirst, Join, MapCompose from datetime import datetime, timedelta def comments_strip(string,loader_context): lang = loader_context['lang']", "return '0' new_string = string[0].rstrip(' Comments') while new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')]", "= 
string[0] #19.298.873 if len(newstring.split()) == 1: while newstring.rfind('.') != -1: newstring =", "# friends = newstring.count(' and ') + newstring.count(',') # newstring = newstring.split()[::-1][1] #", "else: return newstring else: return string def url_strip(url): fullurl = url[0] #catchin '&id='", "== 'en': newstring = string[0] #19,298,873 if len(newstring.split()) == 1: while newstring.rfind(',') !=", "tree def recursive_items(dictionary): ''' Get most nested key:value pair of nested dict '''", "'en': if(string[0] == 'Share'): return '0' new_string = string[0].rstrip(' Comments') while new_string.rfind(',') !=", "dict() #only retain 'leaves' of d tree def recursive_items(dictionary): ''' Get most nested", "+ fullurl[i+4:].split('&')[0] else: #catch photos i = fullurl.find('/photos/') if i != -1: return", "+ newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif lang == 'en': newstring = string[0]", "= loader_context['lang'] if lang == 'it': if string[0].rfind('Commenta') != -1: return else: return", "dictionary.items(): if type(value) is dict: yield from recursive_items(value) else: yield (key, value) for", "photos i = fullurl.find('/photos/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else:", "recursive_items(d): flat_d[key] = value #returns timestamp in localtime conversion from linux timestamp UTC", "scrapy.Field() love = scrapy.Field() wow = scrapy.Field() sigh = scrapy.Field() grrr = scrapy.Field()", "-1: # newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring) + friends #", ") # num of reactions likes = scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field()", "timedelta def comments_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': if string[0].rfind('Commenta') !=", "num of shares url = scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip )", "features flat_d = dict() 
#only retain 'leaves' of d tree def recursive_items(dictionary): '''", "= scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post", "= string[0] #19,298,873 if len(newstring.split()) == 1: while newstring.rfind(',') != -1: newstring =", "key, value in recursive_items(d): flat_d[key] = value #returns timestamp in localtime conversion from", "= json.loads(post_id[::-1][0]) #nested dict of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field()", "return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos i = fullurl.find('/photos/') if i !=", "scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field() class CommentsItem(scrapy.Item):", "lang = loader_context['lang'] if lang == 'it': newstring = string[0] #19.298.873 if len(newstring.split())", "else: yield (key, value) for key, value in recursive_items(d): flat_d[key] = value #returns", "dict of features flat_d = dict() #only retain 'leaves' of d tree def", "'it': if string[0].rfind('Commenta') != -1: return else: return string[0].rstrip(' commenti') elif lang ==", "# newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring) + friends # #Philip", "while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi", "e altri 4 else: return string friends = newstring.count(' e ') + newstring.count(',')", "scrapy.Field( output_processor=reactions_strip ) # num of reactions likes = scrapy.Field( output_processor=reactions_strip ) ahah", "= scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field() class CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field()", "fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch photos i = fullurl.find('/photos/') if i != -1:", "from datetime import datetime, 
timedelta def comments_strip(string,loader_context): lang = loader_context['lang'] if lang ==", "new_string[new_string.rfind(',')+1:] return new_string else: return string def reactions_strip(string,loader_context): lang = loader_context['lang'] if lang", "wow = scrapy.Field() sigh = scrapy.Field() grrr = scrapy.Field() share = scrapy.Field() #", "string[0] #19.298.873 if len(newstring.split()) == 1: while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')]", "i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date): import", "newstring.rfind(',') != -1: # newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring) +", "if type(value) is dict: yield from recursive_items(value) else: yield (key, value) for key,", "return string[0].rstrip(' commenti') elif lang == 'en': if(string[0] == 'Share'): return '0' new_string", "localtime conversion from linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d", "fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date): import json d = json.loads(date[0]) #nested dict", "timestamp in localtime conversion from linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import", "url_strip(url): fullurl = url[0] #catchin '&id=' is enough to identify the post i", "= scrapy.Field() wow = scrapy.Field() sigh = scrapy.Field() grrr = scrapy.Field() share =", "1: while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela,", "reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions likes = scrapy.Field( output_processor=reactions_strip", "= newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring # #Mark and other 254,134 # elif", "'0' new_string = string[0].rstrip(' 
Comments') while new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')] +", "def parse_date(date): import json d = json.loads(date[0]) #nested dict of features flat_d =", "of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field() date = scrapy.Field() text", "scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post comments = scrapy.Field( output_processor=comments_strip", "here the models for your scraped items # # See documentation in: #", "+ friends elif lang == 'en': newstring = string[0] #19,298,873 if len(newstring.split()) ==", "newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring # #Mark and other 254,134 # elif newstring.split()[::-1][1].isdigit():", "for your scraped items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy", "friends # #Philip and 1K others else: return newstring else: return string def", "output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip ) # num of reactions likes =", "others else: return newstring else: return string def url_strip(url): fullurl = url[0] #catchin", "def recursive_items(dictionary): ''' Get most nested key:value pair of nested dict ''' for", "fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums i = fullurl.find('/albums/') if i != -1:", "= scrapy.Field() #wow = scrapy.Field() #sigh = scrapy.Field() #grrr = scrapy.Field() #share =", "output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post", "'it': newstring = string[0] #19.298.873 if len(newstring.split()) == 1: while newstring.rfind('.') != -1:", "fullurl[i+8:].split('/?')[0] else: #catch albums i = fullurl.find('/albums/') if i != -1: return fullurl[:i+8]", "!= -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e altri", 
"string[0].rstrip(' commenti') elif lang == 'en': if(string[0] == 'Share'): return '0' new_string =", "= fullurl.find('/photos/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums", "-*- coding: utf-8 -*- # Define here the models for your scraped items", "friends = newstring.count(' and ') + newstring.count(',') # newstring = newstring.split()[::-1][1] # while", "post comments = scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip ) # num", "scrapy.Field() # num of shares url = scrapy.Field( output_processor=url_strip ) post_id = scrapy.Field(", "scrapy.Field() class CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( # when was", "#returns timestamp in localtime conversion from linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id):", "text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post reactions =", "= json.loads(date[0]) #nested dict of features flat_d = dict() #only retain 'leaves' of", "# full text of the post comments = scrapy.Field( output_processor=comments_strip ) reactions =", "= value #returns timestamp in localtime conversion from linux timestamp UTC return str(datetime.fromtimestamp(flat_d['publish_time']))", "return int(newstring) + friends elif lang == 'en': newstring = string[0] #19,298,873 if", "reactions_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': newstring = string[0] #19.298.873 if", "#catchin '&id=' is enough to identify the post i = fullurl.find('&id=') if i", "i = fullurl.find('&id=') if i != -1: return fullurl[:i+4] + fullurl[i+4:].split('&')[0] else: #catch", "= scrapy.Field( output_processor=reactions_strip ) # num of reactions likes = scrapy.Field( output_processor=reactions_strip )", "scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip ) # num 
of reactions likes", "source = scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( # when was the post published", "models for your scraped items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import", "!= -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date): import json", "if lang == 'it': newstring = string[0] #19.298.873 if len(newstring.split()) == 1: while", "string def reactions_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': newstring = string[0]", "of reactions likes = scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field() url = scrapy.Field()", "import scrapy from scrapy.loader.processors import TakeFirst, Join, MapCompose from datetime import datetime, timedelta", "else: #catch photos i = fullurl.find('/photos/') if i != -1: return fullurl[:i+8] +", "# newstring = newstring.split()[::-1][1] # while newstring.rfind(',') != -1: # newstring = newstring[0:newstring.rfind(',')]", "'Share'): return '0' new_string = string[0].rstrip(' Comments') while new_string.rfind(',') != -1: new_string =", "ahah = scrapy.Field() love = scrapy.Field() wow = scrapy.Field() sigh = scrapy.Field() grrr", "scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field( # when was the post published output_processor=parse_date )", "# full text of the post reactions = scrapy.Field( output_processor=reactions_strip ) # num", "Join, MapCompose from datetime import datetime, timedelta def comments_strip(string,loader_context): lang = loader_context['lang'] if", "newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif lang == 'en': newstring =", "newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) +", "output_processor=Join(separator=u'') ) # full text of the post reactions = scrapy.Field( 
output_processor=reactions_strip )", "') + newstring.count(',') # newstring = newstring.split()[::-1][1] # while newstring.rfind(',') != -1: #", "value) for key, value in recursive_items(d): flat_d[key] = value #returns timestamp in localtime", "altri 4 else: return string friends = newstring.count(' e ') + newstring.count(',') newstring", "= new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string else: return string def reactions_strip(string,loader_context): lang =", "nested key:value pair of nested dict ''' for key, value in dictionary.items(): if", "yield from recursive_items(value) else: yield (key, value) for key, value in recursive_items(d): flat_d[key]", "scrapy.Field() share = scrapy.Field() # num of shares url = scrapy.Field( output_processor=url_strip )", "= newstring.count(' e ') + newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.') != -1:", "# when was the post published output_processor=parse_date ) text = scrapy.Field( output_processor=Join(separator=u'') )", "commenti') elif lang == 'en': if(string[0] == 'Share'): return '0' new_string = string[0].rstrip('", "scrapy.Field() #ahah = scrapy.Field() #love = scrapy.Field() #wow = scrapy.Field() #sigh = scrapy.Field()", "!= -1: # newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring) + friends", ") post_id = scrapy.Field( output_processor=id_strip ) shared_from = scrapy.Field() class CommentsItem(scrapy.Item): source =", "'&id=' is enough to identify the post i = fullurl.find('&id=') if i !=", "yield (key, value) for key, value in recursive_items(d): flat_d[key] = value #returns timestamp", "while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) + friends", "newstring = string[0] #19.298.873 if len(newstring.split()) == 1: while newstring.rfind('.') != -1: newstring", "return else: return 
string[0].rstrip(' commenti') elif lang == 'en': if(string[0] == 'Share'): return", "fullurl = url[0] #catchin '&id=' is enough to identify the post i =", "UTC return str(datetime.fromtimestamp(flat_d['publish_time'])) def id_strip(post_id): import json d = json.loads(post_id[::-1][0]) #nested dict of", "text of the post comments = scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip", "in dictionary.items(): if type(value) is dict: yield from recursive_items(value) else: yield (key, value)", "# # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors import TakeFirst,", "#ahah = scrapy.Field() #love = scrapy.Field() #wow = scrapy.Field() #sigh = scrapy.Field() #grrr", "if len(newstring.split()) == 1: while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:]", "Comments') while new_string.rfind(',') != -1: new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string else:", "and 1K others else: return newstring else: return string def url_strip(url): fullurl =", "from scrapy.loader.processors import TakeFirst, Join, MapCompose from datetime import datetime, timedelta def comments_strip(string,loader_context):", "# while newstring.rfind(',') != -1: # newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return", "output_processor=reactions_strip ) source_url = scrapy.Field() url = scrapy.Field() #ahah = scrapy.Field() #love =", "return string friends = newstring.count(' e ') + newstring.count(',') newstring = newstring.split()[::-1][0] while", "lang == 'en': newstring = string[0] #19,298,873 if len(newstring.split()) == 1: while newstring.rfind(',')", "scrapy.Field() text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post comments", "elif lang == 'en': if(string[0] == 'Share'): return '0' 
new_string = string[0].rstrip(' Comments')", "254,134 # elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and ') + newstring.count(',') #", "fullurl def parse_date(date): import json d = json.loads(date[0]) #nested dict of features flat_d", "text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post comments =", ") shared_from = scrapy.Field() class CommentsItem(scrapy.Item): source = scrapy.Field() reply_to=scrapy.Field() date = scrapy.Field(", "== 'en': if(string[0] == 'Share'): return '0' new_string = string[0].rstrip(' Comments') while new_string.rfind(',')", "return newstring else: return string def url_strip(url): fullurl = url[0] #catchin '&id=' is", "newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e altri 4 else:", "newstring.count(' and ') + newstring.count(',') # newstring = newstring.split()[::-1][1] # while newstring.rfind(',') !=", "+ fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date): import json d = json.loads(date[0]) #nested", "= loader_context['lang'] if lang == 'it': newstring = string[0] #19.298.873 if len(newstring.split()) ==", "json d = json.loads(post_id[::-1][0]) #nested dict of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source", "source_url = scrapy.Field() url = scrapy.Field() #ahah = scrapy.Field() #love = scrapy.Field() #wow", "value in dictionary.items(): if type(value) is dict: yield from recursive_items(value) else: yield (key,", "-1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums i = fullurl.find('/albums/') if i", "pair of nested dict ''' for key, value in dictionary.items(): if type(value) is", "is enough to identify the post i = fullurl.find('&id=') if i != -1:", "scrapy.Field() #wow = scrapy.Field() #sigh = scrapy.Field() #grrr = scrapy.Field() #share = scrapy.Field()", "newstring[newstring.rfind(',')+1:] # return int(newstring) + friends 
# #Philip and 1K others else: return", "reply_to=scrapy.Field() date = scrapy.Field( # when was the post published output_processor=parse_date ) text", "-1: newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] return newstring # #Mark and other 254,134", "new_string else: return string def reactions_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it':", "newstring = newstring.split()[::-1][1] # while newstring.rfind(',') != -1: # newstring = newstring[0:newstring.rfind(',')] +", "scrapy.Field() sigh = scrapy.Field() grrr = scrapy.Field() share = scrapy.Field() # num of", "lang == 'it': newstring = string[0] #19.298.873 if len(newstring.split()) == 1: while newstring.rfind('.')", "#nested dict of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field() date =", "= newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e altri 4 else: return", "friends = newstring.count(' e ') + newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.') !=", "i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums i = fullurl.find('/albums/')", "key, value in dictionary.items(): if type(value) is dict: yield from recursive_items(value) else: yield", "= scrapy.Field( output_processor=reactions_strip ) source_url = scrapy.Field() url = scrapy.Field() #ahah = scrapy.Field()", "likes = scrapy.Field( output_processor=reactions_strip ) ahah = scrapy.Field() love = scrapy.Field() wow =", "newstring.split()[::-1][1] # while newstring.rfind(',') != -1: # newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] #", ") text = scrapy.Field( output_processor=Join(separator=u'') ) # full text of the post reactions", "return fullurl def parse_date(date): import json d = json.loads(date[0]) #nested dict of features", "# https://doc.scrapy.org/en/latest/topics/items.html import 
scrapy from scrapy.loader.processors import TakeFirst, Join, MapCompose from datetime import", "return newstring # #Mark and other 254,134 # elif newstring.split()[::-1][1].isdigit(): # friends =", "-1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date): import json d", "if lang == 'it': if string[0].rfind('Commenta') != -1: return else: return string[0].rstrip(' commenti')", "https://doc.scrapy.org/en/latest/topics/items.html import scrapy from scrapy.loader.processors import TakeFirst, Join, MapCompose from datetime import datetime,", "newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:] # return int(newstring) + friends # #Philip and 1K others", "comments = scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip ) # num of", "comments_strip(string,loader_context): lang = loader_context['lang'] if lang == 'it': if string[0].rfind('Commenta') != -1: return", "-*- # Define here the models for your scraped items # # See", "return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date): import json d =", "the post comments = scrapy.Field( output_processor=comments_strip ) reactions = scrapy.Field( output_processor=reactions_strip ) #", "i = fullurl.find('/albums/') if i != -1: return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return", "newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif lang == 'en':", "#19.298.873 if len(newstring.split()) == 1: while newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] +", "MapCompose from datetime import datetime, timedelta def comments_strip(string,loader_context): lang = loader_context['lang'] if lang", "new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string else: return string def reactions_strip(string,loader_context): lang = loader_context['lang']", ") 
ahah = scrapy.Field() love = scrapy.Field() wow = scrapy.Field() sigh = scrapy.Field()", "if string[0].rfind('Commenta') != -1: return else: return string[0].rstrip(' commenti') elif lang == 'en':", "= scrapy.Field() #love = scrapy.Field() #wow = scrapy.Field() #sigh = scrapy.Field() #grrr =", "the models for your scraped items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html", "love = scrapy.Field() wow = scrapy.Field() sigh = scrapy.Field() grrr = scrapy.Field() share", "scrapy.Field() #sigh = scrapy.Field() #grrr = scrapy.Field() #share = scrapy.Field() # num of", "newstring[newstring.rfind(',')+1:] return newstring # #Mark and other 254,134 # elif newstring.split()[::-1][1].isdigit(): # friends", "dict: yield from recursive_items(value) else: yield (key, value) for key, value in recursive_items(d):", "newstring # #Mark and other 254,134 # elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count('", "= newstring.split()[::-1][1] # while newstring.rfind(',') != -1: # newstring = newstring[0:newstring.rfind(',')] + newstring[newstring.rfind(',')+1:]", "scrapy.Field( output_processor=reactions_strip ) # num of reactions likes = scrapy.Field( output_processor=reactions_strip ) source_url", "newstring.count(',') # newstring = newstring.split()[::-1][1] # while newstring.rfind(',') != -1: # newstring =", "else: return string def url_strip(url): fullurl = url[0] #catchin '&id=' is enough to", "json.loads(post_id[::-1][0]) #nested dict of features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field() date", "newstring #Pamela, Luigi e altri 4 else: return string friends = newstring.count(' e", "and ') + newstring.count(',') # newstring = newstring.split()[::-1][1] # while newstring.rfind(',') != -1:", "newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return newstring #Pamela, Luigi e", "of nested dict ''' for key, value in 
dictionary.items(): if type(value) is dict:", "newstring.count(' e ') + newstring.count(',') newstring = newstring.split()[::-1][0] while newstring.rfind('.') != -1: newstring", "features return str(d['top_level_post_id']) class FbcrawlItem(scrapy.Item): source = scrapy.Field() date = scrapy.Field() text =", "new_string = new_string[0:new_string.rfind(',')] + new_string[new_string.rfind(',')+1:] return new_string else: return string def reactions_strip(string,loader_context): lang", "fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: return fullurl def parse_date(date): import json d = json.loads(date[0])", "elif newstring.split()[::-1][1].isdigit(): # friends = newstring.count(' and ') + newstring.count(',') # newstring =", "return fullurl[:i+8] + fullurl[i+8:].split('/?')[0] else: #catch albums i = fullurl.find('/albums/') if i !=", "newstring.rfind('.') != -1: newstring = newstring[0:newstring.rfind('.')] + newstring[newstring.rfind('.')+1:] return int(newstring) + friends elif" ]
[ "from messagebird.base import Base class Webhook(Base): def __init__(self): self.url = None self.token =", "messagebird.base import Base class Webhook(Base): def __init__(self): self.url = None self.token = None" ]
[ "'\"Status\":\"Unpaused\"' if line: stri += ', \"Line_X\":' + str(line[2]) + ', \"Line_Y\":' +", "stri += ', \"Line_X\":' + str(line[2]) + ', \"Line_Y\":' + str(line[3]) else: stri", "been reworked to output to a separate filehandle pointing # to the socket", "str(self.throttle) + ',\"Steering\":' + \\ str(self.steering) stri += ',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":'", "I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs \" + os.path.realpath(__file__),", "dummy_car_control() f = line_follower.mjs_filter(dc) print(\"Returning process\") return f.process # This class houses the", "1) self.status_out.connect((\"\", self.status_port)) # This filehandle sends the data to the socket broadcast", "car_control class class dummy_car_control(): def __init__(self): ## Commented per jkridner's advice import car_control", "multiple sockets where they are used. import line_follower dc = dummy_car_control() f =", "print(\"Returning process\") return f.process # This class houses the car_control class class dummy_car_control():", "a separate filehandle pointing # to the socket 3004, output to the dashboard", "and executes the running code def init_filter(): ## Socket streams that were here", "def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO: Add notification if either mjpg-streamer or", "self.fps) = \\ self.c.update(line) # Code has been reworked to output to a", "of #, stderr=subprocess.PIPE #responses from the system on commandline ) if __name__ ==", "pointing # to the socket 3004, output to the dashboard under 'Status' #", "are ## now moved to multiple sockets where they are used. 
import line_follower", "separate filehandle pointing # to the socket 3004, output to the dashboard under", "output below to be a JSON string stri = \"{\" if self.paused: stri", "-w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #, stdout=subprocess.PIPE #Commented to allow visibility of #, stderr=subprocess.PIPE #responses", "command run above. # This is what calls and executes the running code", ") if __name__ == \"__main__\": start_mjpg_streamer() # This method is called by the", "#import cgroups def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO: Add notification if either", "method below self.status_port = 3004 self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\",", "if line: stri += ', \"Line_X\":' + str(line[2]) + ', \"Line_Y\":' + str(line[3])", "-p 8090 -w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #, stdout=subprocess.PIPE #Commented to allow visibility of #,", "seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs \" + os.path.realpath(__file__), \"-o\",", "per jkridner's advice import car_control self.c = car_control.car_control() #Output for the status in", "self.paused: stri += '\"Status\":\"Paused\"' else: stri += '\"Status\":\"Unpaused\"' if line: stri += ',", "line, threshold): (self.paused, self.throttle, self.steering, self.fps) = \\ self.c.update(line) # Code has been", "def __init__(self): ## Commented per jkridner's advice import car_control self.c = car_control.car_control() #Output", "\"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' + \\ str(self.steering) stri", "allow visibility of #, stderr=subprocess.PIPE #responses from the system on commandline ) if", "the car_control class class dummy_car_control(): def __init__(self): ## Commented per jkridner's advice import", "# 
such as the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480", "socket #import cgroups def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO: Add notification if", "JSON string stri = \"{\" if self.paused: stri += '\"Status\":\"Paused\"' else: stri +=", "\\ str(self.steering) stri += ',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":' + \\ str(threshold) +", "output to the dashboard under 'Status' # Replaced the Status output below to", "\\ self.c.update(line) # Code has been reworked to output to a separate filehandle", "threshold): (self.paused, self.throttle, self.steering, self.fps) = \\ self.c.update(line) # Code has been reworked", "if process exits, # such as the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\",", "(self.paused, self.throttle, self.steering, self.fps) = \\ self.c.update(line) # Code has been reworked to", "+= ',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":' + \\ str(threshold) + '}' print(stri, \"\\r\",", "data to the socket broadcast self.status_file = self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick() return", "process\") return f.process # This class houses the car_control class class dummy_car_control(): def", "Replaced the Status output below to be a JSON string stri = \"{\"", "str(line[3]) else: stri += ', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' +", "\" + os.path.realpath(__file__), \"-o\", \"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #, stdout=subprocess.PIPE #Commented", "+ str(line[2]) + ', \"Line_Y\":' + str(line[3]) else: stri += ', \"Line_X\":\"No Line\",", "import car_control self.c = car_control.car_control() #Output for the status in update method below", "else: stri += '\"Status\":\"Unpaused\"' if line: stri += ', \"Line_X\":' + str(line[2]) +", "to the socket broadcast self.status_file = self.status_out.makefile('w', buffering=None) def 
tick(self): self.c.tick() return def", "def init_filter(): ## Socket streams that were here previously are ## now moved", "calls and executes the running code def init_filter(): ## Socket streams that were", "/usr/lib/mjpg-streamer/cvfilter_py.so --fargs \" + os.path.realpath(__file__), \"-o\", \"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #,", "# TODO: Detect any error if process exits, # such as the uvcvideo", "streams that were here previously are ## now moved to multiple sockets where", "such as the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480 --filter", "import os, sys, subprocess, socket #import cgroups def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") #", "either mjpg-streamer or # cvfilter_py.so aren't installed # TODO: Detect any error if", "f.process # This class houses the car_control class class dummy_car_control(): def __init__(self): ##", "= dummy_car_control() f = line_follower.mjs_filter(dc) print(\"Returning process\") return f.process # This class houses", "error if process exits, # such as the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\",", "in update method below self.status_port = 3004 self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,", "the system on commandline ) if __name__ == \"__main__\": start_mjpg_streamer() # This method", "the mjpg_streamer command run above. 
# This is what calls and executes the", "self.status_port)) # This filehandle sends the data to the socket broadcast self.status_file =", "self.c = car_control.car_control() #Output for the status in update method below self.status_port =", "This filehandle sends the data to the socket broadcast self.status_file = self.status_out.makefile('w', buffering=None)", "= self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick() return def update(self, line, threshold): (self.paused, self.throttle,", "start_mjpg_streamer() # This method is called by the mjpg_streamer command run above. #", "executes the running code def init_filter(): ## Socket streams that were here previously", "is called by the mjpg_streamer command run above. # This is what calls", "#, stderr=subprocess.PIPE #responses from the system on commandline ) if __name__ == \"__main__\":", "## Socket streams that were here previously are ## now moved to multiple", "',\"Min_Threshold\":' + \\ str(threshold) + '}' print(stri, \"\\r\", end=\"\", flush=True, file=self.status_file) return \"\"", "self.throttle, self.steering, self.fps) = \\ self.c.update(line) # Code has been reworked to output", "the Status output below to be a JSON string stri = \"{\" if", "+ ',\"Steering\":' + \\ str(self.steering) stri += ',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":' +", "\"Line_Y\":' + str(line[3]) else: stri += ', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri +=", "',\"Steering\":' + \\ str(self.steering) stri += ',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":' + \\", "str(self.steering) stri += ',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":' + \\ str(threshold) + '}'", "= \"{\" if self.paused: stri += '\"Status\":\"Paused\"' else: stri += '\"Status\":\"Unpaused\"' if line:", "exits, # such as the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r", "tick(self): self.c.tick() return def update(self, line, threshold): (self.paused, self.throttle, 
self.steering, self.fps) = \\", "Add notification if either mjpg-streamer or # cvfilter_py.so aren't installed # TODO: Detect", "where they are used. import line_follower dc = dummy_car_control() f = line_follower.mjs_filter(dc) print(\"Returning", "else: stri += ', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' + str(self.throttle)", "+ str(self.fps) + ',\"Min_Threshold\":' + \\ str(threshold) + '}' print(stri, \"\\r\", end=\"\", flush=True,", "+ ', \"Line_Y\":' + str(line[3]) else: stri += ', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"'", "filehandle pointing # to the socket 3004, output to the dashboard under 'Status'", "def update(self, line, threshold): (self.paused, self.throttle, self.steering, self.fps) = \\ self.c.update(line) # Code", "houses the car_control class class dummy_car_control(): def __init__(self): ## Commented per jkridner's advice", "--filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs \" + os.path.realpath(__file__), \"-o\", \"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE", "previously are ## now moved to multiple sockets where they are used. import", "to multiple sockets where they are used. import line_follower dc = dummy_car_control() f", "3004 self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\", self.status_port)) # This filehandle", "os, sys, subprocess, socket #import cgroups def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO:", "stri += '\"Status\":\"Unpaused\"' if line: stri += ', \"Line_X\":' + str(line[2]) + ',", "stri += '\"Status\":\"Paused\"' else: stri += '\"Status\":\"Unpaused\"' if line: stri += ', \"Line_X\":'", "now moved to multiple sockets where they are used. 
import line_follower dc =", "on commandline ) if __name__ == \"__main__\": start_mjpg_streamer() # This method is called", "reworked to output to a separate filehandle pointing # to the socket 3004,", "Line\"' stri += ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' + \\ str(self.steering) stri +=", "= socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\", self.status_port)) # This filehandle sends the", "the socket broadcast self.status_file = self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick() return def update(self,", "\"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' + \\", "method is called by the mjpg_streamer command run above. # This is what", "aren't installed # TODO: Detect any error if process exits, # such as", "output to a separate filehandle pointing # to the socket 3004, output to", "/usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #, stdout=subprocess.PIPE #Commented to allow visibility of #, stderr=subprocess.PIPE #responses from", "#Output for the status in update method below self.status_port = 3004 self.status_out =", "\"__main__\": start_mjpg_streamer() # This method is called by the mjpg_streamer command run above.", "dc = dummy_car_control() f = line_follower.mjs_filter(dc) print(\"Returning process\") return f.process # This class", "# Replaced the Status output below to be a JSON string stri =", "line_follower dc = dummy_car_control() f = line_follower.mjs_filter(dc) print(\"Returning process\") return f.process # This", "subprocess, socket #import cgroups def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO: Add notification", "str(line[2]) + ', \"Line_Y\":' + str(line[3]) else: stri += ', \"Line_X\":\"No Line\", \"Line_Y\":\"No", "mjpg_streamer command run above. 
# This is what calls and executes the running", "+= ', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":'", "mjpg_streamer.\") # TODO: Add notification if either mjpg-streamer or # cvfilter_py.so aren't installed", "', \"Line_Y\":' + str(line[3]) else: stri += ', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri", "filehandle sends the data to the socket broadcast self.status_file = self.status_out.makefile('w', buffering=None) def", "self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\", self.status_port)) # This filehandle sends", "',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' + \\ str(self.steering) stri += ',\"FPS\":' + str(self.fps)", "Commented per jkridner's advice import car_control self.c = car_control.car_control() #Output for the status", "dashboard under 'Status' # Replaced the Status output below to be a JSON", "process exits, # such as the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so", "subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs \" + os.path.realpath(__file__), \"-o\", \"output_http.so", "#!/usr/bin/env python3 import os, sys, subprocess, socket #import cgroups def start_mjpg_streamer(): print(\"Starting up", "'Status' # Replaced the Status output below to be a JSON string stri", "+= '\"Status\":\"Unpaused\"' if line: stri += ', \"Line_X\":' + str(line[2]) + ', \"Line_Y\":'", "# cvfilter_py.so aren't installed # TODO: Detect any error if process exits, #", "stri += ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' + \\ str(self.steering) stri += ',\"FPS\":'", "\"-o\", \"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #, stdout=subprocess.PIPE #Commented to allow visibility", "3004, output to the 
dashboard under 'Status' # Replaced the Status output below", "#, stdout=subprocess.PIPE #Commented to allow visibility of #, stderr=subprocess.PIPE #responses from the system", "', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' +", "advice import car_control self.c = car_control.car_control() #Output for the status in update method", "string stri = \"{\" if self.paused: stri += '\"Status\":\"Paused\"' else: stri += '\"Status\":\"Unpaused\"'", "the socket 3004, output to the dashboard under 'Status' # Replaced the Status", "any error if process exits, # such as the uvcvideo crash I'm seeing", "running code def init_filter(): ## Socket streams that were here previously are ##", "== \"__main__\": start_mjpg_streamer() # This method is called by the mjpg_streamer command run", "status in update method below self.status_port = 3004 self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET,", "if __name__ == \"__main__\": start_mjpg_streamer() # This method is called by the mjpg_streamer", "as the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so", "return f.process # This class houses the car_control class class dummy_car_control(): def __init__(self):", "Socket streams that were here previously are ## now moved to multiple sockets", "__init__(self): ## Commented per jkridner's advice import car_control self.c = car_control.car_control() #Output for", "# This is what calls and executes the running code def init_filter(): ##", "\"Line_X\":' + str(line[2]) + ', \"Line_Y\":' + str(line[3]) else: stri += ', \"Line_X\":\"No", "is what calls and executes the running code def init_filter(): ## Socket streams", "class class dummy_car_control(): def __init__(self): ## Commented per jkridner's advice import car_control self.c", "Code has been reworked to output to a 
separate filehandle pointing # to", "# to the socket 3004, output to the dashboard under 'Status' # Replaced", "notification if either mjpg-streamer or # cvfilter_py.so aren't installed # TODO: Detect any", "by the mjpg_streamer command run above. # This is what calls and executes", "sys, subprocess, socket #import cgroups def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO: Add", "--fargs \" + os.path.realpath(__file__), \"-o\", \"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #, stdout=subprocess.PIPE", "self.c.update(line) # Code has been reworked to output to a separate filehandle pointing", "to output to a separate filehandle pointing # to the socket 3004, output", "installed # TODO: Detect any error if process exits, # such as the", "called by the mjpg_streamer command run above. # This is what calls and", "broadcast self.status_file = self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick() return def update(self, line, threshold):", "car_control.car_control() #Output for the status in update method below self.status_port = 3004 self.status_out", "buffering=None) def tick(self): self.c.tick() return def update(self, line, threshold): (self.paused, self.throttle, self.steering, self.fps)", "update(self, line, threshold): (self.paused, self.throttle, self.steering, self.fps) = \\ self.c.update(line) # Code has", "stri += ', \"Line_X\":\"No Line\", \"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' + str(self.throttle) +", "under 'Status' # Replaced the Status output below to be a JSON string", "init_filter(): ## Socket streams that were here previously are ## now moved to", "= line_follower.mjs_filter(dc) print(\"Returning process\") return f.process # This class houses the car_control class", "above. 
# This is what calls and executes the running code def init_filter():", "car_control self.c = car_control.car_control() #Output for the status in update method below self.status_port", "socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\", self.status_port)) # This filehandle sends the data", "the running code def init_filter(): ## Socket streams that were here previously are", "be a JSON string stri = \"{\" if self.paused: stri += '\"Status\":\"Paused\"' else:", "if self.paused: stri += '\"Status\":\"Paused\"' else: stri += '\"Status\":\"Unpaused\"' if line: stri +=", "up mjpg_streamer.\") # TODO: Add notification if either mjpg-streamer or # cvfilter_py.so aren't", "This is what calls and executes the running code def init_filter(): ## Socket", "the uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs", "# This class houses the car_control class class dummy_car_control(): def __init__(self): ## Commented", "from the system on commandline ) if __name__ == \"__main__\": start_mjpg_streamer() # This", "mjpg-streamer or # cvfilter_py.so aren't installed # TODO: Detect any error if process", "sends the data to the socket broadcast self.status_file = self.status_out.makefile('w', buffering=None) def tick(self):", "below self.status_port = 3004 self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\", self.status_port))", "the data to the socket broadcast self.status_file = self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick()", "uvcvideo crash I'm seeing subprocess.run([\"mjpg_streamer\", \"-i\", \"input_opencv.so -r 640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs \"", "to a separate filehandle pointing # to the socket 3004, output 
to the", "+= '\"Status\":\"Paused\"' else: stri += '\"Status\":\"Unpaused\"' if line: stri += ', \"Line_X\":' +", "640x480 --filter /usr/lib/mjpg-streamer/cvfilter_py.so --fargs \" + os.path.realpath(__file__), \"-o\", \"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www\"],", "socket 3004, output to the dashboard under 'Status' # Replaced the Status output", "to allow visibility of #, stderr=subprocess.PIPE #responses from the system on commandline )", "moved to multiple sockets where they are used. import line_follower dc = dummy_car_control()", "Status output below to be a JSON string stri = \"{\" if self.paused:", "or # cvfilter_py.so aren't installed # TODO: Detect any error if process exits,", "system on commandline ) if __name__ == \"__main__\": start_mjpg_streamer() # This method is", "were here previously are ## now moved to multiple sockets where they are", "+= ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' + \\ str(self.steering) stri += ',\"FPS\":' +", "self.status_port = 3004 self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\", self.status_port)) #", "# TODO: Add notification if either mjpg-streamer or # cvfilter_py.so aren't installed #", "code def init_filter(): ## Socket streams that were here previously are ## now", "cgroups def start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO: Add notification if either mjpg-streamer", "socket broadcast self.status_file = self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick() return def update(self, line,", "Detect any error if process exits, # such as the uvcvideo crash I'm", "class dummy_car_control(): def __init__(self): ## Commented per jkridner's advice import car_control self.c =", "stri = \"{\" if self.paused: stri += '\"Status\":\"Paused\"' else: stri += '\"Status\":\"Unpaused\"' if", "TODO: Add notification if either mjpg-streamer or # 
cvfilter_py.so aren't installed # TODO:", "line_follower.mjs_filter(dc) print(\"Returning process\") return f.process # This class houses the car_control class class", "the dashboard under 'Status' # Replaced the Status output below to be a", "visibility of #, stderr=subprocess.PIPE #responses from the system on commandline ) if __name__", "= \\ self.c.update(line) # Code has been reworked to output to a separate", "',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":' + \\ str(threshold) + '}' print(stri, \"\\r\", end=\"\",", "stdin=subprocess.PIPE #, stdout=subprocess.PIPE #Commented to allow visibility of #, stderr=subprocess.PIPE #responses from the", "they are used. import line_follower dc = dummy_car_control() f = line_follower.mjs_filter(dc) print(\"Returning process\")", "socket.SOCK_DGRAM) self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.status_out.connect((\"\", self.status_port)) # This filehandle sends the data to", "#responses from the system on commandline ) if __name__ == \"__main__\": start_mjpg_streamer() #", "return def update(self, line, threshold): (self.paused, self.throttle, self.steering, self.fps) = \\ self.c.update(line) #", "jkridner's advice import car_control self.c = car_control.car_control() #Output for the status in update", "', \"Line_X\":' + str(line[2]) + ', \"Line_Y\":' + str(line[3]) else: stri += ',", "Line\", \"Line_Y\":\"No Line\"' stri += ',\"Throttle\":' + str(self.throttle) + ',\"Steering\":' + \\ str(self.steering)", "+ ',\"Min_Threshold\":' + \\ str(threshold) + '}' print(stri, \"\\r\", end=\"\", flush=True, file=self.status_file) return", "self.status_file = self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick() return def update(self, line, threshold): (self.paused,", "# This method is called by the mjpg_streamer command run above. # This", "This method is called by the mjpg_streamer command run above. 
# This is", "This class houses the car_control class class dummy_car_control(): def __init__(self): ## Commented per", "for the status in update method below self.status_port = 3004 self.status_out = socket.socket(socket.AF_INET,", "# This filehandle sends the data to the socket broadcast self.status_file = self.status_out.makefile('w',", "here previously are ## now moved to multiple sockets where they are used.", "+ os.path.realpath(__file__), \"-o\", \"output_http.so -p 8090 -w /usr/share/mjpg-streamer/www\"], stdin=subprocess.PIPE #, stdout=subprocess.PIPE #Commented to", "stderr=subprocess.PIPE #responses from the system on commandline ) if __name__ == \"__main__\": start_mjpg_streamer()", "self.status_out.makefile('w', buffering=None) def tick(self): self.c.tick() return def update(self, line, threshold): (self.paused, self.throttle, self.steering,", "# Code has been reworked to output to a separate filehandle pointing #", "str(self.fps) + ',\"Min_Threshold\":' + \\ str(threshold) + '}' print(stri, \"\\r\", end=\"\", flush=True, file=self.status_file)", "start_mjpg_streamer(): print(\"Starting up mjpg_streamer.\") # TODO: Add notification if either mjpg-streamer or #", "+= ', \"Line_X\":' + str(line[2]) + ', \"Line_Y\":' + str(line[3]) else: stri +=", "f = line_follower.mjs_filter(dc) print(\"Returning process\") return f.process # This class houses the car_control", "+ \\ str(self.steering) stri += ',\"FPS\":' + str(self.fps) + ',\"Min_Threshold\":' + \\ str(threshold)", "sockets where they are used. 
# This method is called by the mjpg_streamer command run above.
# This is what calls and executes the running code
def init_filter():
    """Plugin entry point invoked by mjpg_streamer's cvfilter_py.so.

    Returns the per-frame processing callable of the line follower.

    ## Socket streams that were here previously are
    ## now moved to multiple sockets where they are used.
    """
    import line_follower

    controller = dummy_car_control()
    follower = line_follower.mjs_filter(controller)
    print("Returning process")
    return follower.process
# This class houses the car_control class
class dummy_car_control():
    """Adapter between the line-follower filter and car_control.

    Forwards tick/update calls to the real controller and broadcasts a
    hand-built JSON status string over UDP for the dashboard.
    """

    def __init__(self):
        ## Commented per jkridner's advice
        import car_control
        self.c = car_control.car_control()
        # Output for the status in the update method below: a UDP broadcast
        # socket on port 3004.
        self.status_port = 3004
        self.status_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.status_out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.status_out.connect(("", self.status_port))
        # This filehandle sends the data to the socket broadcast, so
        # update() can simply print() to it.
        self.status_file = self.status_out.makefile('w', buffering=None)

    def tick(self):
        """Forward the periodic tick to the underlying car controller."""
        self.c.tick()

    def update(self, line, threshold):
        """Feed one detected line into the controller and broadcast status.

        Output goes to the socket on port 3004 and appears on the dashboard
        under 'Status' as a JSON string.
        """
        (self.paused, self.throttle, self.steering, self.fps) = \
            self.c.update(line)
        parts = ["{"]
        parts.append('"Status":"Paused"' if self.paused else '"Status":"Unpaused"')
        if line:
            parts.append(', "Line_X":' + str(line[2]) + ', "Line_Y":' + str(line[3]))
        else:
            parts.append(', "Line_X":"No Line", "Line_Y":"No Line"')
        parts.append(',"Throttle":' + str(self.throttle) + ',"Steering":' + str(self.steering))
        parts.append(',"FPS":' + str(self.fps) + ',"Min_Threshold":' + str(threshold) + '}')
        print("".join(parts), "\r", end="", flush=True, file=self.status_file)
[ "if markets: for market, detail in markets.items(): if 'USD' in market: print(market) ohlcvs", "''' # print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets() if markets: for market,", "60 * 1000 params = {'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24,", "False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret) ''' # print(huobi.id, huobi.load_markets())", "since, 12*24, params) print(ret) ''' # print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets()", "limit=100) if ohlcvs: for ohlcv in ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers", "huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets() if markets: for market, detail in markets.items():", "ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol = 'BTC/USDT' print(symbol, gateio.fetch_ohlcv(symbol, '1d')) #", "gateio.load_markets() if markets: for market, detail in markets.items(): if 'USD' in market: print(market)", "timeframe, since, 12*24, params) print(ret) ''' # print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets =", "print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol = 'BTC/USDT' print(symbol, gateio.fetch_ohlcv(symbol, '1d'))", "time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol = 'BTC/USDT' print(symbol, gateio.fetch_ohlcv(symbol,", "= gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol = 'BTC/USDT' print(symbol, gateio.fetch_ohlcv(symbol, '1d')) # one day", "if 'USD' in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for", "<filename>tests/ccxt_test.py # -*- coding: utf-8 -*- import time import ccxt gateio = ccxt.gateio({", "= bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret) ''' # print(huobi.id, huobi.load_markets()) ''' 
print(gateio.id)", "gateio = ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol", "print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets() if markets: for market, detail in", "{'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret) ''' # print(huobi.id,", "ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for ohlcv in ohlcvs: t =", "= gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for ohlcv in ohlcvs: t = time.localtime(ohlcv[0]/1000)", "''' symbol = 'ETH/USD' timeframe = '5m' limit = 300 since = bitmex.milliseconds()", "{ 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol = 'ETH/USD' timeframe =", "= ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol =", "utf-8 -*- import time import ccxt gateio = ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080',", "print(gateio.id) markets = gateio.load_markets() if markets: for market, detail in markets.items(): if 'USD'", "in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for ohlcv in", "ccxt gateio = ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) '''", "= bitmex.milliseconds() - limit * 60 * 1000 params = {'partial': False} ret", "= gateio.load_markets() if markets: for market, detail in markets.items(): if 'USD' in market:", "'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol = 'ETH/USD' timeframe", "market, detail in markets.items(): if 'USD' in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d',", "ohlcvs: for ohlcv in ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT')", "in markets.items(): if 
'USD' in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if", "- limit * 60 * 1000 params = {'partial': False} ret = bitmex.fetch_ohlcv(symbol,", "ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret) ''' # print(huobi.id, huobi.load_markets()) '''", "'socks5h://127.0.0.1:1080' }, }) ''' symbol = 'ETH/USD' timeframe = '5m' limit = 300", "limit = 300 since = bitmex.milliseconds() - limit * 60 * 1000 params", "gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for ohlcv in ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t,", "= '5m' limit = 300 since = bitmex.milliseconds() - limit * 60 *", "timeframe = '5m' limit = 300 since = bitmex.milliseconds() - limit * 60", "# -*- coding: utf-8 -*- import time import ccxt gateio = ccxt.gateio({ 'proxies':", "'5m' limit = 300 since = bitmex.milliseconds() - limit * 60 * 1000", "for ohlcv in ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers)", "'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol = 'ETH/USD' timeframe = '5m' limit", "params) print(ret) ''' # print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets() if markets:", "12*24, params) print(ret) ''' # print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets() if", "# print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets() if markets: for market, detail", "ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol =", "params = {'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret) '''", "since = bitmex.milliseconds() - limit * 60 * 1000 params = {'partial': False}", "limit * 60 * 1000 params = {'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe,", "symbol = 'ETH/USD' timeframe = '5m' 
limit = 300 since = bitmex.milliseconds() -", "coding: utf-8 -*- import time import ccxt gateio = ccxt.gateio({ 'proxies': { 'http':", "print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for ohlcv in ohlcvs: t", "-*- import time import ccxt gateio = ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https':", "detail in markets.items(): if 'USD' in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100)", "1000 params = {'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret)", "-*- coding: utf-8 -*- import time import ccxt gateio = ccxt.gateio({ 'proxies': {", "t = time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol = 'BTC/USDT'", "'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol = 'ETH/USD' timeframe = '5m'", "markets: for market, detail in markets.items(): if 'USD' in market: print(market) ohlcvs =", "import time import ccxt gateio = ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080'", "= {'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret) ''' #", "market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for ohlcv in ohlcvs:", "''' print(gateio.id) markets = gateio.load_markets() if markets: for market, detail in markets.items(): if", "in ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol", "markets.items(): if 'USD' in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs:", "}, }) ''' symbol = 'ETH/USD' timeframe = '5m' limit = 300 since", "'USD' in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT', timeframe='1d', limit=100) if ohlcvs: for ohlcv", "ohlcv in ohlcvs: t = 
time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) '''", "if ohlcvs: for ohlcv in ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers =", "print(ret) ''' # print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets = gateio.load_markets() if markets: for", "tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol = 'BTC/USDT' print(symbol, gateio.fetch_ohlcv(symbol, '1d')) # one", "= 'ETH/USD' timeframe = '5m' limit = 300 since = bitmex.milliseconds() - limit", "bitmex.milliseconds() - limit * 60 * 1000 params = {'partial': False} ret =", "'ETH/USD' timeframe = '5m' limit = 300 since = bitmex.milliseconds() - limit *", "timeframe='1d', limit=100) if ohlcvs: for ohlcv in ohlcvs: t = time.localtime(ohlcv[0]/1000) print(t, ohlcv)", "import ccxt gateio = ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, })", "= time.localtime(ohlcv[0]/1000) print(t, ohlcv) tickers = gateio.fetch_ticker('BTC/USDT') print(tickers) ''' symbol = 'BTC/USDT' print(symbol,", "300 since = bitmex.milliseconds() - limit * 60 * 1000 params = {'partial':", "'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol = 'ETH/USD' timeframe = '5m' limit =", "time import ccxt gateio = ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' },", "* 1000 params = {'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params)", "= 300 since = bitmex.milliseconds() - limit * 60 * 1000 params =", "bitmex.fetch_ohlcv(symbol, timeframe, since, 12*24, params) print(ret) ''' # print(huobi.id, huobi.load_markets()) ''' print(gateio.id) markets", "for market, detail in markets.items(): if 'USD' in market: print(market) ohlcvs = gateio.fetch_ohlcv('BTC/USDT',", "ccxt.gateio({ 'proxies': { 'http': 'socks5://127.0.0.1:1080', 'https': 'socks5h://127.0.0.1:1080' }, }) ''' symbol = 'ETH/USD'", "markets = gateio.load_markets() if 
markets: for market, detail in markets.items(): if 'USD' in", "* 60 * 1000 params = {'partial': False} ret = bitmex.fetch_ohlcv(symbol, timeframe, since,", "}) ''' symbol = 'ETH/USD' timeframe = '5m' limit = 300 since =" ]
[ "db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True ) devices = db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True", "Stamp folders (relationship): Folders owned by user cipers (relationship): Ciphers owned by user", "The master key Returns: str: The encrypted cipher string \"\"\" enc_key = Bitwarden.decrypt(", "'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object':", "data Args: :param self: This user :param data: The plain text to be", "== code): return True return False def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user", "db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions def __repr__(self): \"\"\" Representation of this object", "db.Model: The Model Base Class \"\"\" # Member Variables id = db.Column( db.String(64),", "import pyotp from app import db from models import funcs from lib.bitwarden import", "premium = db.Column( db.Boolean, nullable=False, default=False ) master_password_hint = db.Column(db.Text, nullable=True) culture =", "self.security_stamp, 'Organizations': [], 'Object': 'profile' } def verifyOTP(self, code): \"\"\" Verify the passed", "db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions def __repr__(self):", "the codes match, false otherwise. 
\"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True return False", "cipher string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data,", "of the last update Args: :param db.Model: The Model Base Class \"\"\" #", "encryption key security_stamp (str): Security Stamp folders (relationship): Folders owned by user cipers", "db.Boolean, nullable=False, default=False ) master_password_hint = db.Column(db.Text, nullable=True) culture = db.Column( db.String(64), nullable=False,", "decrypt with the old key, then recrypt with the new key. Args: :param", "return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user", "Password Hint culture (str): Language/Country string totp_secret (str): Two Factor Authentication secret key", "{ 'Id': self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint,", "= db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship(", "lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now()", "nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False", "is verified premium (bool): User's Premium Status master_password_hint (str): Master Password Hint culture", "enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] )", "User has Two Factor Authentication Enabled key (str): User's encryption key security_stamp (str):", "model contains an 
encrypted version of the encryption key. First decrypt that key", "key. So we will decrypt with the old key, then recrypt with the", "import sql class User(db.Model): \"\"\" This model is used to store users. Attributes:", "\"\"\" This model is used to store users. Attributes: id (int): User ID", "owned by user devices (relationship): Devices owned by user create_date (DateTime): Time when", "decrypt that key then encrypt the data Args: :param self: This user :param", "nullable=False) security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder', backref='user',", "(relationship): Devices owned by user create_date (DateTime): Time when this user was created", "decrypt the encryption key Returns: bytes: The decrypted plain text as a byte", "key. First, decrypt the master key then decrypt the data. Args: :param self:", "'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations':", "the master key for the random encryption key. We want to preserve this", "old_password: The <PASSWORD> :param new_password: The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password,", "\"\"\" return { 'Id': self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium,", "key. 
First decrypt that key then encrypt the data Args: :param self: This", "(bool): User's Premium Status master_password_hint (str): Master Password Hint culture (str): Language/Country string", "ciphers = db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True ) devices = db.relationship( 'Device', backref='user',", "'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile' } def verifyOTP(self, code): \"\"\"", "self: This user :param data: The plain text to be encrypted :param master_key:", "the encryption key. First decrypt that key then encrypt the data Args: :param", "} def verifyOTP(self, code): \"\"\" Verify the passed in code against the user's", "(str): Master Password Hint culture (str): Language/Country string totp_secret (str): Two Factor Authentication", "a byte string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt(", "passed in code against the user's current OTP. Args: :param1 self: This object", "with the new key. Args: :param self: This user :param old_password: The <PASSWORD>", "if the hashes are the same, false otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def", "Functions def __repr__(self): \"\"\" Representation of this object as a string Args: :param", "sqlalchemy import sql class User(db.Model): \"\"\" This model is used to store users.", "codes match, false otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True return False def", "owned by user cipers (relationship): Ciphers owned by user devices (relationship): Devices owned", "default=False ) master_password_hint = db.Column(db.Text, nullable=True) culture = db.Column( db.String(64), nullable=False, default='en-US' )", "The plain text to be encrypted :param master_key: The master key Returns: str:", "class User(db.Model): \"\"\" This model is used to store users. 
Attributes: id (int):", "string Args: :param self: This object Returns: str: String representation of object \"\"\"", "return False def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted", "cipher string that needs decrypted :param master_key: The master password used to decrypt", "Authentication secret key two_factor_enabled (bool): User has Two Factor Authentication Enabled key (str):", "Class \"\"\" # Member Variables id = db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID )", "nullable=False) email_verified = db.Column( db.Boolean, nullable=False, default=False ) premium = db.Column( db.Boolean, nullable=False,", "the random encryption key. We want to preserve this random encryption key. So", "encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted version of the", "self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash):", "key for the random encryption key. We want to preserve this random encryption", ") def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted version", "\"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key = Bitwarden.encrypt( enc_key,", "code against the user's current OTP. 
Args: :param1 self: This object :param2 code:", "culture (str): Language/Country string totp_secret (str): Two Factor Authentication secret key two_factor_enabled (bool):", "First decrypt that key then encrypt the data Args: :param self: This user", "\"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64]", "object as a dict Args: :param self: This object Returns: dict: This object", ":param self: This user :param old_password: The <PASSWORD> :param new_password: The <PASSWORD> \"\"\"", "def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted version of", "\"\"\" This function updates the master key for the random encryption key. We", "key. We want to preserve this random encryption key. So we will decrypt", "mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\" Compares if the user's password hash matches", "Model Base Class \"\"\" # Member Variables id = db.Column( db.String(64), name='id', primary_key=True,", "from app import db from models import funcs from lib.bitwarden import Bitwarden from", ") return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\" Compares if", ") self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) ) self.password_hash = Bit<PASSWORD>en.hashPassword(<PASSWORD>, self.email) self.security_stamp", "user :param data: The cipher string that needs decrypted :param master_key: The master", ":param master_key: The master password used to decrypt the encryption key Returns: bytes:", "= db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified =", "updateMasterKey(self, old_password, new_password): \"\"\" This function updates the master key for the random", "that key then encrypt 
the data Args: :param self: This user :param data:", "contains an encrypted version of the encryption key. First decrypt that key then", "Returns: bool: True if the hashes are the same, false otherwise. \"\"\" return", "object :param2 code: The passed in OTP Returns: bool: True if the codes", "self: The user :param in_hash: The hash to compare against Returns: bool: True", "User ID name (str): User's Name email (str): User's Email email_verified (bool): User's", "bytes: The decrypted plain text as a byte string \"\"\" enc_key = Bitwarden.decrypt(", "self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) ) self.password_hash = Bit<PASSWORD>en.hashPassword(<PASSWORD>, self.email) self.security_stamp =", "The cipher string that needs decrypted :param master_key: The master password used to", "'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey':", "Returns: bool: True if the codes match, false otherwise. 
\"\"\" if(pyotp.TOTP(self.totp_secret).now() == code):", "db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions def __repr__(self): \"\"\" Representation of this", "'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers = db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True )", "self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [],", "master_key: The master key Returns: str: The encrypted cipher string \"\"\" enc_key =", "from sqlalchemy import sql class User(db.Model): \"\"\" This model is used to store", "primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) password_hash =", "object as a string Args: :param self: This object Returns: str: String representation", "db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers", "string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32],", "the user's password hash matches the inputed one Args: :param self: The user", "backref='user', lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(),", "Premium Status master_password_hint (str): Master Password Hint culture (str): Language/Country string totp_secret (str):", "random encryption key. We want to preserve this random encryption key. 
So we", "'Device', backref='user', lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime,", "Factor Authentication Enabled key (str): User's encryption key security_stamp (str): Security Stamp folders", "self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data,", "user :param old_password: The <PASSWORD> :param new_password: The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt(", "to decrypt the encryption key Returns: bytes: The decrypted plain text as a", "db.Column(db.Text, nullable=True) culture = db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True)", ") def comparePasswordHash(self, in_hash): \"\"\" Compares if the user's password hash matches the", "Security Stamp folders (relationship): Folders owned by user cipers (relationship): Ciphers owned by", "server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions def __repr__(self): \"\"\"", "in OTP Returns: bool: True if the codes match, false otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now()", "self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile'", "master key for the random encryption key. We want to preserve this random", "OTP. 
Args: :param1 self: This object :param2 code: The passed in OTP Returns:", "two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False ) key = db.Column(db.String(256), nullable=False) security_stamp =", "Attributes: id (int): User ID name (str): User's Name email (str): User's Email", "(str): User's Email email_verified (bool): User's Email is verified premium (bool): User's Premium", "data: The plain text to be encrypted :param master_key: The master key Returns:", ":param in_hash: The hash to compare against Returns: bool: True if the hashes", "Name email (str): User's Email email_verified (bool): User's Email is verified premium (bool):", ") ciphers = db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True ) devices = db.relationship( 'Device',", "Args: :param self: This object Returns: dict: This object as a dict \"\"\"", "its encryption key. First, decrypt the master key then decrypt the data. Args:", "this object as a dict Args: :param self: This object Returns: dict: This", ":param1 self: This object :param2 code: The passed in OTP Returns: bool: True", "last update Args: :param db.Model: The Model Base Class \"\"\" # Member Variables", "self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile' } def verifyOTP(self, code):", "object as a dict \"\"\" return { 'Id': self.id, 'Name': self.name, 'Email': self.email,", "used to decrypt the encryption key Returns: bytes: The decrypted plain text as", "def comparePasswordHash(self, in_hash): \"\"\" Compares if the user's password hash matches the inputed", "has Two Factor Authentication Enabled key (str): User's encryption key security_stamp (str): Security", "with the old key, then recrypt with the new key. 
Args: :param self:", "The user :param in_hash: The hash to compare against Returns: bool: True if", "verified premium (bool): User's Premium Status master_password_hint (str): Master Password Hint culture (str):", "Authentication Enabled key (str): User's encryption key security_stamp (str): Security Stamp folders (relationship):", "used to store users. Attributes: id (int): User ID name (str): User's Name", ") totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False ) key", "is used to store users. Attributes: id (int): User ID name (str): User's", "Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self,", "master_password_hint (str): Master Password Hint culture (str): Language/Country string totp_secret (str): Two Factor", "db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean, nullable=False, default=False ) premium = db.Column( db.Boolean,", "Args: :param self: This user :param data: The cipher string that needs decrypted", "for the random encryption key. 
We want to preserve this random encryption key.", "we will decrypt with the old key, then recrypt with the new key.", "data, master_key): \"\"\" The user model contains an encrypted version of its encryption", "enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] )", "return True return False def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains", "= db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions def", "dict: This object as a dict \"\"\" return { 'Id': self.id, 'Name': self.name,", "from models import funcs from lib.bitwarden import Bitwarden from sqlalchemy import sql class", "Base Class \"\"\" # Member Variables id = db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID", ":param data: The cipher string that needs decrypted :param master_key: The master password", "to compare against Returns: bool: True if the hashes are the same, false", "(bool): User has Two Factor Authentication Enabled key (str): User's encryption key security_stamp", "version of its encryption key. First, decrypt the master key then decrypt the", "key then encrypt the data Args: :param self: This user :param data: The", "encryption key. First decrypt that key then encrypt the data Args: :param self:", "devices = db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date", "an encrypted version of the encryption key. 
First decrypt that key then encrypt", "'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile' } def verifyOTP(self, code): \"\"\" Verify the", "self: This user :param data: The cipher string that needs decrypted :param master_key:", "This object Returns: str: String representation of object \"\"\" return '<User {}>'.format(self.name) def", "default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128),", "the passed in code against the user's current OTP. Args: :param1 self: This", "self: This object Returns: dict: This object as a dict \"\"\" return {", "import Bitwarden from sqlalchemy import sql class User(db.Model): \"\"\" This model is used", "key = db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders =", "encryption key. So we will decrypt with the old key, then recrypt with", "The timestamp of the last update Args: :param db.Model: The Model Base Class", "as a dict \"\"\" return { 'Id': self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified':", "Language/Country string totp_secret (str): Two Factor Authentication secret key two_factor_enabled (bool): User has", "user cipers (relationship): Ciphers owned by user devices (relationship): Devices owned by user", "data. Args: :param self: This user :param data: The cipher string that needs", "master key then decrypt the data. Args: :param self: This user :param data:", "dict \"\"\" return { 'Id': self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified, 'Premium':", "db from models import funcs from lib.bitwarden import Bitwarden from sqlalchemy import sql", "This model is used to store users. 
Attributes: id (int): User ID name", "code: The passed in OTP Returns: bool: True if the codes match, false", "The <PASSWORD> :param new_password: The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email),", "match, false otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True return False def decryptDataUsingMasterKey(self,", "then encrypt the data Args: :param self: This user :param data: The plain", "Folders owned by user cipers (relationship): Ciphers owned by user devices (relationship): Devices", "db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True )", "to store users. Attributes: id (int): User ID name (str): User's Name email", "totp_secret (str): Two Factor Authentication secret key two_factor_enabled (bool): User has Two Factor", "plain text to be encrypted :param master_key: The master key Returns: str: The", "= Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) ) self.password_hash = Bit<PASSWORD>en.hashPassword(<PASSWORD>, self.email) self.security_stamp = funcs.generateSecureUUID()", "Module contains the User Model \"\"\" import pyotp from app import db from", "db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder',", "from lib.bitwarden import Bitwarden from sqlalchemy import sql class User(db.Model): \"\"\" This model", "hashes are the same, false otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password,", "against the user's current OTP. Args: :param1 self: This object :param2 code: The", "comparePasswordHash(self, in_hash): \"\"\" Compares if the user's password hash matches the inputed one", "\"\"\" The user model contains an encrypted version of the encryption key. 
First", "import funcs from lib.bitwarden import Bitwarden from sqlalchemy import sql class User(db.Model): \"\"\"", "bool: True if the hashes are the same, false otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash,", "__repr__(self): \"\"\" Representation of this object as a string Args: :param self: This", "totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False ) key =", "= db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers = db.relationship( 'Cipher', backref='user', lazy=True,", "Returns: str: String representation of object \"\"\" return '<User {}>'.format(self.name) def toHash(self): \"\"\"", "User's Name email (str): User's Email email_verified (bool): User's Email is verified premium", "code): \"\"\" Verify the passed in code against the user's current OTP. Args:", "Returns: str: The encrypted cipher string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64]", "Factor Authentication secret key two_factor_enabled (bool): User has Two Factor Authentication Enabled key", "of object \"\"\" return '<User {}>'.format(self.name) def toHash(self): \"\"\" Returns this object as", "So we will decrypt with the old key, then recrypt with the new", "\"\"\" The user model contains an encrypted version of its encryption key. First,", "the master key then decrypt the data. Args: :param self: This user :param", "Devices owned by user create_date (DateTime): Time when this user was created update_date", "the new key. Args: :param self: This user :param old_password: The <PASSWORD> :param", "against Returns: bool: True if the hashes are the same, false otherwise. 
\"\"\"", "as a dict Args: :param self: This object Returns: dict: This object as", "db.Boolean, nullable=False, default=False ) key = db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64), nullable=False,", "password used to decrypt the encryption key Returns: bytes: The decrypted plain text", ") key = db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders", "Representation of this object as a string Args: :param self: This object Returns:", "db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False ) key = db.Column(db.String(256), nullable=False)", "False def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted version", "passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() )", "'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile' } def verifyOTP(self,", "false otherwise. 
\"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password): \"\"\" This function", "\"\"\" return '<User {}>'.format(self.name) def toHash(self): \"\"\" Returns this object as a dict", "update_date (DateTime): The timestamp of the last update Args: :param db.Model: The Model", ") folders = db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers = db.relationship( 'Cipher',", "'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile' }", "old_password, new_password): \"\"\" This function updates the master key for the random encryption", "'Cipher', backref='user', lazy=True, passive_deletes=True ) devices = db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True )", "master_password_hint = db.Column(db.Text, nullable=True) culture = db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret =", "an encrypted version of its encryption key. First, decrypt the master key then", "<reponame>linkian209/PyBitWarden<gh_stars>0 \"\"\"models.user This Module contains the User Model \"\"\" import pyotp from app", "nullable=False, default=False ) master_password_hint = db.Column(db.Text, nullable=True) culture = db.Column( db.String(64), nullable=False, default='en-US'", "the old key, then recrypt with the new key. 
Args: :param self: This", "Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model", "dict Args: :param self: This object Returns: dict: This object as a dict", "funcs from lib.bitwarden import Bitwarden from sqlalchemy import sql class User(db.Model): \"\"\" This", "True return False def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an", "<PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key = Bitwarden.encrypt(", "the User Model \"\"\" import pyotp from app import db from models import", "(DateTime): The timestamp of the last update Args: :param db.Model: The Model Base", "enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\" Compares if the user's password hash", "hash to compare against Returns: bool: True if the hashes are the same,", "def __repr__(self): \"\"\" Representation of this object as a string Args: :param self:", "= db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True ) devices = db.relationship( 'Device', backref='user', lazy=True,", "name = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified", "devices (relationship): Devices owned by user create_date (DateTime): Time when this user was", "lazy=True, passive_deletes=True ) devices = db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True ) create_date =", "of the encryption key. First decrypt that key then encrypt the data Args:", "plain text as a byte string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64]", "Email email_verified (bool): User's Email is verified premium (bool): User's Premium Status master_password_hint", "version of the encryption key. 
First decrypt that key then encrypt the data", "lib.bitwarden import Bitwarden from sqlalchemy import sql class User(db.Model): \"\"\" This model is", "Member Variables id = db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128),", "bool: True if the codes match, false otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return", "decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted version of its", "db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128),", "db.Boolean, nullable=False, default=False ) premium = db.Column( db.Boolean, nullable=False, default=False ) master_password_hint =", "needs decrypted :param master_key: The master password used to decrypt the encryption key", "verifyOTP(self, code): \"\"\" Verify the passed in code against the user's current OTP.", "Enabled key (str): User's encryption key security_stamp (str): Security Stamp folders (relationship): Folders", "'<User {}>'.format(self.name) def toHash(self): \"\"\" Returns this object as a dict Args: :param", "the same, false otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password): \"\"\"", ":param2 code: The passed in OTP Returns: bool: True if the codes match,", "= db.Column( db.Boolean, nullable=False, default=False ) master_password_hint = db.Column(db.Text, nullable=True) culture = db.Column(", "backref='user', lazy=True, passive_deletes=True ) devices = db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True ) create_date", "# Functions def __repr__(self): \"\"\" Representation of this object as a string Args:", ":param self: The user :param in_hash: The hash to compare against Returns: bool:", "old key, then recrypt with the new key. 
Args: :param self: This user", "This user :param data: The plain text to be encrypted :param master_key: The", "password_hash = db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean, nullable=False, default=False ) premium =", "encryption key. First, decrypt the master key then decrypt the data. Args: :param", "encrypt the data Args: :param self: This user :param data: The plain text", "return '<User {}>'.format(self.name) def toHash(self): \"\"\" Returns this object as a dict Args:", "when this user was created update_date (DateTime): The timestamp of the last update", "master key Returns: str: The encrypted cipher string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(),", "updates the master key for the random encryption key. We want to preserve", "= db.Column( db.Boolean, nullable=False, default=False ) key = db.Column(db.String(256), nullable=False) security_stamp = db.Column(", "in_hash: The hash to compare against Returns: bool: True if the hashes are", "want to preserve this random encryption key. So we will decrypt with the", "The user model contains an encrypted version of the encryption key. First decrypt", "= Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def", "Returns: dict: This object as a dict \"\"\" return { 'Id': self.id, 'Name':", "random encryption key. 
So we will decrypt with the old key, then recrypt", ":param self: This object Returns: str: String representation of object \"\"\" return '<User", "None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile' } def verifyOTP(self, code): \"\"\" Verify", ":param self: This user :param data: The plain text to be encrypted :param", "db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column(", "Ciphers owned by user devices (relationship): Devices owned by user create_date (DateTime): Time", "= db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False ) key = db.Column(db.String(256),", "matches the inputed one Args: :param self: The user :param in_hash: The hash", "The user model contains an encrypted version of its encryption key. First, decrypt", "the data. Args: :param self: This user :param data: The cipher string that", "This Module contains the User Model \"\"\" import pyotp from app import db", "user :param in_hash: The hash to compare against Returns: bool: True if the", "The encrypted cipher string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return", "'Id': self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture':", "Hint culture (str): Language/Country string totp_secret (str): Two Factor Authentication secret key two_factor_enabled", "user :param data: The plain text to be encrypted :param master_key: The master", "folders (relationship): Folders owned by user cipers (relationship): Ciphers owned by user devices", "= Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email)", "premium (bool): 
User's Premium Status master_password_hint (str): Master Password Hint culture (str): Language/Country", "Returns: bytes: The decrypted plain text as a byte string \"\"\" enc_key =", "sql class User(db.Model): \"\"\" This model is used to store users. Attributes: id", "(str): Language/Country string totp_secret (str): Two Factor Authentication secret key two_factor_enabled (bool): User", "this user was created update_date (DateTime): The timestamp of the last update Args:", "will decrypt with the old key, then recrypt with the new key. Args:", "db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean,", "data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains", "mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted", "Variables id = db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False)", "email (str): User's Email email_verified (bool): User's Email is verified premium (bool): User's", "create_date (DateTime): Time when this user was created update_date (DateTime): The timestamp of", "The master password used to decrypt the encryption key Returns: bytes: The decrypted", "'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp':", "The passed in OTP Returns: bool: True if the codes match, false otherwise.", "Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) )", "The hash to compare against Returns: bool: True if the hashes are the", "self.name, 'Email': self.email, 'EmailVerified': 
self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled,", "new key. Args: :param self: This user :param old_password: The <PASSWORD> :param new_password:", "create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions", "a string Args: :param self: This object Returns: str: String representation of object", "then decrypt the data. Args: :param self: This user :param data: The cipher", "\"\"\"models.user This Module contains the User Model \"\"\" import pyotp from app import", "passive_deletes=True ) ciphers = db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True ) devices = db.relationship(", "if the user's password hash matches the inputed one Args: :param self: The", "contains an encrypted version of its encryption key. First, decrypt the master key", "User Model \"\"\" import pyotp from app import db from models import funcs", ") premium = db.Column( db.Boolean, nullable=False, default=False ) master_password_hint = db.Column(db.Text, nullable=True) culture", "The decrypted plain text as a byte string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(),", "Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self,", "(DateTime): Time when this user was created update_date (DateTime): The timestamp of the", "as a string Args: :param self: This object Returns: str: String representation of", "encrypted version of its encryption key. 
First, decrypt the master key then decrypt", "two_factor_enabled (bool): User has Two Factor Authentication Enabled key (str): User's encryption key", "This user :param old_password: The <PASSWORD> :param new_password: The <PASSWORD> \"\"\" enc_key =", "self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None,", "was created update_date (DateTime): The timestamp of the last update Args: :param db.Model:", "The Model Base Class \"\"\" # Member Variables id = db.Column( db.String(64), name='id',", "Compares if the user's password hash matches the inputed one Args: :param self:", "culture = db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled =", "self: This object Returns: str: String representation of object \"\"\" return '<User {}>'.format(self.name)", "of its encryption key. First, decrypt the master key then decrypt the data.", "contains the User Model \"\"\" import pyotp from app import db from models", "True if the codes match, false otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True", "def verifyOTP(self, code): \"\"\" Verify the passed in code against the user's current", "update Args: :param db.Model: The Model Base Class \"\"\" # Member Variables id", "the hashes are the same, false otherwise. 
\"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self,", "db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean, nullable=False, default=False )", "str: String representation of object \"\"\" return '<User {}>'.format(self.name) def toHash(self): \"\"\" Returns", "mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\" Compares", "nullable=False, default=False ) key = db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID", "User(db.Model): \"\"\" This model is used to store users. Attributes: id (int): User", "db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False)", "encryption key. We want to preserve this random encryption key. So we will", "nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean, nullable=False, default=False ) premium", "the user's current OTP. Args: :param1 self: This object :param2 code: The passed", "the encryption key Returns: bytes: The decrypted plain text as a byte string", "= db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True", "key Returns: bytes: The decrypted plain text as a byte string \"\"\" enc_key", "self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture,", "users. 
Attributes: id (int): User ID name (str): User's Name email (str): User's", "enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an", "Model \"\"\" import pyotp from app import db from models import funcs from", "\"\"\" Returns this object as a dict Args: :param self: This object Returns:", ") devices = db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now())", "\"\"\" Compares if the user's password hash matches the inputed one Args: :param", ") master_password_hint = db.Column(db.Text, nullable=True) culture = db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret", "key Returns: str: The encrypted cipher string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32],", "Master Password Hint culture (str): Language/Country string totp_secret (str): Two Factor Authentication secret", "to be encrypted :param master_key: The master key Returns: str: The encrypted cipher", "inputed one Args: :param self: The user :param in_hash: The hash to compare", "created update_date (DateTime): The timestamp of the last update Args: :param db.Model: The", "return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password): \"\"\" This function updates the master", ") return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key): \"\"\" The", "'profile' } def verifyOTP(self, code): \"\"\" Verify the passed in code against the", "\"\"\" import pyotp from app import db from models import funcs from lib.bitwarden", "key security_stamp (str): Security Stamp folders (relationship): Folders owned by user cipers (relationship):", "funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password): \"\"\" This function updates the master key", 
"to preserve this random encryption key. So we will decrypt with the old", "This object Returns: dict: This object as a dict \"\"\" return { 'Id':", "pyotp from app import db from models import funcs from lib.bitwarden import Bitwarden", "name (str): User's Name email (str): User's Email email_verified (bool): User's Email is", "encryption key Returns: bytes: The decrypted plain text as a byte string \"\"\"", "this object as a string Args: :param self: This object Returns: str: String", "this random encryption key. So we will decrypt with the old key, then", "lazy=True, passive_deletes=True ) ciphers = db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True ) devices =", ":param old_password: The <PASSWORD> :param new_password: The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key,", "model contains an encrypted version of its encryption key. First, decrypt the master", "nullable=True) culture = db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled", "= db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date =", "data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\" Compares if the user's password", "user devices (relationship): Devices owned by user create_date (DateTime): Time when this user", "nullable=False, default=False ) premium = db.Column( db.Boolean, nullable=False, default=False ) master_password_hint = db.Column(db.Text,", "email_verified (bool): User's Email is verified premium (bool): User's Premium Status master_password_hint (str):", "new_password: The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key", "security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder', 
backref='user', lazy=True,", "user model contains an encrypted version of the encryption key. First decrypt that", "return { 'Id': self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint':", "recrypt with the new key. Args: :param self: This user :param old_password: The", "if(pyotp.TOTP(self.totp_secret).now() == code): return True return False def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The", "default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False )", "secret key two_factor_enabled (bool): User has Two Factor Authentication Enabled key (str): User's", "db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified = db.Column(", "default=sql.func.now(), onupdate=sql.func.now() ) # Functions def __repr__(self): \"\"\" Representation of this object as", "the data Args: :param self: This user :param data: The plain text to", "master_key): \"\"\" The user model contains an encrypted version of the encryption key.", "then recrypt with the new key. Args: :param self: This user :param old_password:", "Two Factor Authentication Enabled key (str): User's encryption key security_stamp (str): Security Stamp", "nullable=False, default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers =", "object Returns: dict: This object as a dict \"\"\" return { 'Id': self.id,", "decrypt the master key then decrypt the data. Args: :param self: This user", "This function updates the master key for the random encryption key. 
We want", "by user cipers (relationship): Ciphers owned by user devices (relationship): Devices owned by", "the last update Args: :param db.Model: The Model Base Class \"\"\" # Member", "import db from models import funcs from lib.bitwarden import Bitwarden from sqlalchemy import", "if the codes match, false otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True return", "default=False ) key = db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64), nullable=False, default=funcs.generateSecureUUID )", "one Args: :param self: The user :param in_hash: The hash to compare against", "(str): Security Stamp folders (relationship): Folders owned by user cipers (relationship): Ciphers owned", "false otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True return False def decryptDataUsingMasterKey(self, data,", ") # Functions def __repr__(self): \"\"\" Representation of this object as a string", "= Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def", "data: The cipher string that needs decrypted :param master_key: The master password used", "otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password): \"\"\" This function updates", "current OTP. 
Args: :param1 self: This object :param2 code: The passed in OTP", "nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False, default=False ) key = db.Column(db.String(256), nullable=False) security_stamp", "security_stamp (str): Security Stamp folders (relationship): Folders owned by user cipers (relationship): Ciphers", "String representation of object \"\"\" return '<User {}>'.format(self.name) def toHash(self): \"\"\" Returns this", "user was created update_date (DateTime): The timestamp of the last update Args: :param", "= db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean, nullable=False, default=False", "Time when this user was created update_date (DateTime): The timestamp of the last", ") create_date = db.Column(db.DateTime(), server_default=sql.func.now()) update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) #", "db.Column( db.Boolean, nullable=False, default=False ) master_password_hint = db.Column(db.Text, nullable=True) culture = db.Column( db.String(64),", "that needs decrypted :param master_key: The master password used to decrypt the encryption", "This user :param data: The cipher string that needs decrypted :param master_key: The", "id = db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False) email", "compare against Returns: bool: True if the hashes are the same, false otherwise.", "user create_date (DateTime): Time when this user was created update_date (DateTime): The timestamp", "Args: :param self: This user :param old_password: The <PASSWORD> :param new_password: The <PASSWORD>", "User's Email email_verified (bool): User's Email is verified premium (bool): User's Premium Status", "def toHash(self): \"\"\" Returns this object as a dict Args: :param self: This", "password hash matches the inputed one Args: :param self: The user 
:param in_hash:", "by user devices (relationship): Devices owned by user create_date (DateTime): Time when this", "user model contains an encrypted version of its encryption key. First, decrypt the", "nullable=False) email = db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean,", "a dict Args: :param self: This object Returns: dict: This object as a", "enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password,", "models import funcs from lib.bitwarden import Bitwarden from sqlalchemy import sql class User(db.Model):", "Args: :param db.Model: The Model Base Class \"\"\" # Member Variables id =", "Two Factor Authentication secret key two_factor_enabled (bool): User has Two Factor Authentication Enabled", "in_hash): \"\"\" Compares if the user's password hash matches the inputed one Args:", "<PASSWORD> :param new_password: The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None", "data, master_key): \"\"\" The user model contains an encrypted version of the encryption", "be encrypted :param master_key: The master key Returns: str: The encrypted cipher string", "Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\" Compares if the user's", "str: The encrypted cipher string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] )", "owned by user create_date (DateTime): Time when this user was created update_date (DateTime):", "text as a byte string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] )", ":param self: This user :param data: The cipher string that needs decrypted :param", "object Returns: str: String representation of object \"\"\" return '<User {}>'.format(self.name) def 
toHash(self):", "This object as a dict \"\"\" return { 'Id': self.id, 'Name': self.name, 'Email':", "def updateMasterKey(self, old_password, new_password): \"\"\" This function updates the master key for the", "self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp, 'Organizations': [], 'Object': 'profile' } def", "passed in OTP Returns: bool: True if the codes match, false otherwise. \"\"\"", "master_key): \"\"\" The user model contains an encrypted version of its encryption key.", "self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) ) self.password_hash", "key, then recrypt with the new key. Args: :param self: This user :param", "master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key):", "mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def encryptDataUsingMasterKey(self, data, master_key): \"\"\"", "(int): User ID name (str): User's Name email (str): User's Email email_verified (bool):", "in_hash) def updateMasterKey(self, old_password, new_password): \"\"\" This function updates the master key for", "encrypted :param master_key: The master key Returns: str: The encrypted cipher string \"\"\"", "Args: :param self: This object Returns: str: String representation of object \"\"\" return", "representation of object \"\"\" return '<User {}>'.format(self.name) def toHash(self): \"\"\" Returns this object", "default=funcs.generateSecureUUID ) folders = db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers = db.relationship(", "id (int): User ID name (str): User's Name email (str): User's Email email_verified", "key (str): User's encryption key security_stamp (str): Security Stamp folders (relationship): Folders owned", "email_verified = 
db.Column( db.Boolean, nullable=False, default=False ) premium = db.Column( db.Boolean, nullable=False, default=False", "(relationship): Folders owned by user cipers (relationship): Ciphers owned by user devices (relationship):", "key then decrypt the data. Args: :param self: This user :param data: The", "of this object as a string Args: :param self: This object Returns: str:", "Returns this object as a dict Args: :param self: This object Returns: dict:", "\"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password): \"\"\" This function updates the", "cipers (relationship): Ciphers owned by user devices (relationship): Devices owned by user create_date", "= db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions def __repr__(self): \"\"\" Representation of", "Email is verified premium (bool): User's Premium Status master_password_hint (str): Master Password Hint", "toHash(self): \"\"\" Returns this object as a dict Args: :param self: This object", "Verify the passed in code against the user's current OTP. Args: :param1 self:", "User's Premium Status master_password_hint (str): Master Password Hint culture (str): Language/Country string totp_secret", "self: This user :param old_password: The <PASSWORD> :param new_password: The <PASSWORD> \"\"\" enc_key", "= db.Column(db.Text, nullable=True) culture = db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256),", "OTP Returns: bool: True if the codes match, false otherwise. 
\"\"\" if(pyotp.TOTP(self.totp_secret).now() ==", "code): return True return False def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model", "object \"\"\" return '<User {}>'.format(self.name) def toHash(self): \"\"\" Returns this object as a", "encrypted cipher string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt(", "decrypt the data. Args: :param self: This user :param data: The cipher string", "(str): User's encryption key security_stamp (str): Security Stamp folders (relationship): Folders owned by", "new_password): \"\"\" This function updates the master key for the random encryption key.", "Bitwarden.makeKey(old_password, self.email), None ) self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) ) self.password_hash =", "Status master_password_hint (str): Master Password Hint culture (str): Language/Country string totp_secret (str): Two", "default=False ) premium = db.Column( db.Boolean, nullable=False, default=False ) master_password_hint = db.Column(db.Text, nullable=True)", "Args: :param self: The user :param in_hash: The hash to compare against Returns:", "We want to preserve this random encryption key. So we will decrypt with", "update_date = db.Column( db.DateTime, default=sql.func.now(), onupdate=sql.func.now() ) # Functions def __repr__(self): \"\"\" Representation", "\"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data, enc_key[:32], mac_key=enc_key[32:64]", "store users. 
Attributes: id (int): User ID name (str): User's Name email (str):", "'Email': self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key':", "(bool): User's Email is verified premium (bool): User's Premium Status master_password_hint (str): Master", "folders = db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers = db.relationship( 'Cipher', backref='user',", ":param data: The plain text to be encrypted :param master_key: The master key", "a dict \"\"\" return { 'Id': self.id, 'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified,", "string that needs decrypted :param master_key: The master password used to decrypt the", "decrypted plain text as a byte string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32],", "encrypted version of the encryption key. First decrypt that key then encrypt the", "the inputed one Args: :param self: The user :param in_hash: The hash to", "= db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False) email =", "backref='user', lazy=True, passive_deletes=True ) ciphers = db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True ) devices", "def decryptDataUsingMasterKey(self, data, master_key): \"\"\" The user model contains an encrypted version of", "master_key: The master password used to decrypt the encryption key Returns: bytes: The", "string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32],", "byte string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.decrypt( data,", "= db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean, 
nullable=False, default=False ) premium = db.Column(", "'Object': 'profile' } def verifyOTP(self, code): \"\"\" Verify the passed in code against", "app import db from models import funcs from lib.bitwarden import Bitwarden from sqlalchemy", "passive_deletes=True ) devices = db.relationship( 'Device', backref='user', lazy=True, passive_deletes=True ) create_date = db.Column(db.DateTime(),", "name='id', primary_key=True, default=funcs.generateSecureUUID ) name = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) password_hash", "decrypted :param master_key: The master password used to decrypt the encryption key Returns:", "'Name': self.name, 'Email': self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled':", "hash matches the inputed one Args: :param self: The user :param in_hash: The", "otherwise. \"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True return False def decryptDataUsingMasterKey(self, data, master_key):", ":param new_password: The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None )", "\"\"\" # Member Variables id = db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name", "master_key[:32], mac_key=master_key[32:64] ) return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\"", "text to be encrypted :param master_key: The master key Returns: str: The encrypted", ":param master_key: The master key Returns: str: The encrypted cipher string \"\"\" enc_key", "self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key, 'PrivateKey': None, 'SecurityStamp': self.security_stamp,", "User's Email is verified premium (bool): User's Premium Status 
master_password_hint (str): Master Password", "are the same, false otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password):", "User's encryption key security_stamp (str): Security Stamp folders (relationship): Folders owned by user", "db.Column( db.Boolean, nullable=False, default=False ) key = db.Column(db.String(256), nullable=False) security_stamp = db.Column( db.String(64),", "by user create_date (DateTime): Time when this user was created update_date (DateTime): The", "\"\"\" Representation of this object as a string Args: :param self: This object", "key two_factor_enabled (bool): User has Two Factor Authentication Enabled key (str): User's encryption", "db.String(64), nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column( db.Boolean, nullable=False,", "user's password hash matches the inputed one Args: :param self: The user :param", "function updates the master key for the random encryption key. We want to", "db.Column( db.Boolean, nullable=False, default=False ) premium = db.Column( db.Boolean, nullable=False, default=False ) master_password_hint", "\"\"\" Verify the passed in code against the user's current OTP. Args: :param1", "in code against the user's current OTP. Args: :param1 self: This object :param2", "The <PASSWORD> \"\"\" enc_key = Bitwarden.decrypt( self.key, Bitwarden.makeKey(old_password, self.email), None ) self.key =", "= db.Column( db.String(64), nullable=False, default='en-US' ) totp_secret = db.Column(db.String(256), nullable=True) two_factor_enabled = db.Column(", "True if the hashes are the same, false otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash)", "preserve this random encryption key. 
So we will decrypt with the old key,", "(str): Two Factor Authentication secret key two_factor_enabled (bool): User has Two Factor Authentication", "onupdate=sql.func.now() ) # Functions def __repr__(self): \"\"\" Representation of this object as a", ":param db.Model: The Model Base Class \"\"\" # Member Variables id = db.Column(", "Bitwarden from sqlalchemy import sql class User(db.Model): \"\"\" This model is used to", "self.email), None ) self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) ) self.password_hash = Bit<PASSWORD>en.hashPassword(<PASSWORD>,", "= db.Column( db.Boolean, nullable=False, default=False ) premium = db.Column( db.Boolean, nullable=False, default=False )", "First, decrypt the master key then decrypt the data. Args: :param self: This", "None ) self.key = Bitwarden.encrypt( enc_key, Bitwarden.makeKey(new_password, self.email) ) self.password_hash = Bit<PASSWORD>en.hashPassword(<PASSWORD>, self.email)", ") name = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False)", "self: This object :param2 code: The passed in OTP Returns: bool: True if", "string totp_secret (str): Two Factor Authentication secret key two_factor_enabled (bool): User has Two", "user's current OTP. Args: :param1 self: This object :param2 code: The passed in", "\"\"\" if(pyotp.TOTP(self.totp_secret).now() == code): return True return False def decryptDataUsingMasterKey(self, data, master_key): \"\"\"", "{}>'.format(self.name) def toHash(self): \"\"\" Returns this object as a dict Args: :param self:", "timestamp of the last update Args: :param db.Model: The Model Base Class \"\"\"", "This object :param2 code: The passed in OTP Returns: bool: True if the", "model is used to store users. 
Attributes: id (int): User ID name (str):", "email = db.Column(db.String(128), nullable=False) password_hash = db.Column(db.String(128), nullable=False) email_verified = db.Column( db.Boolean, nullable=False,", "db.relationship( 'Folder', backref='user', lazy=True, passive_deletes=True ) ciphers = db.relationship( 'Cipher', backref='user', lazy=True, passive_deletes=True", "(relationship): Ciphers owned by user devices (relationship): Devices owned by user create_date (DateTime):", "'Organizations': [], 'Object': 'profile' } def verifyOTP(self, code): \"\"\" Verify the passed in", "[], 'Object': 'profile' } def verifyOTP(self, code): \"\"\" Verify the passed in code", "# Member Variables id = db.Column( db.String(64), name='id', primary_key=True, default=funcs.generateSecureUUID ) name =", "ID name (str): User's Name email (str): User's Email email_verified (bool): User's Email", "(str): User's Name email (str): User's Email email_verified (bool): User's Email is verified", "same, false otherwise. \"\"\" return funcs.constantTimeCompare(self.password_hash, in_hash) def updateMasterKey(self, old_password, new_password): \"\"\" This", "as a byte string \"\"\" enc_key = Bitwarden.decrypt( self.key.encode(), master_key[:32], mac_key=master_key[32:64] ) return", "Args: :param self: This user :param data: The plain text to be encrypted", "key. 
Args: :param self: This user :param old_password: The <PASSWORD> :param new_password: The", "self.email, 'EmailVerified': self.email_verified, 'Premium': self.premium, 'MasterPasswordHint': self.master_password_hint, 'Culture': self.culture, 'TwoFactorEnabled': self.two_factor_enabled, 'Key': self.key,", "Args: :param1 self: This object :param2 code: The passed in OTP Returns: bool:", "return Bitwarden.encrypt( data, enc_key[:32], mac_key=enc_key[32:64] ) def comparePasswordHash(self, in_hash): \"\"\" Compares if the", "master password used to decrypt the encryption key Returns: bytes: The decrypted plain", ":param self: This object Returns: dict: This object as a dict \"\"\" return" ]
[ "val = Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable= not", "feedstock), remove_old=remove_old, verbose=verbose) count += 1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count,", "all feedstock in a directory # Args: # path: Path to the feedstock", "import Validator from ..utils.paths import get_path app = Flask(__name__) # Function to accept", "path: path = get_path(__file__, \"submissions\") if verbose: print(\"Accepting all feedstock from '\", path,", "ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if remove_old: os.remove(path) removed = True return {", "verbose: print(\"Accepting all feedstock from '\", path, \"'\", sep=\"\") removed = [] count", "False. # verbose: Should status messages be printed? Default False. def accept_feedstock(path, remove_old=False,", "os.path.basename(path), disable= not verbose): res = val.write_record(json.loads(line)) if not res[\"success\"]: if not val.cancel_validation()[\"success\"]:", "print(\"ERROR: Validation not cancelled. Feedstock may not have been removed.\") raise ValueError(result[\"message\"] +", "disable= not verbose): # Must be actual feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path,", "Must be actual feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count", "not verbose): res = val.write_record(json.loads(line)) if not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation", "Default False. # verbose: Should status messages be printed? Default False. def accept_all(path=None,", "# remove_old: Should fully accepted feedstock be removed? Default False. 
# verbose: Should", "print(\"Accepting all feedstock from '\", path, \"'\", sep=\"\") removed = [] count =", "remove_old=remove_old, verbose=verbose) count += 1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total", "def accept_feedstock(path, remove_old=False, verbose=False): removed = False with open(path) as feedstock: val =", "verbose=False): if not path: path = get_path(__file__, \"submissions\") if verbose: print(\"Accepting all feedstock", "return { \"success\": True, \"source_deleted\": removed } # Function to accept all feedstock", "feedstock # remove_old: Should fully accepted feedstock be removed? Default False. # verbose:", "= 0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose): # Must", "accepted feedstock be removed? Default False. # verbose: Should status messages be printed?", "to accept user-generated feedstock # Args: # path: Path to the feedstock #", "result.get(\"details\")) if remove_old: os.remove(path) removed = True return { \"success\": True, \"source_deleted\": removed", "removed? Default False. # verbose: Should status messages be printed? Default False. def", "# Function to accept all feedstock in a directory # Args: # path:", "directory # Args: # path: Path to the feedstock directory # remove_old: Should", "in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose): # Must be actual feedstock if", "Validator from ..utils.paths import get_path app = Flask(__name__) # Function to accept user-generated", "Should status messages be printed? Default False. def accept_feedstock(path, remove_old=False, verbose=False): removed =", "actual feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count += 1", "may not have been removed.\") raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if remove_old:", "be printed? 
Default False. def accept_feedstock(path, remove_old=False, verbose=False): removed = False with open(path)", "count, \"total feedstock files\") return { \"success\": True, \"removed\": removed, \"total\": count }", "Path to the feedstock directory # remove_old: Should fully accepted feedstock be removed?", "printed? Default False. def accept_feedstock(path, remove_old=False, verbose=False): removed = False with open(path) as", "from tqdm import tqdm from ..validator.schema_validator import Validator from ..utils.paths import get_path app", "Function to accept all feedstock in a directory # Args: # path: Path", "Should fully accepted feedstock be removed? Default False. # verbose: Should status messages", "0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose): # Must be", "desc=\"Accepting feedstock\", disable= not verbose): # Must be actual feedstock if feedstock.endswith(\"_all.json\"): result", "all feedstock from '\", path, \"'\", sep=\"\") removed = [] count = 0", "raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if remove_old: os.remove(path) removed = True return", "Validation not cancelled. Feedstock may not have been removed.\") raise ValueError(result[\"message\"] + \"\\n\"", "directory # remove_old: Should fully accepted feedstock be removed? Default False. # verbose:", "import os import json from flask import Flask from tqdm import tqdm from", "removed = True return { \"success\": True, \"source_deleted\": removed } # Function to", "Default False. # verbose: Should status messages be printed? Default False. 
def accept_feedstock(path,", "open(path) as feedstock: val = Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting \" +", "1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total feedstock files\") return {", "# Must be actual feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose)", "feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count += 1 if", "removed = [] count = 0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable=", "fully accepted feedstock be removed? Default False. # verbose: Should status messages be", "True, \"source_deleted\": removed } # Function to accept all feedstock in a directory", "have been removed.\") raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if remove_old: os.remove(path) removed", "} # Function to accept all feedstock in a directory # Args: #", "status messages be printed? Default False. def accept_all(path=None, remove_old=False, verbose=False): if not path:", "accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count += 1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\",", "messages be printed? Default False. def accept_feedstock(path, remove_old=False, verbose=False): removed = False with", "False. 
def accept_all(path=None, remove_old=False, verbose=False): if not path: path = get_path(__file__, \"submissions\") if", "sep=\"\") removed = [] count = 0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\",", "not have been removed.\") raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if remove_old: os.remove(path)", "feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count += 1 if result[\"source_deleted\"]: removed.append(feedstock)", "accept all feedstock in a directory # Args: # path: Path to the", "not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. Feedstock may not have", "result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total feedstock files\") return { \"success\": True,", "not cancelled. Feedstock may not have been removed.\") raise ValueError(result[\"message\"] + \"\\n\" +", "accept_all(path=None, remove_old=False, verbose=False): if not path: path = get_path(__file__, \"submissions\") if verbose: print(\"Accepting", "= val.write_record(json.loads(line)) if not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. Feedstock", "\"'\", sep=\"\") removed = [] count = 0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting", "from '\", path, \"'\", sep=\"\") removed = [] count = 0 for feedstock", "import Flask from tqdm import tqdm from ..validator.schema_validator import Validator from ..utils.paths import", "be removed? Default False. # verbose: Should status messages be printed? Default False.", "the feedstock directory # remove_old: Should fully accepted feedstock be removed? 
Default False.", "# Function to accept user-generated feedstock # Args: # path: Path to the", "not path: path = get_path(__file__, \"submissions\") if verbose: print(\"Accepting all feedstock from '\",", "in a directory # Args: # path: Path to the feedstock directory #", "to the feedstock # remove_old: Should fully accepted feedstock be removed? Default False.", "os import json from flask import Flask from tqdm import tqdm from ..validator.schema_validator", "Function to accept user-generated feedstock # Args: # path: Path to the feedstock", "# Args: # path: Path to the feedstock directory # remove_old: Should fully", "def accept_all(path=None, remove_old=False, verbose=False): if not path: path = get_path(__file__, \"submissions\") if verbose:", "feedstock from '\", path, \"'\", sep=\"\") removed = [] count = 0 for", "count += 1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total feedstock files\")", "if verbose: print(\"Accepted\", count, \"total feedstock files\") return { \"success\": True, \"removed\": removed,", "with open(path) as feedstock: val = Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting \"", "print(\"Accepted\", count, \"total feedstock files\") return { \"success\": True, \"removed\": removed, \"total\": count", "# Args: # path: Path to the feedstock # remove_old: Should fully accepted", "as feedstock: val = Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path),", "feedstock in a directory # Args: # path: Path to the feedstock directory", "json from flask import Flask from tqdm import tqdm from ..validator.schema_validator import Validator", "res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. Feedstock may not have been", "Path to the feedstock # remove_old: Should fully accepted feedstock be removed? 
Default", "= get_path(__file__, \"submissions\") if verbose: print(\"Accepting all feedstock from '\", path, \"'\", sep=\"\")", "for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose): # Must be actual", "removed.\") raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if remove_old: os.remove(path) removed = True", "verbose): res = val.write_record(json.loads(line)) if not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not", "res = val.write_record(json.loads(line)) if not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled.", "= [] count = 0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not", "feedstock\", disable= not verbose): # Must be actual feedstock if feedstock.endswith(\"_all.json\"): result =", "tqdm import tqdm from ..validator.schema_validator import Validator from ..utils.paths import get_path app =", "Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable= not verbose): res", "get_path app = Flask(__name__) # Function to accept user-generated feedstock # Args: #", "accept user-generated feedstock # Args: # path: Path to the feedstock # remove_old:", "if not path: path = get_path(__file__, \"submissions\") if verbose: print(\"Accepting all feedstock from", "get_path(__file__, \"submissions\") if verbose: print(\"Accepting all feedstock from '\", path, \"'\", sep=\"\") removed", "= accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count += 1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose:", "Default False. def accept_feedstock(path, remove_old=False, verbose=False): removed = False with open(path) as feedstock:", "cancelled. 
Feedstock may not have been removed.\") raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\"))", "verbose): # Must be actual feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old,", "+ \"\\n\" + result.get(\"details\")) if remove_old: os.remove(path) removed = True return { \"success\":", "# path: Path to the feedstock # remove_old: Should fully accepted feedstock be", "to the feedstock directory # remove_old: Should fully accepted feedstock be removed? Default", "Should status messages be printed? Default False. def accept_all(path=None, remove_old=False, verbose=False): if not", "remove_old=False, verbose=False): if not path: path = get_path(__file__, \"submissions\") if verbose: print(\"Accepting all", "in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable= not verbose): res = val.write_record(json.loads(line)) if", "from flask import Flask from tqdm import tqdm from ..validator.schema_validator import Validator from", "tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose): # Must be actual feedstock if feedstock.endswith(\"_all.json\"):", "..validator.schema_validator import Validator from ..utils.paths import get_path app = Flask(__name__) # Function to", "not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. Feedstock may not have been removed.\") raise", "removed } # Function to accept all feedstock in a directory # Args:", "removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total feedstock files\") return { \"success\": True, \"removed\":", "[] count = 0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose):", "path = get_path(__file__, \"submissions\") if verbose: print(\"Accepting all feedstock from '\", path, \"'\",", "False. 
def accept_feedstock(path, remove_old=False, verbose=False): removed = False with open(path) as feedstock: val", "\"success\": True, \"source_deleted\": removed } # Function to accept all feedstock in a", "# path: Path to the feedstock directory # remove_old: Should fully accepted feedstock", "remove_old=False, verbose=False): removed = False with open(path) as feedstock: val = Validator(json.loads(feedstock.readline())) for", "path, \"'\", sep=\"\") removed = [] count = 0 for feedstock in tqdm(os.listdir(path),", "path: Path to the feedstock directory # remove_old: Should fully accepted feedstock be", "desc=\"Accepting \" + os.path.basename(path), disable= not verbose): res = val.write_record(json.loads(line)) if not res[\"success\"]:", "verbose: print(\"Accepted\", count, \"total feedstock files\") return { \"success\": True, \"removed\": removed, \"total\":", "feedstock: val = Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable=", "not verbose): # Must be actual feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock),", "verbose: Should status messages be printed? Default False. def accept_all(path=None, remove_old=False, verbose=False): if", "remove_old: Should fully accepted feedstock be removed? Default False. # verbose: Should status", "..utils.paths import get_path app = Flask(__name__) # Function to accept user-generated feedstock #", "= False with open(path) as feedstock: val = Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock,", "tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable= not verbose): res = val.write_record(json.loads(line)) if not", "disable= not verbose): res = val.write_record(json.loads(line)) if not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR:", "val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. 
Feedstock may not have been removed.\") raise ValueError(result[\"message\"]", "if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total feedstock files\") return { \"success\":", "path: Path to the feedstock # remove_old: Should fully accepted feedstock be removed?", "status messages be printed? Default False. def accept_feedstock(path, remove_old=False, verbose=False): removed = False", "\"\\n\" + result.get(\"details\")) if remove_old: os.remove(path) removed = True return { \"success\": True,", "remove_old: os.remove(path) removed = True return { \"success\": True, \"source_deleted\": removed } #", "if verbose: print(\"Accepting all feedstock from '\", path, \"'\", sep=\"\") removed = []", "Args: # path: Path to the feedstock directory # remove_old: Should fully accepted", "messages be printed? Default False. def accept_all(path=None, remove_old=False, verbose=False): if not path: path", "app = Flask(__name__) # Function to accept user-generated feedstock # Args: # path:", "Args: # path: Path to the feedstock # remove_old: Should fully accepted feedstock", "\"submissions\") if verbose: print(\"Accepting all feedstock from '\", path, \"'\", sep=\"\") removed =", "to accept all feedstock in a directory # Args: # path: Path to", "'\", path, \"'\", sep=\"\") removed = [] count = 0 for feedstock in", "verbose: Should status messages be printed? Default False. 
def accept_feedstock(path, remove_old=False, verbose=False): removed", "line in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable= not verbose): res = val.write_record(json.loads(line))", "False with open(path) as feedstock: val = Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting", "be actual feedstock if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count +=", "import tqdm from ..validator.schema_validator import Validator from ..utils.paths import get_path app = Flask(__name__)", "count = 0 for feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose): #", "feedstock be removed? Default False. # verbose: Should status messages be printed? Default", "import json from flask import Flask from tqdm import tqdm from ..validator.schema_validator import", "True return { \"success\": True, \"source_deleted\": removed } # Function to accept all", "import get_path app = Flask(__name__) # Function to accept user-generated feedstock # Args:", "# verbose: Should status messages be printed? Default False. 
def accept_feedstock(path, remove_old=False, verbose=False):", "+= 1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total feedstock files\") return", "Flask(__name__) # Function to accept user-generated feedstock # Args: # path: Path to", "flask import Flask from tqdm import tqdm from ..validator.schema_validator import Validator from ..utils.paths", "user-generated feedstock # Args: # path: Path to the feedstock # remove_old: Should", "verbose=verbose) count += 1 if result[\"source_deleted\"]: removed.append(feedstock) if verbose: print(\"Accepted\", count, \"total feedstock", "Flask from tqdm import tqdm from ..validator.schema_validator import Validator from ..utils.paths import get_path", "{ \"success\": True, \"source_deleted\": removed } # Function to accept all feedstock in", "if not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. Feedstock may not", "been removed.\") raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if remove_old: os.remove(path) removed =", "os.remove(path) removed = True return { \"success\": True, \"source_deleted\": removed } # Function", "for line in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable= not verbose): res =", "feedstock in tqdm(os.listdir(path), desc=\"Accepting feedstock\", disable= not verbose): # Must be actual feedstock", "Feedstock may not have been removed.\") raise ValueError(result[\"message\"] + \"\\n\" + result.get(\"details\")) if", "val.write_record(json.loads(line)) if not res[\"success\"]: if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. 
Feedstock may", "result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count += 1 if result[\"source_deleted\"]: removed.append(feedstock) if", "tqdm from ..validator.schema_validator import Validator from ..utils.paths import get_path app = Flask(__name__) #", "be printed? Default False. def accept_all(path=None, remove_old=False, verbose=False): if not path: path =", "feedstock # Args: # path: Path to the feedstock # remove_old: Should fully", "\" + os.path.basename(path), disable= not verbose): res = val.write_record(json.loads(line)) if not res[\"success\"]: if", "printed? Default False. def accept_all(path=None, remove_old=False, verbose=False): if not path: path = get_path(__file__,", "= True return { \"success\": True, \"source_deleted\": removed } # Function to accept", "\"source_deleted\": removed } # Function to accept all feedstock in a directory #", "a directory # Args: # path: Path to the feedstock directory # remove_old:", "from ..validator.schema_validator import Validator from ..utils.paths import get_path app = Flask(__name__) # Function", "= Flask(__name__) # Function to accept user-generated feedstock # Args: # path: Path", "if not val.cancel_validation()[\"success\"]: print(\"ERROR: Validation not cancelled. Feedstock may not have been removed.\")", "the feedstock # remove_old: Should fully accepted feedstock be removed? Default False. #", "verbose=False): removed = False with open(path) as feedstock: val = Validator(json.loads(feedstock.readline())) for line", "+ os.path.basename(path), disable= not verbose): res = val.write_record(json.loads(line)) if not res[\"success\"]: if not", "feedstock directory # remove_old: Should fully accepted feedstock be removed? Default False. #", "# verbose: Should status messages be printed? Default False. 
def accept_all(path=None, remove_old=False, verbose=False):", "from ..utils.paths import get_path app = Flask(__name__) # Function to accept user-generated feedstock", "removed = False with open(path) as feedstock: val = Validator(json.loads(feedstock.readline())) for line in", "if feedstock.endswith(\"_all.json\"): result = accept_feedstock(os.path.join(path, feedstock), remove_old=remove_old, verbose=verbose) count += 1 if result[\"source_deleted\"]:", "accept_feedstock(path, remove_old=False, verbose=False): removed = False with open(path) as feedstock: val = Validator(json.loads(feedstock.readline()))", "= Validator(json.loads(feedstock.readline())) for line in tqdm(feedstock, desc=\"Accepting \" + os.path.basename(path), disable= not verbose):", "False. # verbose: Should status messages be printed? Default False. def accept_all(path=None, remove_old=False,", "Default False. def accept_all(path=None, remove_old=False, verbose=False): if not path: path = get_path(__file__, \"submissions\")", "+ result.get(\"details\")) if remove_old: os.remove(path) removed = True return { \"success\": True, \"source_deleted\":", "if remove_old: os.remove(path) removed = True return { \"success\": True, \"source_deleted\": removed }" ]
[ "\"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\": 25,", "{ \"shapley_batch_size\": { \"pointnet2\": 5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20", "20 }, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\": 25, \"gcnn\": 50, \"pointconv\":", "5, \"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\":", "\"pointnet2\": 5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\": {", "50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\":", "{ \"pointnet2\": 5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\":", "\"pointconv\": 20 }, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\": 25, \"gcnn\": 50,", "CONFIG = { \"shapley_batch_size\": { \"pointnet2\": 5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10,", "{ \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\": 25, \"gcnn\": 50, \"pointconv\": 100 } }", "5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\": { \"pointnet2\":", "\"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100,", "\"shapley_batch_size\": { \"pointnet2\": 5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20 },", "<filename>config.py CONFIG = { \"shapley_batch_size\": { \"pointnet2\": 5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\":", "\"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\": 20 }, \"interaction_batch_size\": { \"pointnet2\": 25,", "= { \"shapley_batch_size\": { \"pointnet2\": 5, \"pointnet\": 50, \"dgcnn\": 5, \"gcnn\": 10, \"pointconv\":", "\"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\": 25, \"gcnn\": 50, \"pointconv\": 100 }", "10, \"pointconv\": 20 
}, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\": 25, \"gcnn\":", "}, \"interaction_batch_size\": { \"pointnet2\": 25, \"pointnet\": 100, \"dgcnn\": 25, \"gcnn\": 50, \"pointconv\": 100" ]
[ "the feature wall is removed, but the geometry of the environment \\ remains", "data structures (DataFrame) and read_excel # Import module with class/functions handling pigeon procesing", "analysis based on the groups and animals\\ selected above.\") # Create and populate", "Dist\"] goColumns = list(columns) goColumns[-1] = \"Average Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average", "goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial)", "= self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame", "make sure it is not \\ currently in use. Saving operation cancelled.\") elif", "group self.trialLabels = [\"Non-reinforced training\", \"Control 1\", \"Control 2\", \"Feature Only\", \"Geometry Only\",", "where an extra wall and a \\ feature wall are placed in the", "in the data directory for bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont))", "buttonNum in buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function", "quit instead?\") if (result == True): print \"Exiting program...\" exit() else: numError =", "def getFrame(self, pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame =", "[] # Create a button for each bird in the data directory for", "self.scrollFunc) self.animals = list(allData.keys()) self.animalVals = [] # Create a button for each", "root root.wm_title(\"Data Processor\") # create title label root.geometry(\"840x520+300+300\") # set the size of", "group.\", \"Control group 1\", \"Control group 2\", \"Group where an extra wall and", "GUI 
of this program can be found in the supplied \\ README file.", "try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data directory.\") if not dataDirname:", "one grouping to analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\", \"Please select", "\"animals\"): keys = self.animals else: keys = self.trialKeys # check which buttons are", "pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) # also save each pigeon", "\"Saving output of chosen groups and pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\",", "enclosed square.\", \"Group where the feature wall is removed, but the geometry of", "'-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if (chosenName", "Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X and Y coordinates", "import pandas as pd # import pandas data structures (DataFrame) and read_excel #", "\\ element within the GUI.\\n\\n\" self.createComponents() # function for creating the select all", "geometry of the environment \\ remains the same.\", \"Group where the feature wall", "Check to make sure it is not currently \\ in use. 
Since processing", "processing was \\ cancelled.\") # =============================================================================# # ==========main function for handling processing and", "# loop over each pigeon and acquire data matching requested trials for pigeon", "outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"]", "select all and de-select all buttons def allButtons(self, buttonGroup, event): for buttonNum in", "removed, but the geometry of the environment \\ remains the same.\", \"Group where", "gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] =", "\"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame return", "(x,y) pigeon.calcDist(calcForThreshold) # use the excel writer to save this pigeon to a", "allFiles = glob.glob(\"*.xls\") try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found. Please", "a frame for the bottom section # ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True,", "trial) trialFrame = trialFrame.append(tempFrame) # add this pigeon to trial frame # sort", "list all files of type .xls allFiles = glob.glob(\"*.xls\") try: numFiles = len(allFiles)", "= \"\" allPigeons = {} allData = {} groupsForOutput = [] trialButtons =", "the desired analyses def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\")", "time # find the indices of the goal locations in (x,y) pigeon.calcDist(calcForThreshold) #", "multipled attempts. 
Do you want to quit instead?\") if (result == True): print", "# cd to data directory chdir(dataDirname) # list all files of type .xls", "the selected data files occurred as usual, there was an issue \\ writing", "= [] # create all of the group buttons for num in range(len(self.trialLabels)):", "print \"Exiting program...\" exit() else: numError = 0 break try: dataDirname = tkFileDialog.askdirectory(parent=root,", "trialFrame = pd.DataFrame({}) # storage frame for each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame", "anchor=W, side=LEFT) # Create a checkbox for each test group self.trialLabels = [\"Non-reinforced", "%0.0f units, \\ please wait...\" % (numFiles, calcForThreshold) startTime = time.time() # start", "in \" + outputFilename + '.' def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data", "file in allFiles: datafile = pd.ExcelFile(file) index = allFiles.index(file) # now read excel", "selected\", \"Please select at least one grouping to analyze.\") elif (animalsForOutput == []):", "= self.animals else: keys = self.trialKeys # check which buttons are selected for", "\"experimental phases\") # Create a frame for handling all of the birds #", "the additional buttons # ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label", "analyzed def getGroups(self, buttons, groupType): groupsForOutput = [] if (groupType == \"animals\"): keys", "placed in the environment to create an enclosed square.\", \"Group where the feature", "====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a run button runButton", "quit button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit", "== \"Select\": buttonGroup[buttonNum].set(1) else: 
buttonGroup[buttonNum].set(0) # Output the desired analyses def run(self): trialsForOutput", "locate the current directory and file location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname =", "processing of the selected data files occurred as usual, there was an issue", "occurred as usual, there was an issue \\ writing to the designated excel", "changed from default\" return if (reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value", "text=\"Quit the program and close the GUI.\") def create_window(self): self.counter += 1 t", "# import pandas data structures (DataFrame) and read_excel # Import module with class/functions", "of the birds # ====================================================================== animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT)", "\\ issue writing to the designated excel file. Check to make sure it", "# function for parsing dataframe based on groups def analyzeGroups(self, trials, animals): outputFrames", "the program and close the GUI.\") def create_window(self): self.counter += 1 t =", "trialFrame = trialFrame.append(tempFrame) # add this pigeon to trial frame # sort by", "each bird in the data directory for bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird],", "to set a new threshold value \\ for calculating the max distance away", "phases\") # Create a frame for handling all of the birds # ======================================================================", "self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame = self.getFrame(pigeonFrame,", "program was developed to automatically format input excel data for statistical analysis in", "elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\", \"Please select at least one bird", "= 
ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and close the GUI.\") def create_window(self): self.counter", "= chosenName.replace('/', sep); if (chosenName != dirname + sep + initialFileName) and (\".xls\"", "(trialsForOutput == [] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to", "Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500,", "delay=toolTipDelay, text=\"Select to auto-sort the output excel spreadsheets by \\ trial type.\") #", "= [\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\", \"Average Dist\"] goColumns = list(columns) goColumns[-1]", "Create the title label title_label = Label(titleFrame, text=\"Data Processor For Pigeon Experiment\", font=self.titleFont)", "of the \\ long wall.\"] self.trialVals = [] # create all of the", "tkMessageBox.showinfo(\"No groups selected\", \"Please select at least one grouping to analyze.\") elif (animalsForOutput", "# Create and populate group and trial button frames # ====================================================================== trialFrame =", "thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\",", "in the statistical analysis software. 
\"\"\" from Tkinter import * from ttk import", "Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar", "= gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] =", "define the output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename)", "self.trialLabels = [\"Non-reinforced training\", \"Control 1\", \"Control 2\", \"Feature Only\", \"Geometry Only\", \"Affine\"]", "# Re-analyze with new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox,", "allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing the selected data files took %1.2f seconds.\"", "\\ processing of the selected data files occurred as usual, there was an", "expand=True, padx=100, pady=100) # function for determining which groups/animals will be analyzed def", "loaded into the dictionary allPigeons for pigeonName, pigeon in allPigeons.iteritems(): currFile += 1", "are placed in the environment to create an enclosed square.\", \"Group where the", "[]): tkMessageBox.showinfo(\"No birds selected\", \"Please select at least one bird to analyze.\") def", "output of all selected data files located in \" + outputFilename + '.'", "section # ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a run", "# function to also create a processed dataframe for each pigeon/trial def getFrame(self,", "for use \\ in the laboratory of Dr. 
<NAME>.\") # Create a canvas", "wall and a \\ feature wall are placed in the environment to create", "% self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) # function for determining which groups/animals", "developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General Public License,", "the selected data files took %1.2f seconds.\" % processingTime print \"\\nFormatted output of", "create the excel writer object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving", "self.trialVals = [] # create all of the group buttons for num in", "reformatButton = Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton,", "found. Please restart the program.\") # First read-in the data for file in", "buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output the desired analyses def run(self): trialsForOutput = self.getGroups(self.trialVals,", "Licensed under # GNU General Public License, Ver 2 from ToolTip import ToolTip", "files of type .xls allFiles = glob.glob(\"*.xls\") try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No", "you want to quit instead?\") if (result == True): print \"Exiting program...\" exit()", "group 2\", \"Group where an extra wall and a \\ feature wall are", "size of the window # Initialize variables toolTipDelay = 700 # ms defaultThreshold", "self.trialVals, \"experimental phases\") # Create a frame for handling all of the birds", "and Y coordinates option selected columns = columns.append([\"X Dist\", \"Y Dist\"])''' for trial", "AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial)", "now read excel file data into a DataFrame pigeonData = pd.read_excel(datafile) # 
extract", "of this program can be found in the supplied \\ README file. Tooltips", "progress progressTime = time.time() # update progress time # find the indices of", "into the dictionary allPigeons for pigeonName, pigeon in allPigeons.iteritems(): currFile += 1 if", "writer = pd.ExcelWriter(chosenName) # create the excel writer object for frameIndex in outputFrames:", "= gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame,", "all and de-select all buttons def allButtons(self, buttonGroup, event): for buttonNum in range(len(buttonGroup)):", "excel file. Check to make sure it is not currently \\ in use.", "Name\"]) else: if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial ==", "defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\", \"Please enter a valid number.\") thresholdBox.delete(0,", "# check which buttons are selected for buttonNum in buttons: if buttonNum.get(): indexOfButton", "\\ long wall.\"] self.trialVals = [] # create all of the group buttons", "allFiles: datafile = pd.ExcelFile(file) index = allFiles.index(file) # now read excel file data", "(trial == \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame =", "Frame, Style from os import chdir, path, sep import pandas as pd #", "0 progressTime = 0 # loop through all of the pigeons loaded into", "to automatically format input excel data for statistical analysis in the statistical analysis", "\"Average Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X and Y", "scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the buttons and components of the", "run the initial formatting on the data folder analyzePigeons(defaultThreshold, path) print \"\\nTips for", "= 
ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on the groups and animals\\ selected above.\")", "laboratory of Dr. <NAME>.\") # Create a canvas for drawing a separation line", "trial gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop over each pigeon and", "deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" + text + \" marked for", "files took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\ processing", "# Constructor def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run the initial", "values, \\ this may not be an issue. Saving the output of initial", "\"Group where an extra wall and a \\ feature wall are placed in", "data files took %1.2f seconds.\" % processingTime print \"\\nFormatted output of all selected", "buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create a frame for handling all of", "def create_window(self): self.counter += 1 t = Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l", "for calculating the max distance away from a goal to be kept for", "Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X and Y coordinates option", "entry # convert unicode to utf8 pigeonName = pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName]", "chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an \\ issue writing to the", "for determining which groups/animals will be analyzed def getGroups(self, buttons, groupType): groupsForOutput =", "+ '-' + '-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/',", "root = Tk() # create GUI root root.wm_title(\"Data Processor\") # create title label", "select something to analyze.\") elif (trialsForOutput == []): 
tkMessageBox.showinfo(\"No groups selected\", \"Please select", "=============================================================================# # ==========main function for handling processing and GUI functions============# # =============================================================================# class", "selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" + text + \" for", "columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame = tempFrame.dropna() # tempFrame", "data directory chdir(dataDirname) # list all files of type .xls allFiles = glob.glob(\"*.xls\")", "= pd.read_excel(datafile) # extract pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take first", "self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame =", "file. Check to make sure it is not \\ currently in use. 
Saving", "for select all and deselect all buttons canv = Canvas(frame, width=220, height=10) canv.create_line(20,", "list of dataframes if selected to if (self.sortOutput == 1): if (trial ==", "initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if (chosenName != dirname + sep + initialFileName)", "elif (trial == \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame", "padx=100, pady=100) # function for determining which groups/animals will be analyzed def getGroups(self,", "== True): print \"Exiting program...\" exit() else: numError = 0 break try: dataDirname", "ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" + text + \" marked for analysis.\") return", "goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame)", "tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create a frame for the title", "for the bottom section # ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) #", "\"Please select at least one bird to analyze.\") def checkReformat(self, thresholdBox, reset): #", "pd.ExcelFile(file) index = allFiles.index(file) # now read excel file data into a DataFrame", "data directory numErrors = 0 while True: if (numErrors > 4): result =", "first entry # convert unicode to utf8 pigeonName = pigeonNametemp.encode('utf8') # create pigeon", "currFile += 1 if ((time.time() - progressTime) > 5): # display progress progressTime", "thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to set a", "= trialFrame.append(tempFrame) # add this pigeon to trial frame # sort by group", "to make sure it is not currently \\ in use. 
Since processing will", "self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a", "event): for buttonNum in range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) #", "feature wall is removed, but the geometry of the environment \\ remains the", "text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the output", "outputFrames # function to also create a processed dataframe for each pigeon/trial def", "saving the excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-' +", "initialFileName = todaysDate + '-' + '-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName)", "\"Although \\ processing of the selected data files occurred as usual, there was", "self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\")", "elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial]", "and populate group and trial button frames # ====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True,", "= trialFrame return outputFrames # function to also create a processed dataframe for", "# create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing the selected", "size=16) # Create a frame for the title section # 
====================================================================== titleFrame =", "analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data files with a threshold of %0.0f units,", "# take first # term from trial information in first entry # convert", "data files occurred as usual, there was an issue \\ writing to the", "loop through all of the pigeons loaded into the dictionary allPigeons for pigeonName,", "text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all buttons self.createButtons(trialFrame,", "trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame", "set the size of the window # Initialize variables toolTipDelay = 700 #", "path): print \"\\nProcessing %1.0f data files with a threshold of %0.0f units, \\", "group buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1],", "the size of the window # Initialize variables toolTipDelay = 700 # ms", "groupsForOutput # function for parsing dataframe based on groups def analyzeGroups(self, trials, animals):", "usual, there was an issue \\ writing to the designated excel file. 
Check", "marked for analysis.\") return (selectAll, deselectAll) # Callback for select all and de-select", "tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame = tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No", "trials for pigeon in animals: tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial", "analysis.\") return (selectAll, deselectAll) # Callback for select all and de-select all buttons", "for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num]))", "> 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over multipled attempts. Do", "= AFtrialFrame outputFrames[trial] = trialFrame return outputFrames # function to also create a", "1\", \"Control 2\", \"Feature Only\", \"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\",", "same threshold values, \\ this may not be an issue. Saving the output", "data matching requested trials for pigeon in animals: tempFrame = pd.DataFrame({}) pigeonFrame =", "tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\ processing of the selected data files occurred", "= tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over multipled attempts. 
Do you want to", "defaultThreshold = 50 outputFilename = \"\" pigeonName = \"\" allPigeons = {} allData", "in list of dataframes if selected to if (self.sortOutput == 1): if (trial", "self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals = [] #", "= Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0,", "= {} groupsForOutput = [] trialButtons = [] trialButtonTooltips = [] animalButtons =", "data into a DataFrame pigeonData = pd.read_excel(datafile) # extract pigeon name pigeonNametemp =", "a frame for handling all of the additional buttons # ====================================================================== buttonsFrame =", "self.animals else: keys = self.trialKeys # check which buttons are selected for buttonNum", "buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay,", "2\", \"Feature Only\", \"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\",", "all buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create a frame for handling all", "def checkReformat(self, thresholdBox, reset): # re-run if threshold has been changed value =", "chosen groups and pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as", "Threshold entry box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox,", "raise ValueError(\"empty string\") break except ValueError: numErrors += 1 
tkMessageBox.showinfo(\"Invalid directory - Failed", "processing output cancelled\", \"Although \\ processing of the selected data files occurred as", "checkReformat(self, thresholdBox, reset): # re-run if threshold has been changed value = float(thresholdBox.get())", "select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a frame for handling all", "read excel file data into a DataFrame pigeonData = pd.read_excel(datafile) # extract pigeon", "calculate how long formatting takes processingTime = time.time() - startTime try: allWriter.save() printInfo(processingTime,", "tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to analyze.\") elif (trialsForOutput == []): tkMessageBox.showinfo(\"No groups", "time # =============================================================================# root = Tk() # create GUI root root.wm_title(\"Data Processor\") #", "Only\", \"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips", "self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training group.\",", "height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create a frame for", "same.\", \"Group where the feature wall is moved to the end of the", "sep + initialFileName) and (\".xls\" not in chosenName): chosenName = chosenName + \".xls\"", "Label(titleFrame, text=\"Data Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay", "0 while True: if (numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\", "directory...\") dataDirname = dataDirname.replace('/', sep) # cd to data directory chdir(dataDirname) # list", "in animals: tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial == \"GO\"): goFrame", "directory \\ 
selected over multipled attempts. Do you want to quit instead?\") if", "thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the buttons and", "gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame =", "thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the", "print \"Processing the selected data files took %1.2f seconds.\" % processingTime print \"\\nFormatted", "\\ in use. Since processing will not likely change for the same threshold", "range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output the desired analyses", "an enclosed square.\", \"Group where the feature wall is removed, but the geometry", "by <NAME> for use \\ in the laboratory of Dr. 
<NAME>.\") # Create", "Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame return outputFrames #", "\" + text + \" marked for analysis.\") return (selectAll, deselectAll) # Callback", "above.\") # Create and populate group and trial button frames # ====================================================================== trialFrame", "the current directory and file location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/',", "outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"])", "= allFiles.index(file) # now read excel file data into a DataFrame pigeonData =", "initial data processing was \\ cancelled.\") # =============================================================================# # ==========main function for handling", "buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \") # Threshold", "IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select", "based on the groups and animals\\ selected above.\") # Create and populate group", "= Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis", "Ver 2 from ToolTip import ToolTip # Import for directory dialog import tkFileDialog,", "the output excel spreadsheets by \\ trial type.\") # Create a quit button", "self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != []) and (animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput,", "chdir(dataDirname) # list all files of type .xls allFiles = glob.glob(\"*.xls\") try: numFiles", "to the designated excel file. 
Check to make sure it is not \\", "!= dirname + sep + initialFileName) and (\".xls\" not in chosenName): chosenName =", "\"Control 1\", \"Control 2\", \"Feature Only\", \"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\",", "trial in trials: trialFrame = pd.DataFrame({}) # storage frame for each trial gotrialFrame", "= [\"Non-reinforced training\", \"Control 1\", \"Control 2\", \"Feature Only\", \"Geometry Only\", \"Affine\"] self.trialKeys", "import Frame, Style from os import chdir, path, sep import pandas as pd", "a DataFrame pigeonData = pd.read_excel(datafile) # extract pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0]", "= 50 outputFilename = \"\" pigeonName = \"\" allPigeons = {} allData =", "files located in \" + outputFilename + '.' def analyzePigeons(calcForThreshold, path): print \"\\nProcessing", "all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a frame for handling all of", "numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found. 
Please restart the program.\") #", "break except ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt %0.0f/5\"", "button runButton = Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay,", "Please restart the program.\") # First read-in the data for file in allFiles:", "in chosenName): chosenName = chosenName + \".xls\" try: # create excelwriter object for", "bird to analyze.\") def checkReformat(self, thresholdBox, reset): # re-run if threshold has been", "of the buttons and components of the GUI def createComponents(self): # Create text", "default value.\") # Create a sort button self.sortOutput = IntVar() sortButton = Checkbutton(buttonsFrame,", "training group.\", \"Control group 1\", \"Control group 2\", \"Group where an extra wall", "[] trialButtons = [] trialButtonTooltips = [] animalButtons = [] # locate the", "width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500,", "Create a quit button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton,", "the dictionary allPigeons for pigeonName, pigeon in allPigeons.iteritems(): currFile += 1 if ((time.time()", "progressTime = startTime # define the output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" %", "self.counter) l = Label(t, text=\"This is window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True,", "Failed \\ attempt %0.0f/5\" % numErrors, \"Please select a valid directory...\") dataDirname =", "pigeonData = pd.read_excel(datafile) # extract pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take", "# Callback for select all and de-select all buttons def allButtons(self, buttonGroup, event):", "!= []) and 
(animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get the", "(DataFrame) and read_excel # Import module with class/functions handling pigeon procesing from pigeon", "utf8 pigeonName = pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename):", "class/functions handling pigeon procesing from pigeon import Pigeon # Import tool tip function", "created by <NAME> for use \\ in the laboratory of Dr. <NAME>.\") #", "\"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training group.\", \"Control group 1\", \"Control", "bird in the data directory for bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird],", "create title label root.geometry(\"840x520+300+300\") # set the size of the window # Initialize", "trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") #", "try: allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing the selected data files took %1.2f", "= pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial == \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns,", "\"Processing the selected data files took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output", "data folder analyzePigeons(defaultThreshold, path) print \"\\nTips for using the GUI of this program", "except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an \\ issue writing to the designated", "least one bird to analyze.\") def checkReformat(self, thresholdBox, reset): # re-run if threshold", "trial button frames # ====================================================================== trialFrame = Frame(self) 
trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create", "expand=True) # run the initial formatting on the data folder analyzePigeons(defaultThreshold, path) print", "animals\\ selected above.\") # Create and populate group and trial button frames #", "== []): tkMessageBox.showinfo(\"No birds selected\", \"Please select at least one bird to analyze.\")", "thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \") # Threshold entry box thresholdBox = Entry(buttonsFrame,", "\", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an \\ issue writing to", "= Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220, 10, dash=(2, 4)) canv.pack(fill=X) # create", "anchor=CENTER, expand=True) # Create a frame for the bottom section # ====================================================================== footerFrame", "AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X and Y coordinates option selected", "using the GUI of this program can be found in the supplied \\", "if not dataDirname: raise ValueError(\"empty string\") break except ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid", "writing to the designated excel file. Check to make sure it is not", "is not currently \\ in use. 
Since processing will not likely change for", "trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all buttons", "of the environment \\ remains the same.\", \"Group where the feature wall is", "threshold and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to", "====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X) # Create the title label title_label = Label(titleFrame,", "be analyzed def getGroups(self, buttons, groupType): groupsForOutput = [] if (groupType == \"animals\"):", "!= [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output name for saving", "a frame for the title section # ====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X) #", "<NAME>.\") # Create a canvas for drawing a separation line canv = Canvas(titleFrame,", "are selected for buttonNum in buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return", "from ToolTip import ToolTip # Import for directory dialog import tkFileDialog, tkMessageBox, tkFont,", "ttk import Frame, Style from os import chdir, path, sep import pandas as", "over any \\ element within the GUI.\\n\\n\" self.createComponents() # function for creating the", "createButtons(self, frame, vals, text): # create canvas for select all and deselect all", "button separately selectAll = Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip =", "directory for bird in range(len(self.animals)): 
self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) #", "GUI.\\n\\n\" self.createComponents() # function for creating the select all and de-select button frames", "line canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True)", "= [] # locate the current directory and file location dirname, mainFile =", "pigeon in animals: tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial == \"GO\"):", "selected over multipled attempts. Do you want to quit instead?\") if (result ==", "formatting takes processingTime = time.time() - startTime try: allWriter.save() printInfo(processingTime, outputFilename) except: print", "# Create a checkbox for each test group self.trialLabels = [\"Non-reinforced training\", \"Control", "+= 1 t = Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l = Label(t, text=\"This", "animals): outputFrames = {} columns = [\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\", \"Average", "columns, trial) trialFrame = trialFrame.append(tempFrame) # add this pigeon to trial frame #", "# re-run if threshold has been changed value = float(thresholdBox.get()) try: if (value", "title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500, text=\"This program was created by <NAME> for", "the birds # ====================================================================== animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas", "to make sure it is not \\ currently in use. 
Saving operation cancelled.\")", "GUI root root.wm_title(\"Data Processor\") # create title label root.geometry(\"840x520+300+300\") # set the size", "desired analyses def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if", "\"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile = 0 progressTime = 0 #", "840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create a frame for the bottom section", "also available upon hovering over any \\ element within the GUI.\\n\\n\" self.createComponents() #", "# function for creating the select all and de-select button frames def createButtons(self,", "to threshold box above.\") # Reset threshold to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset", "startTime = time.time() # start timer progressTime = startTime # define the output", "data for statistical analysis in the statistical analysis software. 
\"\"\" from Tkinter import", "animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to analyze.\") elif (trialsForOutput ==", "print \"\\nProcessing %1.0f data files with a threshold of %0.0f units, \\ please", "element within the GUI.\\n\\n\" self.createComponents() # function for creating the select all and", "pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take first # term from trial", "location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/', sep) # Ask user to", "= tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create a frame for the", "\"animals\") if ((trialsForOutput != []) and (animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput)", "to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an \\ issue writing", "= gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial]", "\"Sorry there as an \\ issue writing to the designated excel file. 
Check", "50 outputFilename = \"\" pigeonName = \"\" allPigeons = {} allData = {}", "extract pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take first # term from", "groupType): groupsForOutput = [] if (groupType == \"animals\"): keys = self.animals else: keys", "test group self.trialLabels = [\"Non-reinforced training\", \"Control 1\", \"Control 2\", \"Feature Only\", \"Geometry", "2\", \"Group where an extra wall and a \\ feature wall are placed", "=============================================================================# class App(Frame): # Constructor def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) #", "((trialsForOutput != []) and (animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get", "====================================================================== animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100,", "datafile = pd.ExcelFile(file) index = allFiles.index(file) # now read excel file data into", "self.trialTooltips = [\"Non-reinforced training group.\", \"Control group 1\", \"Control group 2\", \"Group where", "Output the desired analyses def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals,", "on the groups and animals\\ selected above.\") # Create and populate group and", "= pd.ExcelWriter(chosenName) # create the excel writer object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer,", "the environment \\ remains the same.\", \"Group where the feature wall is moved", "numErrors, \"Please select a valid directory...\") dataDirname = dataDirname.replace('/', sep) # cd to", "pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial == \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial)", "(selectAll, deselectAll) # 
Callback for select all and de-select all buttons def allButtons(self,", "= time.time() # start timer progressTime = startTime # define the output spreadsheet", "# Create a quit button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip =", "select/deselect all buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create a frame for handling", "use the excel writer to save this pigeon to a data sheet in", "+ initialFileName) and (\".xls\" not in chosenName): chosenName = chosenName + \".xls\" try:", "as an \\ issue writing to the designated excel file. Check to make", "in (x,y) pigeon.calcDist(calcForThreshold) # use the excel writer to save this pigeon to", "# Create a frame for handling all of the birds # ====================================================================== animalsFrame", "also save each pigeon data to a dictionary for GUI processing allData[pigeonName] =", "gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame)", "label root.geometry(\"840x520+300+300\") # set the size of the window # Initialize variables toolTipDelay", "text fonts for components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) #", "delay=toolTipDelay + 500, text=\"This program was created by <NAME> for use \\ in", "of the window # Initialize variables toolTipDelay = 700 # ms defaultThreshold =", "sep); if (chosenName != dirname + sep + initialFileName) and (\".xls\" not in", "and animals\\ selected above.\") # Create and populate group and trial button frames", "pigeons loaded into the dictionary allPigeons for pigeonName, pigeon in allPigeons.iteritems(): currFile +=", "and (\".xls\" not in chosenName): chosenName = chosenName + \".xls\" try: # create", "the same.\", \"Group where the feature wall is moved to the end of", "all of the 
pigeons loaded into the dictionary allPigeons for pigeonName, pigeon in", "sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to", "window # Initialize variables toolTipDelay = 700 # ms defaultThreshold = 50 outputFilename", "took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\ processing of", "deselect all buttons canv = Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220, 10, dash=(2,", "1\", \"Control group 2\", \"Group where an extra wall and a \\ feature", "if selected to if (self.sortOutput == 1): if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"]", "= ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" + text + \" marked for analysis.\")", "was an issue \\ writing to the designated excel file. Check to make", "= tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run the GUI", "parsing dataframe based on groups def analyzeGroups(self, trials, animals): outputFrames = {} columns", "font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the output excel spreadsheets", "= list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X and Y coordinates option selected columns", "= pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop over each pigeon and acquire data", "the designated excel file. 
Check to make sure it is not \\ currently", "command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any changes", "data for file in allFiles: datafile = pd.ExcelFile(file) index = allFiles.index(file) # now", "Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l = Label(t, text=\"This is window #%s\" %", "de-select all buttons def allButtons(self, buttonGroup, event): for buttonNum in range(len(buttonGroup)): if event", "for each bird in the data directory for bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame,", "seconds.\" % processingTime print \"\\nFormatted output of all selected data files located in", "but the geometry of the environment \\ remains the same.\", \"Group where the", "side=LEFT) # Create a checkbox for each test group self.trialLabels = [\"Non-reinforced training\",", "sort by group and store in list of dataframes if selected to if", "= Label(t, text=\"This is window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100)", "Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create a", "in the laboratory of Dr. <NAME>.\") # Create a canvas for drawing a", "text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and close the", "in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all", "runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on the groups and animals\\", "supplied \\ README file. 
Tooltips are also available upon hovering over any \\", "Re-analyze with new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False))", "defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip", "= self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame =", "get the output name for saving the excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName", "to a data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile,", "components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create a frame", "current directory and file location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/', sep)", "cancelled.\") try: writer.save() print \"Saving output of chosen groups and pigeons to \",", "print \"Threshold has not changed from default\" return if (reset == True): thresholdBox.delete(0,", "\\ writing to the designated excel file. 
Check to make sure it is", "print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) # also save each pigeon data to", "tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run the GUI app = App(root) root.resizable(width=FALSE, height=FALSE)", "outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output name for saving the excel", "= Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort", "\"Processing the selected data files took %1.2f seconds.\" % processingTime print \"\\nFormatted output", "2 from ToolTip import ToolTip # Import for directory dialog import tkFileDialog, tkMessageBox,", "title label root.geometry(\"840x520+300+300\") # set the size of the window # Initialize variables", "from trial information in first entry # convert unicode to utf8 pigeonName =", "todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-' + '-'.join(trialsForOutput) + \".xls\" chosenName", "goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns,", "selected columns = columns.append([\"X Dist\", \"Y Dist\"])''' for trial in trials: trialFrame =", "App(Frame): # Constructor def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run the", "button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the", "and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset", "0 # loop through all of the pigeons loaded into the dictionary allPigeons", "% (currFile, numFiles) # also save each pigeon data to a dictionary for", "# Create a frame for handling all of 
the additional buttons # ======================================================================", "for statistical analysis in the statistical analysis software. \"\"\" from Tkinter import *", "License, Ver 2 from ToolTip import ToolTip # Import for directory dialog import", "frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name \\", "to quit instead?\") if (result == True): print \"Exiting program...\" exit() else: numError", "== 1): if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"])", "except: print \"Processing the selected data files took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial", "= AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame) # add this", "auto-sort the output excel spreadsheets by \\ trial type.\") # Create a quit", "label title_label = Label(titleFrame, text=\"Data Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip", "excel writer object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No", "= 0 while True: if (numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory", "# start timer progressTime = startTime # define the output spreadsheet outputFilename =", "files with a threshold of %0.0f units, \\ please wait...\" % (numFiles, calcForThreshold)", "---------------------------- This program was developed to automatically format input excel data for statistical", "this pigeon to a data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\"", "ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the output excel 
spreadsheets by \\ trial type.\")", "= Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l = Label(t, text=\"This is window #%s\"", "of the goal locations in (x,y) pigeon.calcDist(calcForThreshold) # use the excel writer to", "by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General Public License, Ver", "break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data directory.\") if not", "for components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create a", "ToolTip # Import for directory dialog import tkFileDialog, tkMessageBox, tkFont, glob, time #", "to auto-sort the output excel spreadsheets by \\ trial type.\") # Create a", "box above.\") # Reset threshold to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold and", "to analyze.\") def checkReformat(self, thresholdBox, reset): # re-run if threshold has been changed", "Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to", "data processing was \\ cancelled.\") # =============================================================================# # ==========main function for handling processing", "Dist\", \"Y Dist\"])''' for trial in trials: trialFrame = pd.DataFrame({}) # storage frame", "threshold of %0.0f units, \\ please wait...\" % (numFiles, calcForThreshold) startTime = time.time()", "and trial button frames # ====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) #", "Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") 
self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc)", "formatting on the data folder analyzePigeons(defaultThreshold, path) print \"\\nTips for using the GUI", "Create text fonts for components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16)", "processing will not likely change for the same threshold values, \\ this may", "groups and pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an", "# create excelwriter object for outputting to excel writer = pd.ExcelWriter(chosenName) # create", "(reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value, path) except:", "canv.pack(fill=X, anchor=CENTER, expand=True) # Create a frame for the bottom section # ======================================================================", "def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run the initial formatting on", "time.time() # start timer progressTime = startTime # define the output spreadsheet outputFilename", "====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change", "group and store in list of dataframes if selected to if (self.sortOutput ==", "analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\", \"Please select at least one", "tkMessageBox, tkFont, glob, time # =============================================================================# root = Tk() # create GUI root", "allWriter = pd.ExcelWriter(outputFilename) currFile = 0 progressTime = 0 # loop through all", "all buttons canv = Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220, 10, dash=(2, 4))", "dataDirname: raise 
ValueError(\"empty string\") break except ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid directory -", "in first entry # convert unicode to utf8 pigeonName = pigeonNametemp.encode('utf8') # create", "has been changed value = float(thresholdBox.get()) try: if (value == defaultThreshold): print \"Threshold", "text + \" marked for analysis.\") return (selectAll, deselectAll) # Callback for select", "a button for each bird in the data directory for bird in range(len(self.animals)):", "to data directory chdir(dataDirname) # list all files of type .xls allFiles =", "# Create a sort button self.sortOutput = IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput,", "was developed to automatically format input excel data for statistical analysis in the", "please wait...\" % (numFiles, calcForThreshold) startTime = time.time() # start timer progressTime =", "analysis software. \"\"\" from Tkinter import * from ttk import Frame, Style from", "= time.time() # update progress time # find the indices of the goal", "was selected. Saving operation cancelled.\") try: writer.save() print \"Saving output of chosen groups", "not \\ currently in use. 
Saving operation cancelled.\") elif (trialsForOutput == [] and", "1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\",", "# Create a run button runButton = Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y)", "= list(columns) goColumns[-1] = \"Average Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"])", "close the GUI.\") def create_window(self): self.counter += 1 t = Toplevel(self) t.wm_title(\"Window #%s\"", "# Create a button for each bird in the data directory for bird", "command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" + text", "Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial", "# Import tool tip function developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under", "long formatting takes processingTime = time.time() - startTime try: allWriter.save() printInfo(processingTime, outputFilename) except:", "= ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset threshold to default value.\") # Create a", "# Ask user to identify the data directory numErrors = 0 while True:", "selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" + text + \" for analysis.\")", "outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame return outputFrames", "from ttk import Frame, Style from os import chdir, path, sep import pandas", "and file location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname = 
dirname.replace('/', sep) # Ask", "expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500, text=\"This program was created by <NAME>", "chosenName = chosenName.replace('/', sep); if (chosenName != dirname + sep + initialFileName) and", "Type\", \"Pigeon Name\"]) else: if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif", "reset): # re-run if threshold has been changed value = float(thresholdBox.get()) try: if", "reset threshold to default value.\") # Create a sort button self.sortOutput = IntVar()", "sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name \\ was selected. Saving operation", "numFiles) # also save each pigeon data to a dictionary for GUI processing", "keys = self.trialKeys # check which buttons are selected for buttonNum in buttons:", "as usual, there was an issue \\ writing to the designated excel file.", "fill=\"both\", expand=True, padx=100, pady=100) # function for determining which groups/animals will be analyzed", "for bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create", "to analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\", \"Please select at least", "footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a run button runButton = Button(footerFrame, width=200, text=\"Run", "processingTime print \"\\nFormatted output of all selected data files located in \" +", "all buttons def allButtons(self, buttonGroup, event): for buttonNum in range(len(buttonGroup)): if event ==", "over multipled attempts. 
Do you want to quit instead?\") if (result == True):", "= pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame = tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True]", "\"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips =", "<NAME> lab ---------------------------- This program was developed to automatically format input excel data", "buttons and components of the GUI def createComponents(self): # Create text fonts for", "= pd.ExcelFile(file) index = allFiles.index(file) # now read excel file data into a", "from pigeon import Pigeon # Import tool tip function developed by <NAME> at", "# ==========main function for handling processing and GUI functions============# # =============================================================================# class App(Frame):", "side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame = Frame(self.animalCanvas,", "in use. 
Since processing will not likely change for the same threshold values,", "True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a", "in allFiles: datafile = pd.ExcelFile(file) index = allFiles.index(file) # now read excel file", "the output name for saving the excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName =", "\\ cancelled.\") # =============================================================================# # ==========main function for handling processing and GUI functions============#", "outputFilename) except: print \"Processing the selected data files took %1.2f seconds.\" % processingTime", "of dataframes if selected to if (self.sortOutput == 1): if (trial == \"GO\"):", "check which buttons are selected for buttonNum in buttons: if buttonNum.get(): indexOfButton =", "and components of the GUI def createComponents(self): # Create text fonts for components", "\"Please select a valid directory...\") dataDirname = dataDirname.replace('/', sep) # cd to data", "text=\"Run analysis based on the groups and animals\\ selected above.\") # Create and", "a separation line canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X,", "program can be found in the supplied \\ README file. 
Tooltips are also", "handling all of the birds # ====================================================================== animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True,", "all of the birds # ====================================================================== animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER,", "= self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != []) and (animalsForOutput", "Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon", "if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial", "locations in (x,y) pigeon.calcDist(calcForThreshold) # use the excel writer to save this pigeon", "least one grouping to analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\", \"Please", "# convert unicode to utf8 pigeonName = pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] =", "analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\", \"Please enter a valid number.\") thresholdBox.delete(0, END)", "selected\", \"Please select at least one bird to analyze.\") def checkReformat(self, thresholdBox, reset):", "= gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial", "+ text + \" marked for analysis.\") return (selectAll, deselectAll) # Callback for", "Check to make sure it is not \\ currently in use. 
Saving operation", "= Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\")", "\"Please select at least one grouping to analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No", "directory dialog import tkFileDialog, tkMessageBox, tkFont, glob, time # =============================================================================# root = Tk()", "thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip =", "select the data directory.\") if not dataDirname: raise ValueError(\"empty string\") break except ValueError:", "Create a checkbox for each test group self.trialLabels = [\"Non-reinforced training\", \"Control 1\",", "dialog import tkFileDialog, tkMessageBox, tkFont, glob, time # =============================================================================# root = Tk() #", "section # ====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X) # Create the title label title_label", "directory - Failed \\ attempt %0.0f/5\" % numErrors, \"Please select a valid directory...\")", "an extra wall and a \\ feature wall are placed in the environment", "(trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] =", "automatically format input excel data for statistical analysis in the statistical analysis software.", "width=840, height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create a frame", "excel writer to save this pigeon to a data sheet in output.xls pigeon.dataframe.to_excel(allWriter,", "\"Select\")) 
selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" + text + \"", "[])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output name for saving the", "indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for parsing dataframe based on", "title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500, text=\"This program was created by", "X and Y coordinates option selected columns = columns.append([\"X Dist\", \"Y Dist\"])''' for", "self.pack(fill=BOTH, expand=True) # run the initial formatting on the data folder analyzePigeons(defaultThreshold, path)", "this value to set a new threshold value \\ for calculating the max", "END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the buttons", "hovering over any \\ element within the GUI.\\n\\n\" self.createComponents() # function for creating", "threshold box above.\") # Reset threshold to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold", "the indices of the goal locations in (x,y) pigeon.calcDist(calcForThreshold) # use the excel", "wall are placed in the environment to create an enclosed square.\", \"Group where", "function for determining which groups/animals will be analyzed def getGroups(self, buttons, groupType): groupsForOutput", "through all of the pigeons loaded into the dictionary allPigeons for pigeonName, pigeon", "Pecks\"])==True] return tempFrame # run the GUI app = App(root) root.resizable(width=FALSE, height=FALSE) root.mainloop()", "the pigeons loaded into the dictionary allPigeons for pigeonName, pigeon in allPigeons.iteritems(): currFile", "= list(allData.keys()) self.animalVals = [] # Create a button for each bird in", "side=BOTTOM) # Create a run button runButton = 
Button(footerFrame, width=200, text=\"Run Processing\", command=self.run)", "\"\\nFormatted output of all selected data files located in \" + outputFilename +", "timer progressTime = startTime # define the output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\"", "mainFile = path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/', sep) # Ask user to identify the", "thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\", \"Please enter", "within the GUI.\\n\\n\" self.createComponents() # function for creating the select all and de-select", "a data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles)", "to a dictionary for GUI processing allData[pigeonName] = pigeon.dataframe # also calculate how", "[\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\", \"Average Dist\"] goColumns = list(columns) goColumns[-1] =", "and pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an \\", "select all and deselect all buttons canv = Canvas(frame, width=220, height=10) canv.create_line(20, 10,", "width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on", "outputFilename): print \"Processing the selected data files took %1.2f seconds.\" % processingTime print", "Initialize variables toolTipDelay = 700 # ms defaultThreshold = 50 outputFilename = \"\"", "and de-select button frames def createButtons(self, frame, vals, text): # create canvas for", "all of the buttons and components of the GUI def createComponents(self): # Create", "allButtons(self, buttonGroup, event): for buttonNum in range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1) else:", "\"Group where the feature wall is moved to the end of the \\", 
"was created by <NAME> for use \\ in the laboratory of Dr. <NAME>.\")", "self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) # function for determining which groups/animals will", "located in \" + outputFilename + '.' def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f", "self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame =", "selected. Saving operation cancelled.\") try: writer.save() print \"Saving output of chosen groups and", "\"Pigeon Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"])", "= Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and", "a checkbox for each test group self.trialLabels = [\"Non-reinforced training\", \"Control 1\", \"Control", "takes processingTime = time.time() - startTime try: allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing", "initial formatting on the data folder analyzePigeons(defaultThreshold, path) print \"\\nTips for using the", "allData = {} groupsForOutput = [] trialButtons = [] trialButtonTooltips = [] animalButtons", "trial information in first entry # convert unicode to utf8 pigeonName = pigeonNametemp.encode('utf8')", "text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click", "num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) #", "# create select/deselect all 
buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create a frame", "\"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns,", "goal to be kept for data analysis.\") # Re-analyze with new thresholdBox reformatButton", "First read-in the data for file in allFiles: datafile = pd.ExcelFile(file) index =", "file. Check to make sure it is not currently \\ in use. Since", "not changed from default\" return if (reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold)", "is window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) # function for", "selected\", \"Please select something to analyze.\") elif (trialsForOutput == []): tkMessageBox.showinfo(\"No groups selected\",", "the max distance away from a goal to be kept for data analysis.\")", "the excel writer object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\",", "folder analyzePigeons(defaultThreshold, path) print \"\\nTips for using the GUI of this program can", "groupsForOutput = [] trialButtons = [] trialButtonTooltips = [] animalButtons = [] #", "thresholdBox, reset): # re-run if threshold has been changed value = float(thresholdBox.get()) try:", "selected data files took %1.2f seconds.\" % processingTime print \"\\nFormatted output of all", "Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0,", "\"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] = 
trialFrame.sort([\"Trial Type\",", "the geometry of the environment \\ remains the same.\", \"Group where the feature", "self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100,", "pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop over each pigeon and acquire data matching", "from default\" return if (reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value =", "chdir, path, sep import pandas as pd # import pandas data structures (DataFrame)", "upon hovering over any \\ element within the GUI.\\n\\n\" self.createComponents() # function for", "outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile = 0 progressTime", "(numFiles, calcForThreshold) startTime = time.time() # start timer progressTime = startTime # define", "selected for buttonNum in buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput", "data directory for bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6)", "animalButtons[-1].pack(pady=6) # create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a frame", "def createButtons(self, frame, vals, text): # create canvas for select all and deselect", "excel spreadsheets by \\ trial type.\") # Create a quit button quitButton =", "calculating the max distance away from a goal to be kept for data", "except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name \\ was selected. 
Saving operation cancelled.\")", "Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on the groups", "cancelled.\") elif (trialsForOutput == [] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select", "for drawing a separation line canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840,", "if (reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value, path)", "toolTipDelay = 700 # ms defaultThreshold = 50 outputFilename = \"\" pigeonName =", "text=\"Click to reset threshold to default value.\") # Create a sort button self.sortOutput", "%0.0f/5\" % numErrors, \"Please select a valid directory...\") dataDirname = dataDirname.replace('/', sep) #", "chosenName.replace('/', sep); if (chosenName != dirname + sep + initialFileName) and (\".xls\" not", "statistical analysis software. \"\"\" from Tkinter import * from ttk import Frame, Style", "the selected data files took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\",", "canvas for drawing a separation line canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10,", "create an enclosed square.\", \"Group where the feature wall is removed, but the", "220, 10, dash=(2, 4)) canv.pack(fill=X) # create each button separately selectAll = Button(frame,", "groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for parsing dataframe based on groups def analyzeGroups(self,", "\" for analysis.\") deselectAll = Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip", "= ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" + text + \" for analysis.\") deselectAll", "directory and file location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname = 
dirname.replace('/', sep) #", "buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for parsing dataframe based on groups def", "output of initial data processing was \\ cancelled.\") # =============================================================================# # ==========main function", "trialButtonTooltips = [] animalButtons = [] # locate the current directory and file", "Label(buttonsFrame, text=\"Change threshold: \") # Threshold entry box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack()", "for file in allFiles: datafile = pd.ExcelFile(file) index = allFiles.index(file) # now read", "where the feature wall is removed, but the geometry of the environment \\", "attempts. Do you want to quit instead?\") if (result == True): print \"Exiting", "for buttonNum in buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput #", "root.wm_title(\"Data Processor\") # create title label root.geometry(\"840x520+300+300\") # set the size of the", "size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create a frame for the title section", "[] if (groupType == \"animals\"): keys = self.animals else: keys = self.trialKeys #", "to also create a processed dataframe for each pigeon/trial def getFrame(self, pigeonFrame, columns,", "the laboratory of Dr. 
<NAME>.\") # Create a canvas for drawing a separation", "kept for data analysis.\") # Re-analyze with new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply", "except ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt %0.0f/5\" %", "output cancelled\", \"Although \\ processing of the selected data files occurred as usual,", "text=\"Select all \" + text + \" for analysis.\") deselectAll = Button(frame, text=\"De-Select", "selected data files occurred as usual, there was an issue \\ writing to", "for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name", "dirname.replace('/', sep) # Ask user to identify the data directory numErrors = 0", "= path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile = 0 progressTime =", "a sort button self.sortOutput = IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack()", "text=\"Select to auto-sort the output excel spreadsheets by \\ trial type.\") # Create", "self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw')", "chosenName): chosenName = chosenName + \".xls\" try: # create excelwriter object for outputting", "anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals = [] # Create a button", "selectAll = Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay,", "if ((trialsForOutput != 
[]) and (animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) #", "is removed, but the geometry of the environment \\ remains the same.\", \"Group", "function developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General Public", "not currently \\ in use. Since processing will not likely change for the", "by group and store in list of dataframes if selected to if (self.sortOutput", "which buttons are selected for buttonNum in buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum)", "def getGroups(self, buttons, groupType): groupsForOutput = [] if (groupType == \"animals\"): keys =", "# get the output name for saving the excel file todaysDate = time.strftime(\"%Y-%m-%d\")", "Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all", "sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the output excel spreadsheets by", "printInfo(processingTime, outputFilename): print \"Processing the selected data files took %1.2f seconds.\" % processingTime", "tkFileDialog, tkMessageBox, tkFont, glob, time # =============================================================================# root = Tk() # create GUI", "of type .xls allFiles = glob.glob(\"*.xls\") try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel", "buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for parsing dataframe based", "analyze.\") def checkReformat(self, thresholdBox, reset): # re-run if threshold has been changed value", "====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a checkbox for each", "this pigeon to trial frame # sort by group and 
store in list", "Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else:", "chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if (chosenName != dirname +", "Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF", "AFtrialFrame outputFrames[trial] = trialFrame return outputFrames # function to also create a processed", "= columns.append([\"X Dist\", \"Y Dist\"])''' for trial in trials: trialFrame = pd.DataFrame({}) #", "== \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] =", "- progressTime) > 5): # display progress progressTime = time.time() # update progress", "ToolTip import ToolTip # Import for directory dialog import tkFileDialog, tkMessageBox, tkFont, glob,", "\"animals\") # Create a frame for handling all of the additional buttons #", "of the pigeons loaded into the dictionary allPigeons for pigeonName, pigeon in allPigeons.iteritems():", "allData[pigeon] if (trial == \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame)", "Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \") # Threshold entry box thresholdBox", "Create a sort button self.sortOutput = IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont)", "buttons def allButtons(self, buttonGroup, event): for buttonNum in range(len(buttonGroup)): if event == \"Select\":", "output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile =", "object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: 
tkMessageBox.showinfo(\"Saving cancelled\", \"No output file", "a number\", \"Please enter a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self,", "buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change threshold:", "self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" + text +", "designated excel file. Check to make sure it is not currently \\ in", "AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame) # add this pigeon", "= pigeonData[\"Trial Information\"][0].split('_')[0] # take first # term from trial information in first", "square.\", \"Group where the feature wall is removed, but the geometry of the", "ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and close the GUI.\") def create_window(self): self.counter +=", "= Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value", "text=\"Change this value to set a new threshold value \\ for calculating the", "path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/', sep) # Ask user to identify the data directory", "GUI.\") def create_window(self): self.counter += 1 t = Toplevel(self) t.wm_title(\"Window #%s\" % self.counter)", "def createComponents(self): # Create text fonts for components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont", "expand=True, side=BOTTOM) # Create a run button runButton = Button(footerFrame, width=200, text=\"Run Processing\",", "AFtrialFrame = pd.DataFrame({}) # loop over each pigeon and acquire data matching requested", "one bird to analyze.\") def checkReformat(self, thresholdBox, reset): # re-run if threshold has", 
"be found in the supplied \\ README file. Tooltips are also available upon", "list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X and Y coordinates option selected columns =", "self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any changes to", "processing allData[pigeonName] = pigeon.dataframe # also calculate how long formatting takes processingTime =", "pigeon.dataframe # also calculate how long formatting takes processingTime = time.time() - startTime", "# storage frame for each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) #", "all \" + text + \" for analysis.\") deselectAll = Button(frame, text=\"De-Select All\",", "+ text + \" for analysis.\") deselectAll = Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals,", "grouping to analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\", \"Please select at", "# now read excel file data into a DataFrame pigeonData = pd.read_excel(datafile) #", "trial frame # sort by group and store in list of dataframes if", "True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset threshold to default value.\")", "Processor ============================ Created by: <NAME> For: Dr. <NAME> lab ---------------------------- This program was", "the GUI.\") def create_window(self): self.counter += 1 t = Toplevel(self) t.wm_title(\"Window #%s\" %", "= glob.glob(\"*.xls\") try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found. 
Please restart", "to default value.\") # Create a sort button self.sortOutput = IntVar() sortButton =", "new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip", "variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") #", "frame for the bottom section # ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM)", "= self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != []) and (animalsForOutput != [])): outputFrames =", "# define the output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter =", "above.\") # Reset threshold to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold and run\",", "handling processing and GUI functions============# # =============================================================================# class App(Frame): # Constructor def __init__(self,", "columns = columns.append([\"X Dist\", \"Y Dist\"])''' for trial in trials: trialFrame = pd.DataFrame({})", "to identify the data directory numErrors = 0 while True: if (numErrors >", "ToolTip(title_label, delay=toolTipDelay + 500, text=\"This program was created by <NAME> for use \\", "outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"]", "\\ in the laboratory of Dr. 
<NAME>.\") # Create a canvas for drawing", "import tkFileDialog, tkMessageBox, tkFont, glob, time # =============================================================================# root = Tk() # create", "import Pigeon # Import tool tip function developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip,", "= startTime # define the output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold)", "time.time() # update progress time # find the indices of the goal locations", "= ToolTip(title_label, delay=toolTipDelay + 500, text=\"This program was created by <NAME> for use", "string\") break except ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt", "sure it is not \\ currently in use. Saving operation cancelled.\") elif (trialsForOutput", "default\" return if (reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold", "title section # ====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X) # Create the title label", "%1.2f seconds.\" % processingTime print \"\\nFormatted output of all selected data files located", "\"Control group 2\", \"Group where an extra wall and a \\ feature wall", "Dr. 
<NAME>.\") # Create a canvas for drawing a separation line canv =", "import chdir, path, sep import pandas as pd # import pandas data structures", "General Public License, Ver 2 from ToolTip import ToolTip # Import for directory", "tkFont, glob, time # =============================================================================# root = Tk() # create GUI root root.wm_title(\"Data", "Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0,", "defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the buttons and components", "elif (trialsForOutput == [] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something", "font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all buttons self.createButtons(trialFrame, self.trialVals, \"experimental", "to if (self.sortOutput == 1): if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial", "else: if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial == \"AF\"):", "Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run the initial formatting on the data folder", "Pigeon # Import tool tip function developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed", "= 0 break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data directory.\")", "ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" + text + \" for analysis.\") deselectAll =", "and de-select all buttons def allButtons(self, buttonGroup, event): for buttonNum in range(len(buttonGroup)): if", "wall.\"] self.trialVals = [] # create all of the group buttons for 
num", "\"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) # also save each pigeon data to a", "\\ trial type.\") # Create a quit button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit)", "AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame) # add", "\"Control 2\", \"Feature Only\", \"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\",", "for pigeonName, pigeon in allPigeons.iteritems(): currFile += 1 if ((time.time() - progressTime) >", "calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile = 0 progressTime = 0 # loop through", "# ====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a checkbox for", "for using the GUI of this program can be found in the supplied", "developed to automatically format input excel data for statistical analysis in the statistical", "# create title label root.geometry(\"840x520+300+300\") # set the size of the window #", "the select all and de-select button frames def createButtons(self, frame, vals, text): #", "type.\") # Create a quit button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip", "\\ remains the same.\", \"Group where the feature wall is moved to the", "with new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack()", "Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame return outputFrames # function to also create", "+ '.' 
def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data files with a threshold", "Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing the selected data files took %1.2f seconds.\"", "else: numError = 0 break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the", "(self.sortOutput == 1): if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon", "structures (DataFrame) and read_excel # Import module with class/functions handling pigeon procesing from", "analysis.\") # Re-analyze with new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new threshold\", command=lambda:", "(trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"]", "and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to analyze.\") elif (trialsForOutput", "# sort by group and store in list of dataframes if selected to", "return if (reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value,", "\"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training group.\", \"Control group 1\",", "Type\", \"Removed Pecks\", \"Average Dist\"] goColumns = list(columns) goColumns[-1] = \"Average Opp Dist\"", "over each pigeon and acquire data matching requested trials for pigeon in animals:", "self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" + text +", "to apply any changes to threshold box above.\") # Reset threshold to defaultThreshold", "self.animalVals = [] # Create a button for each bird in the data", "file. 
Tooltips are also available upon hovering over any \\ element within the", "# Reset threshold to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda:", "10, 220, 10, dash=(2, 4)) canv.pack(fill=X) # create each button separately selectAll =", "input excel data for statistical analysis in the statistical analysis software. \"\"\" from", "'''if X and Y coordinates option selected columns = columns.append([\"X Dist\", \"Y Dist\"])'''", "bottom section # ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a", "processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\ processing of the selected data files", "to the designated excel file. Check to make sure it is not currently", "file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-' + '-'.join(trialsForOutput) + \".xls\"", "text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \"", "trials, animals): outputFrames = {} columns = [\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\",", "\\ README file. 
Tooltips are also available upon hovering over any \\ element", "buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for parsing", "t = Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l = Label(t, text=\"This is window", "= Label(buttonsFrame, text=\"Change threshold: \") # Threshold entry box thresholdBox = Entry(buttonsFrame, width=10)", "self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the buttons and components of the GUI def", "AF Dist\"]) '''if X and Y coordinates option selected columns = columns.append([\"X Dist\",", "columns.append([\"X Dist\", \"Y Dist\"])''' for trial in trials: trialFrame = pd.DataFrame({}) # storage", "= \"Average Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X and", "# Initialize variables toolTipDelay = 700 # ms defaultThreshold = 50 outputFilename =", "def printInfo(processingTime, outputFilename): print \"Processing the selected data files took %1.2f seconds.\" %", "designated excel file. 
Check to make sure it is not \\ currently in", "animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals,", "text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \"", "object for outputting to excel writer = pd.ExcelWriter(chosenName) # create the excel writer", "outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame return outputFrames # function to also", "re-run if threshold has been changed value = float(thresholdBox.get()) try: if (value ==", "any changes to threshold box above.\") # Reset threshold to defaultThreshold resetButton =", "[\"Non-reinforced training group.\", \"Control group 1\", \"Control group 2\", \"Group where an extra", "anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame =", "gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns,", "in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) # also save", "\"\" allPigeons = {} allData = {} groupsForOutput = [] trialButtons = []", "= tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data directory.\") if not dataDirname: raise ValueError(\"empty", "in buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for", "[]): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to analyze.\") elif (trialsForOutput == []): 
tkMessageBox.showinfo(\"No", "numErrors = 0 while True: if (numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No", "# also calculate how long formatting takes processingTime = time.time() - startTime try:", "try: # create excelwriter object for outputting to excel writer = pd.ExcelWriter(chosenName) #", "threshold to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox, True))", "the same threshold values, \\ this may not be an issue. Saving the", "height=10) canv.create_line(20, 10, 220, 10, dash=(2, 4)) canv.pack(fill=X) # create each button separately", "name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take first # term from trial information", "= dataDirname.replace('/', sep) # cd to data directory chdir(dataDirname) # list all files", "create select/deselect all buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create a frame for", "pigeon to a data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" %", "changed value = float(thresholdBox.get()) try: if (value == defaultThreshold): print \"Threshold has not", "(\".xls\" not in chosenName): chosenName = chosenName + \".xls\" try: # create excelwriter", "startTime try: allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing the selected data files took", "# locate the current directory and file location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname", "allPigeons = {} allData = {} groupsForOutput = [] trialButtons = [] trialButtonTooltips", "name for saving the excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate +", "create excelwriter object for outputting to excel writer = pd.ExcelWriter(chosenName) # create the", "the title section # ====================================================================== 
titleFrame = Frame(self) titleFrame.pack(fill=X) # Create the title", "select all and de-select button frames def createButtons(self, frame, vals, text): # create", "for each pigeon/trial def getFrame(self, pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] ==", "create_window(self): self.counter += 1 t = Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l =", "for the same threshold values, \\ this may not be an issue. Saving", "except: tkMessageBox.showinfo(\"No excel spreadsheets found. Please restart the program.\") # First read-in the", "Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \") #", "range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all buttons", "for each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop over each", "\"Please select something to analyze.\") elif (trialsForOutput == []): tkMessageBox.showinfo(\"No groups selected\", \"Please", "been changed value = float(thresholdBox.get()) try: if (value == defaultThreshold): print \"Threshold has", "available upon hovering over any \\ element within the GUI.\\n\\n\" self.createComponents() # function", "+= 1 tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt %0.0f/5\" % numErrors, \"Please select", "[] # create all of the group buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar())", "fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals =", "took %1.2f seconds.\" % processingTime print \"\\nFormatted output of all 
selected data files", "outputFrames = {} columns = [\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\", \"Average Dist\"]", "<NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General Public License, Ver 2", "pigeonName = \"\" allPigeons = {} allData = {} groupsForOutput = [] trialButtons", "store in list of dataframes if selected to if (self.sortOutput == 1): if", "5): # display progress progressTime = time.time() # update progress time # find", "tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt %0.0f/5\" % numErrors, \"Please select a valid", "= ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the output excel spreadsheets by \\ trial", "__init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run the initial formatting on the", "for buttonNum in range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output", "variables toolTipDelay = 700 # ms defaultThreshold = 50 outputFilename = \"\" pigeonName", "try: if (value == defaultThreshold): print \"Threshold has not changed from default\" return", "threshold values, \\ this may not be an issue. 
Saving the output of", "cancelled\", \"Although \\ processing of the selected data files occurred as usual, there", "time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-' + '-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname,", "pigeon in allPigeons.iteritems(): currFile += 1 if ((time.time() - progressTime) > 5): #", "self.createComponents() # function for creating the select all and de-select button frames def", "all and deselect all buttons canv = Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220,", "dataframes if selected to if (self.sortOutput == 1): if (trial == \"GO\"): outputFrames[\"GO-Opp", "\".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if (chosenName != dirname", "# extract pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take first # term", "animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360,", "pigeon.calcDist(calcForThreshold) # use the excel writer to save this pigeon to a data", "ms defaultThreshold = 50 outputFilename = \"\" pigeonName = \"\" allPigeons = {}", "Import module with class/functions handling pigeon procesing from pigeon import Pigeon # Import", "units, \\ please wait...\" % (numFiles, calcForThreshold) startTime = time.time() # start timer", "to be kept for data analysis.\") # Re-analyze with new thresholdBox reformatButton =", "for each test group self.trialLabels = [\"Non-reinforced training\", \"Control 1\", \"Control 2\", \"Feature", "remains the same.\", \"Group where the feature wall is moved to the end", "# create GUI root root.wm_title(\"Data Processor\") # create title label root.geometry(\"840x520+300+300\") # set", "an issue \\ writing to the designated excel file. 
Check to make sure", "\\ selected over multipled attempts. Do you want to quit instead?\") if (result", "each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop over each pigeon", "command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on the groups and", "Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220, 10, dash=(2, 4)) canv.pack(fill=X) # create each", "initialdir=sep, title=\"Please select the data directory.\") if not dataDirname: raise ValueError(\"empty string\") break", "de-select button frames def createButtons(self, frame, vals, text): # create canvas for select", "keys = self.animals else: keys = self.trialKeys # check which buttons are selected", "\"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame", "Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500, text=\"This program", "Create a frame for handling all of the additional buttons # ====================================================================== buttonsFrame", "a canvas for drawing a separation line canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0,", "training\", \"Control 1\", \"Control 2\", \"Feature Only\", \"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\",", "= tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run the GUI app = App(root) root.resizable(width=FALSE,", "Dr. 
<NAME> lab ---------------------------- This program was developed to automatically format input excel", "event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output the desired analyses def run(self):", "buttonGroup[buttonNum].set(0) # Output the desired analyses def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput", "the program.\") # First read-in the data for file in allFiles: datafile =", "max distance away from a goal to be kept for data analysis.\") #", "the end of the \\ long wall.\"] self.trialVals = [] # create all", "a quit button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay,", "quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and close the GUI.\") def", "this may not be an issue. Saving the output of initial data processing", "are also available upon hovering over any \\ element within the GUI.\\n\\n\" self.createComponents()", "files occurred as usual, there was an issue \\ writing to the designated", "wall is moved to the end of the \\ long wall.\"] self.trialVals =", "[\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training group.\", \"Control group", "# function for determining which groups/animals will be analyzed def getGroups(self, buttons, groupType):", "\\ feature wall are placed in the environment to create an enclosed square.\",", "= self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame) # add this pigeon to trial", "and GUI functions============# # =============================================================================# class App(Frame): # Constructor def __init__(self, parent): Frame.__init__(self,", "== \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame", "# 
=============================================================================# class App(Frame): # Constructor def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True)", "outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon", "value = defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\", \"Please enter a valid", "the supplied \\ README file. Tooltips are also available upon hovering over any", "processingTime = time.time() - startTime try: allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing the", "\"\" pigeonName = \"\" allPigeons = {} allData = {} groupsForOutput = []", "width=100, height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar =", "additional buttons # ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel", "= float(thresholdBox.get()) try: if (value == defaultThreshold): print \"Threshold has not changed from", "# Create the title label title_label = Label(titleFrame, text=\"Data Processor For Pigeon Experiment\",", "under # GNU General Public License, Ver 2 from ToolTip import ToolTip #", "\"Threshold has not changed from default\" return if (reset == True): thresholdBox.delete(0, END)", "first # term from trial information in first entry # convert unicode to", "pd.DataFrame({}) # loop over each pigeon and acquire data matching requested trials for", "to create an enclosed square.\", \"Group where the feature wall is removed, but", "= chosenName + \".xls\" try: # create excelwriter object for outputting to excel", "10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create a frame for the bottom section #", "allData[pigeonName] = pigeon.dataframe # 
also calculate how long formatting takes processingTime = time.time()", "separation line canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER,", "Since processing will not likely change for the same threshold values, \\ this", "[] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to analyze.\") elif", "# set the size of the window # Initialize variables toolTipDelay = 700", "(currFile, numFiles) # also save each pigeon data to a dictionary for GUI", "self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all buttons self.createButtons(animalsFrame,", "\" + outputFilename + '.' def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data files", "self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output name for saving the excel file todaysDate", "END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\", \"Please", "path) except: tkMessageBox.showinfo(\"Not a number\", \"Please enter a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0,", "#%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) # function for determining which", "self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != []) and (animalsForOutput !=", "= time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-' + '-'.join(trialsForOutput) + \".xls\" chosenName =", "cancelled\", \"Sorry there as an \\ issue writing to the designated excel file.", "with class/functions handling pigeon procesing from pigeon import Pigeon # Import tool tip", 
"============================ Created by: <NAME> For: Dr. <NAME> lab ---------------------------- This program was developed", "l = Label(t, text=\"This is window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100,", "dictionary for GUI processing allData[pigeonName] = pigeon.dataframe # also calculate how long formatting", "Label(t, text=\"This is window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) #", "self.trialKeys # check which buttons are selected for buttonNum in buttons: if buttonNum.get():", "= allData[pigeon] if (trial == \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame =", "program...\" exit() else: numError = 0 break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please", "and read_excel # Import module with class/functions handling pigeon procesing from pigeon import", "components of the GUI def createComponents(self): # Create text fonts for components self.titleFont", "\"Average Dist\"] goColumns = list(columns) goColumns[-1] = \"Average Opp Dist\" AFColumns = list(goColumns)", "DataFrame pigeonData = pd.read_excel(datafile) # extract pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] #", "else: buttonGroup[buttonNum].set(0) # Output the desired analyses def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\")", "\"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif (trial == \"AF\"):", "make sure it is not currently \\ in use. 
Since processing will not", "exit() else: numError = 0 break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select", "also calculate how long formatting takes processingTime = time.time() - startTime try: allWriter.save()", "\"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training group.\", \"Control group 1\", \"Control group", "data files with a threshold of %0.0f units, \\ please wait...\" % (numFiles,", "GUI def createComponents(self): # Create text fonts for components self.titleFont = tkFont.Font(family=\"Arial\", size=18)", "has not changed from default\" return if (reset == True): thresholdBox.delete(0, END) thresholdBox.insert(0,", "allPigeons for pigeonName, pigeon in allPigeons.iteritems(): currFile += 1 if ((time.time() - progressTime)", "= [\"Non-reinforced training group.\", \"Control group 1\", \"Control group 2\", \"Group where an", "\"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if (trial == \"GO\"):", "handling pigeon procesing from pigeon import Pigeon # Import tool tip function developed", "700 # ms defaultThreshold = 50 outputFilename = \"\" pigeonName = \"\" allPigeons", "coordinates option selected columns = columns.append([\"X Dist\", \"Y Dist\"])''' for trial in trials:", "in range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output the desired", "canv.pack(fill=X) # create each button separately selectAll = Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals,", "tkFont.Font(family=\"Helvetica\", size=16) # Create a frame for the title section # ====================================================================== titleFrame", "apply any changes to threshold box above.\") # Reset threshold to defaultThreshold resetButton", "animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != []) and (animalsForOutput != [])): 
outputFrames", "format input excel data for statistical analysis in the statistical analysis software. \"\"\"", "range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect", "how long formatting takes processingTime = time.time() - startTime try: allWriter.save() printInfo(processingTime, outputFilename)", "there was an issue \\ writing to the designated excel file. Check to", "for trial in trials: trialFrame = pd.DataFrame({}) # storage frame for each trial", "\"\"\" Data Processor ============================ Created by: <NAME> For: Dr. <NAME> lab ---------------------------- This", "end of the \\ long wall.\"] self.trialVals = [] # create all of", "# also save each pigeon data to a dictionary for GUI processing allData[pigeonName]", "return (selectAll, deselectAll) # Callback for select all and de-select all buttons def", "== \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial", "printInfo(processingTime, outputFilename) except: print \"Processing the selected data files took %1.2f seconds.\" %", "[]) and (animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output", "feature wall are placed in the environment to create an enclosed square.\", \"Group", "Create a run button runButton = Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip", "self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, 
anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys())", "self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals = []", "# term from trial information in first entry # convert unicode to utf8", "# Import module with class/functions handling pigeon procesing from pigeon import Pigeon #", "\"\\nTips for using the GUI of this program can be found in the", "Create a button for each bird in the data directory for bird in", "populate group and trial button frames # ====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W,", "\"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != []) and (animalsForOutput != [])):", "height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame,", "= 0 progressTime = 0 # loop through all of the pigeons loaded", "\"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\",", "frames # ====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a checkbox", "== \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame,", "= \"\" pigeonName = \"\" allPigeons = {} allData = {} groupsForOutput =", "%0.0f/%0.0f...\" % (currFile, numFiles) # also save each pigeon data to a dictionary", "run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to 
reset threshold", "pd.read_excel(datafile) # extract pigeon name pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take first #", "excel writer = pd.ExcelWriter(chosenName) # create the excel writer object for frameIndex in", "import pandas data structures (DataFrame) and read_excel # Import module with class/functions handling", "if (numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over multipled", "= Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select", "defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to set a new threshold", "in use. Saving operation cancelled.\") elif (trialsForOutput == [] and animalsForOutput == []):", "name \\ was selected. Saving operation cancelled.\") try: writer.save() print \"Saving output of", "trialFrame return outputFrames # function to also create a processed dataframe for each", "time.time() - startTime try: allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing the selected data", "buttonGroup, event): for buttonNum in range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0)", "import ToolTip # Import for directory dialog import tkFileDialog, tkMessageBox, tkFont, glob, time", "else: keys = self.trialKeys # check which buttons are selected for buttonNum in", "path) print \"\\nTips for using the GUI of this program can be found", "attempt %0.0f/5\" % numErrors, \"Please select a valid directory...\") dataDirname = dataDirname.replace('/', sep)", "restart the program.\") # First read-in the data for file in allFiles: datafile", "if (chosenName != dirname + sep + initialFileName) and (\".xls\" not in chosenName):", "[]): tkMessageBox.showinfo(\"No groups selected\", \"Please select at least one 
grouping to analyze.\") elif", "tool tip function developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU", "all selected data files located in \" + outputFilename + '.' def analyzePigeons(calcForThreshold,", "numError = 0 break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data", "elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"]", "defaultThreshold): print \"Threshold has not changed from default\" return if (reset == True):", "Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if (trial == \"GO\"): outputFrames[\"GO-Opp", "def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput !=", "startTime # define the output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter", "data to a dictionary for GUI processing allData[pigeonName] = pigeon.dataframe # also calculate", "display progress progressTime = time.time() # update progress time # find the indices", "tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial == \"GO\"): goFrame = self.getFrame(pigeonFrame,", "processing and GUI functions============# # =============================================================================# class App(Frame): # Constructor def __init__(self, parent):", "self.animals = list(allData.keys()) self.animalVals = [] # Create a button for each bird", "wait...\" % (numFiles, calcForThreshold) startTime = time.time() # start timer progressTime = startTime", "text=\"This is window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) # function", "each pigeon and acquire data matching requested trials for pigeon in animals: tempFrame", "README file. 
Tooltips are also available upon hovering over any \\ element within", "+ \" for analysis.\") deselectAll = Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack()", "command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals =", "sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the output excel spreadsheets by \\", "file name \\ was selected. Saving operation cancelled.\") try: writer.save() print \"Saving output", "will not likely change for the same threshold values, \\ this may not", "each test group self.trialLabels = [\"Non-reinforced training\", \"Control 1\", \"Control 2\", \"Feature Only\",", "glob, time # =============================================================================# root = Tk() # create GUI root root.wm_title(\"Data Processor\")", "createComponents(self): # Create text fonts for components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont =", "Create a frame for the bottom section # ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S,", "pady=100) # function for determining which groups/animals will be analyzed def getGroups(self, buttons,", "matching requested trials for pigeon in animals: tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon]", "each pigeon/trial def getFrame(self, pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns]", "= ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to set a new threshold value \\", "[\"Non-reinforced training\", \"Control 1\", \"Control 2\", \"Feature Only\", \"Geometry 
Only\", \"Affine\"] self.trialKeys =", "except: tkMessageBox.showinfo(\"Not a number\", \"Please enter a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold)", "height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame,", "the title label title_label = Label(titleFrame, text=\"Data Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X,", "to utf8 pigeonName = pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime,", "tempFrame = tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run the", "type .xls allFiles = glob.glob(\"*.xls\") try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets", "frames def createButtons(self, frame, vals, text): # create canvas for select all and", "sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) # also", "% self.counter) l = Label(t, text=\"This is window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\",", "goColumns[-1] = \"Average Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if X", "For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500, text=\"This", "= [] # Create a button for each bird in the data directory", "Information\"][0].split('_')[0] # take first # term from trial information in first entry #", "font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 
500, text=\"This program was created", "excel file. Check to make sure it is not \\ currently in use.", "groups/animals will be analyzed def getGroups(self, buttons, groupType): groupsForOutput = [] if (groupType", "10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create a frame for the bottom", "# tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run the GUI app =", "== defaultThreshold): print \"Threshold has not changed from default\" return if (reset ==", "canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create a frame for the", "\") # Threshold entry box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip", "number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of", "# Import for directory dialog import tkFileDialog, tkMessageBox, tkFont, glob, time # =============================================================================#", "by: <NAME> For: Dr. <NAME> lab ---------------------------- This program was developed to automatically", "in the supplied \\ README file. 
Tooltips are also available upon hovering over", "handling all of the additional buttons # ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True)", "files took %1.2f seconds.\" % processingTime print \"\\nFormatted output of all selected data", "Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced", "away from a goal to be kept for data analysis.\") # Re-analyze with", "\"Pigeon Name\"]) else: if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial", "= 0 # loop through all of the pigeons loaded into the dictionary", "where the feature wall is moved to the end of the \\ long", "= self.trialKeys # check which buttons are selected for buttonNum in buttons: if", "select at least one bird to analyze.\") def checkReformat(self, thresholdBox, reset): # re-run", "index = allFiles.index(file) # now read excel file data into a DataFrame pigeonData", "[] # locate the current directory and file location dirname, mainFile = path.split(path.abspath(\"__file__\"))", "= {} allData = {} groupsForOutput = [] trialButtons = [] trialButtonTooltips =", "if (value == defaultThreshold): print \"Threshold has not changed from default\" return if", "separately selectAll = Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll,", "Create a frame for handling all of the birds # ====================================================================== animalsFrame =", "Saving operation cancelled.\") try: writer.save() print \"Saving output of chosen groups and pigeons", "pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an \\ issue", "extra wall and a \\ feature wall are placed in the environment to", "os import chdir, path, sep import pandas as pd # import pandas data", "new threshold 
value \\ for calculating the max distance away from a goal", "columns = [\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\", \"Average Dist\"] goColumns = list(columns)", "Dist\"]) '''if X and Y coordinates option selected columns = columns.append([\"X Dist\", \"Y", "footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a run button runButton =", "Import tool tip function developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under #", "at least one bird to analyze.\") def checkReformat(self, thresholdBox, reset): # re-run if", "command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset threshold to", "pigeonName, pigeon in allPigeons.iteritems(): currFile += 1 if ((time.time() - progressTime) > 5):", "float(thresholdBox.get()) try: if (value == defaultThreshold): print \"Threshold has not changed from default\"", "pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing the", "window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals = [] # Create a", "Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay + 500, text=\"This program was", "gotrialFrame = gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame", "sep) # cd to data directory chdir(dataDirname) # list all files of type", "+ sep + initialFileName) and (\".xls\" not in chosenName): chosenName = chosenName +", "= {} columns = [\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\", \"Average Dist\"] goColumns", "function for handling processing and GUI functions============# # 
=============================================================================# class App(Frame): # Constructor", "pd # import pandas data structures (DataFrame) and read_excel # Import module with", "scrollregion=(0, 0, 500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\",", "delay=toolTipDelay, text=\"Quit the program and close the GUI.\") def create_window(self): self.counter += 1", "True: if (numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over", "4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over multipled attempts. Do you", "will be analyzed def getGroups(self, buttons, groupType): groupsForOutput = [] if (groupType ==", "allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing the selected data files took", "= [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training group.\", \"Control", "changes to threshold box above.\") # Reset threshold to defaultThreshold resetButton = Button(buttonsFrame,", "elif (trialsForOutput == []): tkMessageBox.showinfo(\"No groups selected\", \"Please select at least one grouping", "Tkinter import * from ttk import Frame, Style from os import chdir, path,", "outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name \\ was selected.", "for handling all of the additional buttons # ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X,", "statistical analysis in the statistical analysis software. 
\"\"\" from Tkinter import * from", "\"Removed Pecks\", \"Average Dist\"] goColumns = list(columns) goColumns[-1] = \"Average Opp Dist\" AFColumns", "change for the same threshold values, \\ this may not be an issue.", "\".xls\" try: # create excelwriter object for outputting to excel writer = pd.ExcelWriter(chosenName)", "parent) self.pack(fill=BOTH, expand=True) # run the initial formatting on the data folder analyzePigeons(defaultThreshold,", "\"Control group 1\", \"Control group 2\", \"Group where an extra wall and a", "text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to", "font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create", "deselectAll = Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay,", "canvas for select all and deselect all buttons canv = Canvas(frame, width=220, height=10)", "a new threshold value \\ for calculating the max distance away from a", "button self.sortOutput = IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip =", "create each button separately selectAll = Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack()", "pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing the selected data files", "Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and close", "1): if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial 
Type\", \"Pigeon Name\"]) elif", "from Tkinter import * from ttk import Frame, Style from os import chdir,", "the goal locations in (x,y) pigeon.calcDist(calcForThreshold) # use the excel writer to save", "text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\")", "pandas data structures (DataFrame) and read_excel # Import module with class/functions handling pigeon", "10, dash=(2, 4)) canv.pack(fill=X) # create each button separately selectAll = Button(frame, text=\"Select", "and a \\ feature wall are placed in the environment to create an", "\" marked for analysis.\") return (selectAll, deselectAll) # Callback for select all and", "threshold has been changed value = float(thresholdBox.get()) try: if (value == defaultThreshold): print", "= Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) # Create", "= self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output name for saving the excel file", "= todaysDate + '-' + '-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName", "use. 
Saving operation cancelled.\") elif (trialsForOutput == [] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing", "{} groupsForOutput = [] trialButtons = [] trialButtonTooltips = [] animalButtons = []", "analyzePigeons(defaultThreshold, path) print \"\\nTips for using the GUI of this program can be", "%1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\ processing of the", "text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on the", "# create the excel writer object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except:", "of Dr. <NAME>.\") # Create a canvas for drawing a separation line canv", "selected data files located in \" + outputFilename + '.' def analyzePigeons(calcForThreshold, path):", "# Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \") # Threshold entry box", "Create and populate group and trial button frames # ====================================================================== trialFrame = Frame(self)", "acquire data matching requested trials for pigeon in animals: tempFrame = pd.DataFrame({}) pigeonFrame", "moved to the end of the \\ long wall.\"] self.trialVals = [] #", "if (groupType == \"animals\"): keys = self.animals else: keys = self.trialKeys # check", "resetButton = Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip =", "if event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output the desired analyses def", "threshold to default value.\") # Create a sort button self.sortOutput = IntVar() sortButton", "(trialsForOutput == []): tkMessageBox.showinfo(\"No groups selected\", \"Please select at least one grouping to", "# Create a frame for the bottom section # 
====================================================================== footerFrame = Frame(self)", "Frame(self) titleFrame.pack(fill=X) # Create the title label title_label = Label(titleFrame, text=\"Data Processor For", "for outputting to excel writer = pd.ExcelWriter(chosenName) # create the excel writer object", "= [] trialButtonTooltips = [] animalButtons = [] # locate the current directory", "\"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training group.\", \"Control group 1\", \"Control group 2\",", "quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and close the GUI.\") def create_window(self):", "buttons, groupType): groupsForOutput = [] if (groupType == \"animals\"): keys = self.animals else:", "and store in list of dataframes if selected to if (self.sortOutput == 1):", "quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program", "expand=True) # Create a frame for the bottom section # ====================================================================== footerFrame =", "frame for the title section # ====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X) # Create", "\\ was selected. 
Saving operation cancelled.\") try: writer.save() print \"Saving output of chosen", "animalButtons = [] # locate the current directory and file location dirname, mainFile", "the group buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8)", "pigeonNametemp = pigeonData[\"Trial Information\"][0].split('_')[0] # take first # term from trial information in", "group 1\", \"Control group 2\", \"Group where an extra wall and a \\", "try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found. Please restart the program.\")", "the data folder analyzePigeons(defaultThreshold, path) print \"\\nTips for using the GUI of this", "todaysDate + '-' + '-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName =", "for GUI processing allData[pigeonName] = pigeon.dataframe # also calculate how long formatting takes", "and deselect all buttons canv = Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220, 10,", "== True): thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) value = defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not", "print \"Processing the selected data files took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing", "# create all of the group buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame,", "convert unicode to utf8 pigeonName = pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] = Pigeon(pigeonData)", "frame # sort by group and store in list of dataframes if selected", "pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame = tempFrame.dropna() # tempFrame = 
tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return", "excel spreadsheets found. Please restart the program.\") # First read-in the data for", "Tooltips are also available upon hovering over any \\ element within the GUI.\\n\\n\"", "# Create a canvas for drawing a separation line canv = Canvas(titleFrame, width=840,", "in trials: trialFrame = pd.DataFrame({}) # storage frame for each trial gotrialFrame =", "text=\"Change threshold: \") # Threshold entry box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0,", "list(allData.keys()) self.animalVals = [] # Create a button for each bird in the", "this program can be found in the supplied \\ README file. Tooltips are", "(result == True): print \"Exiting program...\" exit() else: numError = 0 break try:", "numErrors += 1 tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt %0.0f/5\" % numErrors, \"Please", "height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000))", "option selected columns = columns.append([\"X Dist\", \"Y Dist\"])''' for trial in trials: trialFrame", "goColumns = list(columns) goColumns[-1] = \"Average Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF", "pigeonName = pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print", "the buttons and components of the GUI def createComponents(self): # Create text fonts", "text=\"This program was created by <NAME> for use \\ in the laboratory of", "issue \\ writing to the designated excel file. 
Check to make sure it", "if (self.sortOutput == 1): if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\",", "new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply", "of the group buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont))", "program.\") # First read-in the data for file in allFiles: datafile = pd.ExcelFile(file)", "be an issue. Saving the output of initial data processing was \\ cancelled.\")", "chosenName = chosenName + \".xls\" try: # create excelwriter object for outputting to", "= tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if (chosenName != dirname + sep", "buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a frame for handling all of the", "program was created by <NAME> for use \\ in the laboratory of Dr.", "seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\ processing of the selected", "valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all", "= 700 # ms defaultThreshold = 50 outputFilename = \"\" pigeonName = \"\"", "functions============# # =============================================================================# class App(Frame): # Constructor def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH,", "== []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to analyze.\") elif (trialsForOutput == []):", "for parsing dataframe based on groups def analyzeGroups(self, trials, 
animals): outputFrames = {}", "from a goal to be kept for data analysis.\") # Re-analyze with new", "Pecks\", \"Average Dist\"] goColumns = list(columns) goColumns[-1] = \"Average Opp Dist\" AFColumns =", "= pigeon.dataframe # also calculate how long formatting takes processingTime = time.time() -", "title_label = Label(titleFrame, text=\"Data Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip =", "the bottom section # ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create", "For: Dr. <NAME> lab ---------------------------- This program was developed to automatically format input", "frame for handling all of the additional buttons # ====================================================================== buttonsFrame = Frame(self)", "directory chdir(dataDirname) # list all files of type .xls allFiles = glob.glob(\"*.xls\") try:", "the GUI of this program can be found in the supplied \\ README", "unicode to utf8 pigeonName = pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def", "birds # ====================================================================== animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas =", "to trial frame # sort by group and store in list of dataframes", "500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set)", "pigeon and acquire data matching requested trials for pigeon in animals: tempFrame =", "\"No output file name \\ was selected. 
Saving operation cancelled.\") try: writer.save() print", "at least one grouping to analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\",", "0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals = [] # Create", "command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" + text", "data files took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\", "dataframe based on groups def analyzeGroups(self, trials, animals): outputFrames = {} columns =", "500, text=\"This program was created by <NAME> for use \\ in the laboratory", "tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data directory.\") if not dataDirname: raise ValueError(\"empty string\")", "# ====================================================================== animalsFrame = Frame(self, width=100, height=360) animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame,", "# ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel = Label(buttonsFrame,", "as pd # import pandas data structures (DataFrame) and read_excel # Import module", "it is not \\ currently in use. 
Saving operation cancelled.\") elif (trialsForOutput ==", "Create all of the buttons and components of the GUI def createComponents(self): #", "each pigeon data to a dictionary for GUI processing allData[pigeonName] = pigeon.dataframe #", "resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset threshold to default value.\") # Create", "output excel spreadsheets by \\ trial type.\") # Create a quit button quitButton", "in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name \\ was", "of chosen groups and pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there", "Type\", \"Pigeon Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon", "a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create", "sure it is not currently \\ in use. 
Since processing will not likely", "selected above.\") # Create and populate group and trial button frames # ======================================================================", "vals, text): # create canvas for select all and deselect all buttons canv", "(numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over multipled attempts.", "# =============================================================================# # ==========main function for handling processing and GUI functions============# # =============================================================================#", "all and de-select button frames def createButtons(self, frame, vals, text): # create canvas", "== \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif (trial ==", "the GUI.\\n\\n\" self.createComponents() # function for creating the select all and de-select button", "try: writer.save() print \"Saving output of chosen groups and pigeons to \", chosenName", "Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the", "function to also create a processed dataframe for each pigeon/trial def getFrame(self, pigeonFrame,", "(animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output name for", "creating the select all and de-select button frames def createButtons(self, frame, vals, text):", "chosenName + \".xls\" try: # create excelwriter object for outputting to excel writer", "return outputFrames # function to also create a processed dataframe for each pigeon/trial", "user to identify the data directory numErrors = 0 while True: if (numErrors", "runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on the groups and animals\\ selected", "tkMessageBox.showinfo(\"Not a number\", 
\"Please enter a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def", "indices of the goal locations in (x,y) pigeon.calcDist(calcForThreshold) # use the excel writer", "= pd.DataFrame({}) # storage frame for each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame =", "Type\", \"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if (trial ==", "add this pigeon to trial frame # sort by group and store in", "an issue. Saving the output of initial data processing was \\ cancelled.\") #", "not in chosenName): chosenName = chosenName + \".xls\" try: # create excelwriter object", "be kept for data analysis.\") # Re-analyze with new thresholdBox reformatButton = Button(buttonsFrame,", "analyze.\") elif (trialsForOutput == []): tkMessageBox.showinfo(\"No groups selected\", \"Please select at least one", "= Label(titleFrame, text=\"Data Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label,", "determining which groups/animals will be analyzed def getGroups(self, buttons, groupType): groupsForOutput = []", "on groups def analyzeGroups(self, trials, animals): outputFrames = {} columns = [\"Pigeon Name\",", "\"AF\"] self.trialTooltips = [\"Non-reinforced training group.\", \"Control group 1\", \"Control group 2\", \"Group", "expand=True) # Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \") # Threshold entry", "==========main function for handling processing and GUI functions============# # =============================================================================# class App(Frame): #", "{} allData = {} groupsForOutput = [] trialButtons = [] trialButtonTooltips = []", "= pd.ExcelWriter(outputFilename) currFile = 0 progressTime = 0 # loop through all of", "for pigeon in animals: tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial ==", "writer.save() print \"Saving 
output of chosen groups and pigeons to \", chosenName except:", "Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click", "label thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \") # Threshold entry box thresholdBox =", "find the indices of the goal locations in (x,y) pigeon.calcDist(calcForThreshold) # use the", "for handling all of the birds # ====================================================================== animalsFrame = Frame(self, width=100, height=360)", "pigeon import Pigeon # Import tool tip function developed by <NAME> at #", "Callback for select all and de-select all buttons def allButtons(self, buttonGroup, event): for", "progressTime = 0 # loop through all of the pigeons loaded into the", "\\ attempt %0.0f/5\" % numErrors, \"Please select a valid directory...\") dataDirname = dataDirname.replace('/',", "variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay, text=\"Select to auto-sort the output excel", "+ \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if (chosenName !=", "# ms defaultThreshold = 50 outputFilename = \"\" pigeonName = \"\" allPigeons =", "text + \" for analysis.\") deselectAll = Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\"))", "trialButtons = [] trialButtonTooltips = [] animalButtons = [] # locate the current", "information in first entry # convert unicode to utf8 pigeonName = pigeonNametemp.encode('utf8') #", "excel data for statistical analysis in the statistical analysis software. 
\"\"\" from Tkinter", "True): print \"Exiting program...\" exit() else: numError = 0 break try: dataDirname =", "GUI processing allData[pigeonName] = pigeon.dataframe # also calculate how long formatting takes processingTime", "gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame return outputFrames # function to", "width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0),", "= Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a run button runButton = Button(footerFrame,", "environment \\ remains the same.\", \"Group where the feature wall is moved to", "to reset threshold to default value.\") # Create a sort button self.sortOutput =", "function for parsing dataframe based on groups def analyzeGroups(self, trials, animals): outputFrames =", "gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame", "= Frame(self) titleFrame.pack(fill=X) # Create the title label title_label = Label(titleFrame, text=\"Data Processor", "the \\ long wall.\"] self.trialVals = [] # create all of the group", "AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if (trial", "buttons # ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel =", "value to set a new threshold value \\ for calculating the max distance", "group and trial button frames # ====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT)", 
"trialFrame.append(tempFrame) # add this pigeon to trial frame # sort by group and", "= Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton,", "create GUI root root.wm_title(\"Data Processor\") # create title label root.geometry(\"840x520+300+300\") # set the", "# find the indices of the goal locations in (x,y) pigeon.calcDist(calcForThreshold) # use", "% processingTime print \"\\nFormatted output of all selected data files located in \"", "=============================================================================# root = Tk() # create GUI root root.wm_title(\"Data Processor\") # create title", "gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop over each pigeon and acquire", "while True: if (numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected", "save this pigeon to a data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress:", "operation cancelled.\") try: writer.save() print \"Saving output of chosen groups and pigeons to", "issue writing to the designated excel file. 
Check to make sure it is", "reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any changes to threshold box above.\")", "(trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] =", "file location dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/', sep) # Ask user", "% processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although \\ processing of the selected data", "outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF", "Reset threshold to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox,", "the designated excel file. Check to make sure it is not currently \\", "Create a canvas for drawing a separation line canv = Canvas(titleFrame, width=840, height=10)", "dirname, mainFile = path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/', sep) # Ask user to identify", "Do you want to quit instead?\") if (result == True): print \"Exiting program...\"", "Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[\"AF-AF Distance\"] = AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"])", "calcForThreshold) startTime = time.time() # start timer progressTime = startTime # define the", "not be an issue. 
Saving the output of initial data processing was \\", "= path.split(path.abspath(\"__file__\")) dirname = dirname.replace('/', sep) # Ask user to identify the data", "program and close the GUI.\") def create_window(self): self.counter += 1 t = Toplevel(self)", "[] animalButtons = [] # locate the current directory and file location dirname,", "path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile = 0 progressTime = 0", "All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" +", "Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] =", "Dist\"])''' for trial in trials: trialFrame = pd.DataFrame({}) # storage frame for each", "the excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-' + '-'.join(trialsForOutput)", "delay=toolTipDelay, text=\"Change this value to set a new threshold value \\ for calculating", "take first # term from trial information in first entry # convert unicode", "a valid directory...\") dataDirname = dataDirname.replace('/', sep) # cd to data directory chdir(dataDirname)", "text=\"Deselect all \" + text + \" marked for analysis.\") return (selectAll, deselectAll)", "width=220, height=10) canv.create_line(20, 10, 220, 10, dash=(2, 4)) canv.pack(fill=X) # create each button", "+ outputFilename + '.' 
def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data files with", "http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General Public License, Ver 2 from ToolTip import", "print \"\\nTips for using the GUI of this program can be found in", "title label title_label = Label(titleFrame, text=\"Data Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True)", "deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" + text + \" marked", "want to quit instead?\") if (result == True): print \"Exiting program...\" exit() else:", "# GNU General Public License, Ver 2 from ToolTip import ToolTip # Import", "identify the data directory numErrors = 0 while True: if (numErrors > 4):", "likely change for the same threshold values, \\ this may not be an", "Y coordinates option selected columns = columns.append([\"X Dist\", \"Y Dist\"])''' for trial in", "the initial formatting on the data folder analyzePigeons(defaultThreshold, path) print \"\\nTips for using", "return groupsForOutput # function for parsing dataframe based on groups def analyzeGroups(self, trials,", "all files of type .xls allFiles = glob.glob(\"*.xls\") try: numFiles = len(allFiles) except:", "spreadsheets by \\ trial type.\") # Create a quit button quitButton = Button(buttonsFrame,", "trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame", "# create canvas for select all and deselect all buttons canv = Canvas(frame,", "software. 
\"\"\" from Tkinter import * from ttk import Frame, Style from os", "for creating the select all and de-select button frames def createButtons(self, frame, vals,", "ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt %0.0f/5\" % numErrors,", "delay=toolTipDelay, text=\"Run analysis based on the groups and animals\\ selected above.\") # Create", "dictionary allPigeons for pigeonName, pigeon in allPigeons.iteritems(): currFile += 1 if ((time.time() -", "= [] animalButtons = [] # locate the current directory and file location", "4)) canv.pack(fill=X) # create each button separately selectAll = Button(frame, text=\"Select All\", command=lambda:", "class App(Frame): # Constructor def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run", "for data analysis.\") # Re-analyze with new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new", "a run button runButton = Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip =", "can be found in the supplied \\ README file. 
Tooltips are also available", "also create a processed dataframe for each pigeon/trial def getFrame(self, pigeonFrame, columns, trial):", "self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset threshold to default", "reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any changes to threshold box", "from os import chdir, path, sep import pandas as pd # import pandas", "by \\ trial type.\") # Create a quit button quitButton = Button(buttonsFrame, text=\"Quit\",", "Ask user to identify the data directory numErrors = 0 while True: if", "Distance\"] = gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame outputFrames[\"AF-AF Distance\"]", "deselectAll) # Callback for select all and de-select all buttons def allButtons(self, buttonGroup,", "\"\\nProcessing %1.0f data files with a threshold of %0.0f units, \\ please wait...\"", "excelwriter object for outputting to excel writer = pd.ExcelWriter(chosenName) # create the excel", "instead?\") if (result == True): print \"Exiting program...\" exit() else: numError = 0", "= Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold label thresholdLabel = Label(buttonsFrame, text=\"Change threshold: \")", "pigeon data to a dictionary for GUI processing allData[pigeonName] = pigeon.dataframe # also", "\" + text + \" for analysis.\") deselectAll = Button(frame, text=\"De-Select All\", command=lambda:", "threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any", "run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != [])", "with a threshold of %0.0f units, \\ please wait...\" % (numFiles, calcForThreshold) startTime", 
"thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this", "in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create", "GNU General Public License, Ver 2 from ToolTip import ToolTip # Import for", "<NAME> For: Dr. <NAME> lab ---------------------------- This program was developed to automatically format", "# create each button separately selectAll = Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\"))", "in the environment to create an enclosed square.\", \"Group where the feature wall", "value \\ for calculating the max distance away from a goal to be", "frame, vals, text): # create canvas for select all and deselect all buttons", "l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) # function for determining which groups/animals will be", "file data into a DataFrame pigeonData = pd.read_excel(datafile) # extract pigeon name pigeonNametemp", "the data for file in allFiles: datafile = pd.ExcelFile(file) index = allFiles.index(file) #", "start timer progressTime = startTime # define the output spreadsheet outputFilename = path.join(dirname,", "0, 500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360) self.animalScrollbar = Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview)", "loop over each pigeon and acquire data matching requested trials for pigeon in", "# use the excel writer to save this pigeon to a data sheet", "canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840, 10) canv.pack(fill=X, anchor=CENTER, expand=True) #", "trial) gotrialFrame 
= gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial)", "AFColumns, trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame)", "Create a frame for the title section # ====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X)", "self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all", "there as an \\ issue writing to the designated excel file. Check to", "tkMessageBox.showinfo(\"No excel spreadsheets found. Please restart the program.\") # First read-in the data", "create all of the group buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num],", "text=\"Click to apply any changes to threshold box above.\") # Reset threshold to", "procesing from pigeon import Pigeon # Import tool tip function developed by <NAME>", "birds selected\", \"Please select at least one bird to analyze.\") def checkReformat(self, thresholdBox,", "button frames def createButtons(self, frame, vals, text): # create canvas for select all", "writer to save this pigeon to a data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName)", "ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any changes to threshold box above.\") # Reset", "list(columns) goColumns[-1] = \"Average Opp Dist\" AFColumns = list(goColumns) AFColumns.extend([\"Average AF Dist\"]) '''if", "tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry there as an \\ issue writing to the designated excel", "for analysis.\") deselectAll = 
Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip =", "resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset threshold to default value.\") #", "frame for each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop over", "[] trialButtonTooltips = [] animalButtons = [] # locate the current directory and", "ToolTip(resetButton, delay=toolTipDelay, text=\"Click to reset threshold to default value.\") # Create a sort", "for directory dialog import tkFileDialog, tkMessageBox, tkFont, glob, time # =============================================================================# root =", "This program was developed to automatically format input excel data for statistical analysis", "print \"\\nFormatted output of all selected data files located in \" + outputFilename", "# display progress progressTime = time.time() # update progress time # find the", "function for creating the select all and de-select button frames def createButtons(self, frame,", "and (animalsForOutput != [])): outputFrames = self.analyzeGroups(trialsForOutput, animalsForOutput) # get the output name", "long wall.\"] self.trialVals = [] # create all of the group buttons for", "analyses def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput", "each button separately selectAll = Button(frame, text=\"Select All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip", "update progress time # find the indices of the goal locations in (x,y)", "sep import pandas as pd # import pandas data structures (DataFrame) and read_excel", "text): # create canvas for select all and deselect all buttons canv =", "variable=self.trialVals[num], font=self.componentFont)) trialButtons[-1].pack(pady=8) 
trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all buttons self.createButtons(trialFrame, self.trialVals,", "Saving operation cancelled.\") elif (trialsForOutput == [] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\",", "{} columns = [\"Pigeon Name\", \"Trial Type\", \"Removed Pecks\", \"Average Dist\"] goColumns =", "ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to set a new threshold value \\ for", "delay=toolTipDelay, text=\"Click to apply any changes to threshold box above.\") # Reset threshold", "distance away from a goal to be kept for data analysis.\") # Re-analyze", "tip function developed by <NAME> at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General", "ValueError(\"empty string\") break except ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid directory - Failed \\", "data files located in \" + outputFilename + '.' def analyzePigeons(calcForThreshold, path): print", "GUI functions============# # =============================================================================# class App(Frame): # Constructor def __init__(self, parent): Frame.__init__(self, parent)", "= IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton, delay=toolTipDelay,", "window #%s\" % self.counter) l.pack(side=\"top\", fill=\"both\", expand=True, padx=100, pady=100) # function for determining", "dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data directory.\") if not dataDirname: raise", "and close the GUI.\") def create_window(self): self.counter += 1 t = Toplevel(self) t.wm_title(\"Window", "pigeon/trial def getFrame(self, pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame", "tempFrame = 
tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run the GUI app = App(root)", "+ \".xls\" try: # create excelwriter object for outputting to excel writer =", "trial type.\") # Create a quit button quitButton = Button(buttonsFrame, text=\"Quit\", command=self.quit) quitButton.pack()", "for select all and de-select all buttons def allButtons(self, buttonGroup, event): for buttonNum", "a processed dataframe for each pigeon/trial def getFrame(self, pigeonFrame, columns, trial): tempFrame =", "initialFileName) and (\".xls\" not in chosenName): chosenName = chosenName + \".xls\" try: #", "term from trial information in first entry # convert unicode to utf8 pigeonName", "# First read-in the data for file in allFiles: datafile = pd.ExcelFile(file) index", "for handling processing and GUI functions============# # =============================================================================# class App(Frame): # Constructor def", "sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) # also save each pigeon data", "goal locations in (x,y) pigeon.calcDist(calcForThreshold) # use the excel writer to save this", "output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) # also save each", "tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if (chosenName != dirname + sep +", "value.\") # Create a sort button self.sortOutput = IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\",", "currently \\ in use. 
Since processing will not likely change for the same", "data analysis.\") # Re-analyze with new thresholdBox reformatButton = Button(buttonsFrame, text=\"Apply new threshold\",", "\"Trial Type\", \"Removed Pecks\", \"Average Dist\"] goColumns = list(columns) goColumns[-1] = \"Average Opp", "cd to data directory chdir(dataDirname) # list all files of type .xls allFiles", "of initial data processing was \\ cancelled.\") # =============================================================================# # ==========main function for", "which groups/animals will be analyzed def getGroups(self, buttons, groupType): groupsForOutput = [] if", "currFile = 0 progressTime = 0 # loop through all of the pigeons", "Saving the output of initial data processing was \\ cancelled.\") # =============================================================================# #", "trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput = self.getGroups(self.animalVals, \"animals\") if ((trialsForOutput != []) and", "== [] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please select something to analyze.\")", "= time.time() - startTime try: allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing the selected", "parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run the initial formatting on the data", "sep) # Ask user to identify the data directory numErrors = 0 while", "if ((time.time() - progressTime) > 5): # display progress progressTime = time.time() #", "the excel writer to save this pigeon to a data sheet in output.xls", "= gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame =", "defaultThreshold) value = defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\", \"Please enter a", "set a new threshold value \\ for calculating the max distance away from", "self.sortOutput = IntVar() 
sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip = ToolTip(sortButton,", "dataframe for each pigeon/trial def getFrame(self, pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"]", "to save this pigeon to a data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print", "# ====================================================================== footerFrame = Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a run button", "all \" + text + \" marked for analysis.\") return (selectAll, deselectAll) #", "(trial == \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif (trial", "AFColumns.extend([\"Average AF Dist\"]) '''if X and Y coordinates option selected columns = columns.append([\"X", "Import for directory dialog import tkFileDialog, tkMessageBox, tkFont, glob, time # =============================================================================# root", "all of the additional buttons # ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) #", "= Button(buttonsFrame, text=\"Apply new threshold\", command=lambda: self.checkReformat(thresholdBox, False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay,", "tkMessageBox.showinfo(\"No birds selected\", \"Please select at least one bird to analyze.\") def checkReformat(self,", "to excel writer = pd.ExcelWriter(chosenName) # create the excel writer object for frameIndex", "= defaultThreshold analyzePigeons(value, path) except: tkMessageBox.showinfo(\"Not a number\", \"Please enter a valid number.\")", "trialButtons[-1].pack(pady=8) trialButtonTooltips.append(ToolTip(trialButtons[-1], delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all buttons 
self.createButtons(trialFrame, self.trialVals, \"experimental phases\")", "the statistical analysis software. \"\"\" from Tkinter import * from ttk import Frame,", "trial) AFtrialFrame = AFtrialFrame.append(AFFrame) tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame) #", "thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to set a new threshold value", "1 tkMessageBox.showinfo(\"Invalid directory - Failed \\ attempt %0.0f/5\" % numErrors, \"Please select a", "a goal to be kept for data analysis.\") # Re-analyze with new thresholdBox", "tempFrame = self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame) # add this pigeon to", "bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1) animalButtons[-1].pack(pady=6) # create select/deselect", "delay=toolTipDelay, text=\"Click to reset threshold to default value.\") # Create a sort button", "\"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output the desired analyses def run(self): trialsForOutput =", "selected data files took %1.2f seconds.\" % processingTime tkMessageBox.showinfo(\"Initial processing output cancelled\", \"Although", "result = tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over multipled attempts. Do you want", "if (result == True): print \"Exiting program...\" exit() else: numError = 0 break", "tkMessageBox.askyesno(title=\"Quit?\", message=\"No directory \\ selected over multipled attempts. 
Do you want to quit", "read_excel # Import module with class/functions handling pigeon procesing from pigeon import Pigeon", "if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for parsing dataframe", "value = float(thresholdBox.get()) try: if (value == defaultThreshold): print \"Threshold has not changed", "self.counter += 1 t = Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l = Label(t,", "# update progress time # find the indices of the goal locations in", "analyzeGroups(self, trials, animals): outputFrames = {} columns = [\"Pigeon Name\", \"Trial Type\", \"Removed", "pigeonFrame = allData[pigeon] if (trial == \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame", "issue. Saving the output of initial data processing was \\ cancelled.\") # =============================================================================#", "tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run the GUI app", "cancelled.\") # =============================================================================# # ==========main function for handling processing and GUI functions============# #", "to the end of the \\ long wall.\"] self.trialVals = [] # create", "read-in the data for file in allFiles: datafile = pd.ExcelFile(file) index = allFiles.index(file)", "\\ currently in use. 
Saving operation cancelled.\") elif (trialsForOutput == [] and animalsForOutput", "groups selected\", \"Please select at least one grouping to analyze.\") elif (animalsForOutput ==", "= [] trialButtons = [] trialButtonTooltips = [] animalButtons = [] # locate", "= Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame = Frame(self.animalCanvas, width=100, height=360)", "- startTime try: allWriter.save() printInfo(processingTime, outputFilename) except: print \"Processing the selected data files", "self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals = [] # Create a button for", "#%s\" % self.counter) l = Label(t, text=\"This is window #%s\" % self.counter) l.pack(side=\"top\",", "animalsFrame.pack(expand=True, anchor=CENTER, side=RIGHT) self.animalCanvas = Canvas(animalsFrame, width=100, height=360, scrollregion=(0, 0, 500, 1000)) self.newFrame", "self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create a frame for", "# loop through all of the pigeons loaded into the dictionary allPigeons for", "operation cancelled.\") elif (trialsForOutput == [] and animalsForOutput == []): tkMessageBox.showinfo(\"Nothing selected\", \"Please", "\"Y Dist\"])''' for trial in trials: trialFrame = pd.DataFrame({}) # storage frame for", "groupsForOutput = [] if (groupType == \"animals\"): keys = self.animals else: keys =", "# add this pigeon to trial frame # sort by group and store", "entry box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay,", "use. 
Since processing will not likely change for the same threshold values, \\", "trial][columns] tempFrame = tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame # run", "animals: tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon] if (trial == \"GO\"): goFrame =", "# Create all of the buttons and components of the GUI def createComponents(self):", "a frame for handling all of the birds # ====================================================================== animalsFrame = Frame(self,", "spreadsheets found. Please restart the program.\") # First read-in the data for file", "message=\"No directory \\ selected over multipled attempts. Do you want to quit instead?\")", "getGroups(self, buttons, groupType): groupsForOutput = [] if (groupType == \"animals\"): keys = self.animals", "1 if ((time.time() - progressTime) > 5): # display progress progressTime = time.time()", "analysis.\") deselectAll = Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll,", "save each pigeon data to a dictionary for GUI processing allData[pigeonName] = pigeon.dataframe", "= Tk() # create GUI root root.wm_title(\"Data Processor\") # create title label root.geometry(\"840x520+300+300\")", "the feature wall is moved to the end of the \\ long wall.\"]", "module with class/functions handling pigeon procesing from pigeon import Pigeon # Import tool", "to analyze.\") elif (trialsForOutput == []): tkMessageBox.showinfo(\"No groups selected\", \"Please select at least", "1 t = Toplevel(self) t.wm_title(\"Window #%s\" % self.counter) l = Label(t, text=\"This is", "run button runButton = Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton,", "buttons canv = Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220, 10, dash=(2, 4)) canv.pack(fill=X)", 
"\"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all \" + text + \"", "% calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile = 0 progressTime = 0 # loop", "create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing the selected data", "event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the buttons and components of the GUI", "\"Group where the feature wall is removed, but the geometry of the environment", "the data directory for bird in range(len(self.animals)): self.animalVals.append(IntVar()) animalButtons.append(Checkbutton(self.newFrame, text=self.animals[bird], variable=self.animalVals[bird], font=self.componentFont)) self.animalVals[-1].set(1)", "canv = Canvas(frame, width=220, height=10) canv.create_line(20, 10, 220, 10, dash=(2, 4)) canv.pack(fill=X) #", "pandas as pd # import pandas data structures (DataFrame) and read_excel # Import", "a \\ feature wall are placed in the environment to create an enclosed", "canv.create_line(20, 10, 220, 10, dash=(2, 4)) canv.pack(fill=X) # create each button separately selectAll", "not likely change for the same threshold values, \\ this may not be", "== []): tkMessageBox.showinfo(\"No groups selected\", \"Please select at least one grouping to analyze.\")", "pd.ExcelWriter(outputFilename) currFile = 0 progressTime = 0 # loop through all of the", "Name\", \"Trial Type\", \"Removed Pecks\", \"Average Dist\"] goColumns = list(columns) goColumns[-1] = \"Average", "# create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a frame for", "based on groups def analyzeGroups(self, trials, animals): outputFrames = {} columns = [\"Pigeon", "def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data files with a threshold of %0.0f", "Phase\"] == trial][columns] tempFrame = 
tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame", "in allPigeons.iteritems(): currFile += 1 if ((time.time() - progressTime) > 5): # display", "len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found. Please restart the program.\") # First read-in", "create canvas for select all and deselect all buttons canv = Canvas(frame, width=220,", "runButton = Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run", "(chosenName != dirname + sep + initialFileName) and (\".xls\" not in chosenName): chosenName", "if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame elif (trial == \"AF\"): outputFrames[\"AF-Opp", "\"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp", "(trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial ==", "def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) # Create all of the buttons and components of", "\"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"] self.trialTooltips = [\"Non-reinforced training", "groups and animals\\ selected above.\") # Create and populate group and trial button", "the window # Initialize variables toolTipDelay = 700 # ms defaultThreshold = 50", "= pd.DataFrame({}) # loop over each pigeon and acquire data matching requested trials", "(groupType == \"animals\"): keys = self.animals else: keys = self.trialKeys # check which", "lab ---------------------------- This program was developed to automatically format input excel data for", "on the data folder analyzePigeons(defaultThreshold, path) print \"\\nTips for using the GUI of", "def allButtons(self, buttonGroup, event): for buttonNum in 
range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1)", "== \"animals\"): keys = self.animals else: keys = self.trialKeys # check which buttons", "select a valid directory...\") dataDirname = dataDirname.replace('/', sep) # cd to data directory", "ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based on the groups and animals\\ selected above.\") #", "delay=toolTipDelay, text=\"Deselect all \" + text + \" marked for analysis.\") return (selectAll,", "trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a checkbox for each test group self.trialLabels =", "of the GUI def createComponents(self): # Create text fonts for components self.titleFont =", "button frames # ====================================================================== trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a", "to defaultThreshold resetButton = Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack()", "for saving the excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-'", "sort button self.sortOutput = IntVar() sortButton = Checkbutton(buttonsFrame, text=\"Sort\", variable=self.sortOutput, font=self.componentFont) sortButton.pack() sortTooltip", "number\", \"Please enter a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event):", "All\", command=lambda: self.allButtons(vals, \"Select\")) selectAll.pack() selectTrialToolTip = ToolTip(selectAll, delay=toolTipDelay, text=\"Select all \" +", "buttons are selected for buttonNum in buttons: if buttonNum.get(): indexOfButton = buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton])", "analysis in the statistical analysis software. 
\"\"\" from Tkinter import * from ttk", "# =============================================================================# root = Tk() # create GUI root root.wm_title(\"Data Processor\") # create", "trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame = tempFrame.dropna() # tempFrame =", "pd.DataFrame({}) # storage frame for each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({})", "pd.ExcelWriter(chosenName) # create the excel writer object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex)", "fonts for components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create", "= pigeonNametemp.encode('utf8') # create pigeon allPigeons[pigeonName] = Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing", "Public License, Ver 2 from ToolTip import ToolTip # Import for directory dialog", "was \\ cancelled.\") # =============================================================================# # ==========main function for handling processing and GUI", "goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif (trial == \"AF\"): goFrame", "trialFrame = Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a checkbox for each test", "an \\ issue writing to the designated excel file. Check to make sure", "outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name \\ was selected. Saving", "\"\"\" from Tkinter import * from ttk import Frame, Style from os import", "% (numFiles, calcForThreshold) startTime = time.time() # start timer progressTime = startTime #", "may not be an issue. 
Saving the output of initial data processing was", "self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals = list(allData.keys()) self.animalVals", "root.geometry(\"840x520+300+300\") # set the size of the window # Initialize variables toolTipDelay =", "processed dataframe for each pigeon/trial def getFrame(self, pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment", "pigeon to trial frame # sort by group and store in list of", "selected to if (self.sortOutput == 1): if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] =", "use \\ in the laboratory of Dr. <NAME>.\") # Create a canvas for", "\\ this may not be an issue. Saving the output of initial data", "= self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) AFFrame = self.getFrame(pigeonFrame, AFColumns, trial) AFtrialFrame", "+ '-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep); if", "Frame(self) footerFrame.pack(anchor=S, expand=True, side=BOTTOM) # Create a run button runButton = Button(footerFrame, width=200,", "output of chosen groups and pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving cancelled\", \"Sorry", "requested trials for pigeon in animals: tempFrame = pd.DataFrame({}) pigeonFrame = allData[pigeon] if", "checkbox for each test group self.trialLabels = [\"Non-reinforced training\", \"Control 1\", \"Control 2\",", "# ====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X) # Create the title label title_label =", "= Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a checkbox for each test group", "<NAME> for use \\ in the laboratory of Dr. 
<NAME>.\") # Create a", "= AFtrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) outputFrames[trial] = trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if", "gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial == \"AF\"): outputFrames[\"AF-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\",", "thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to set a new", "import * from ttk import Frame, Style from os import chdir, path, sep", "create select/deselect all buttons self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a frame for handling", "Button(buttonsFrame, text=\"Reset threshold and run\", command=lambda: self.checkReformat(thresholdBox, True)) resetButton.pack() resetButtonTooltip = ToolTip(resetButton, delay=toolTipDelay,", "+ \" marked for analysis.\") return (selectAll, deselectAll) # Callback for select all", "outputFrames[trial] = trialFrame return outputFrames # function to also create a processed dataframe", "pigeonData[\"Trial Information\"][0].split('_')[0] # take first # term from trial information in first entry", "(animalsForOutput == []): tkMessageBox.showinfo(\"No birds selected\", \"Please select at least one bird to", "allPigeons.iteritems(): currFile += 1 if ((time.time() - progressTime) > 5): # display progress", "titleFrame.pack(fill=X) # Create the title label title_label = Label(titleFrame, text=\"Data Processor For Pigeon", "writer object for frameIndex in outputFrames: outputFrames[frameIndex].to_excel(writer, sheet_name=frameIndex) except: tkMessageBox.showinfo(\"Saving cancelled\", \"No output", "= Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect", "any \\ element within the GUI.\\n\\n\" self.createComponents() # function for creating the select", "title=\"Please select the data 
directory.\") if not dataDirname: raise ValueError(\"empty string\") break except", "progressTime) > 5): # display progress progressTime = time.time() # update progress time", "command=self.quit) quitButton.pack() quitToolTip = ToolTip(quitButton, delay=toolTipDelay, text=\"Quit the program and close the GUI.\")", "# Create text fonts for components self.titleFont = tkFont.Font(family=\"Arial\", size=18) self.componentFont = tkFont.Font(family=\"Helvetica\",", "dataDirname = dataDirname.replace('/', sep) # cd to data directory chdir(dataDirname) # list all", "% numErrors, \"Please select a valid directory...\") dataDirname = dataDirname.replace('/', sep) # cd", "of all selected data files located in \" + outputFilename + '.' def", "found in the supplied \\ README file. Tooltips are also available upon hovering", "excel file data into a DataFrame pigeonData = pd.read_excel(datafile) # extract pigeon name", "Tk() # create GUI root root.wm_title(\"Data Processor\") # create title label root.geometry(\"840x520+300+300\") #", "\"Please enter a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\"))", "= ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any changes to threshold box above.\") #", "the data directory numErrors = 0 while True: if (numErrors > 4): result", "if (trial == \"GO\"): goFrame = self.getFrame(pigeonFrame, goColumns, trial) gotrialFrame = gotrialFrame.append(goFrame) elif", "path, sep import pandas as pd # import pandas data structures (DataFrame) and", "cancelled\", \"No output file name \\ was selected. 
Saving operation cancelled.\") try: writer.save()", "dirname + sep + initialFileName) and (\".xls\" not in chosenName): chosenName = chosenName", "+ 500, text=\"This program was created by <NAME> for use \\ in the", "feature wall is moved to the end of the \\ long wall.\"] self.trialVals", "= [] if (groupType == \"animals\"): keys = self.animals else: keys = self.trialKeys", "and acquire data matching requested trials for pigeon in animals: tempFrame = pd.DataFrame({})", "spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile = 0", "getFrame(self, pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame = tempFrame.dropna()", "currently in use. Saving operation cancelled.\") elif (trialsForOutput == [] and animalsForOutput ==", "= Scrollbar(animalsFrame, orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\",", "the environment to create an enclosed square.\", \"Group where the feature wall is", "of the additional buttons # ====================================================================== buttonsFrame = Frame(self) buttonsFrame.pack(fill=X, expand=True) # Threshold", "outputting to excel writer = pd.ExcelWriter(chosenName) # create the excel writer object for", "width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change this value to set", "\\ please wait...\" % (numFiles, calcForThreshold) startTime = time.time() # start timer progressTime", "Data Processor ============================ Created by: <NAME> For: Dr. 
<NAME> lab ---------------------------- This program", "(value == defaultThreshold): print \"Threshold has not changed from default\" return if (reset", "animalsForOutput) # get the output name for saving the excel file todaysDate =", "directory numErrors = 0 while True: if (numErrors > 4): result = tkMessageBox.askyesno(title=\"Quit?\",", "# list all files of type .xls allFiles = glob.glob(\"*.xls\") try: numFiles =", "self.componentFont = tkFont.Font(family=\"Helvetica\", size=16) # Create a frame for the title section #", "Created by: <NAME> For: Dr. <NAME> lab ---------------------------- This program was developed to", "+= 1 if ((time.time() - progressTime) > 5): # display progress progressTime =", "all of the group buttons for num in range(len(self.trialLabels)): self.trialVals.append(IntVar()) trialButtons.append(Checkbutton(trialFrame, text=self.trialLabels[num], variable=self.trialVals[num],", "def analyzeGroups(self, trials, animals): outputFrames = {} columns = [\"Pigeon Name\", \"Trial Type\",", "text=self.trialTooltips[num])) # create select/deselect all buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create a", "# http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General Public License, Ver 2 from ToolTip", "dirname = dirname.replace('/', sep) # Ask user to identify the data directory numErrors", "t.wm_title(\"Window #%s\" % self.counter) l = Label(t, text=\"This is window #%s\" % self.counter)", "if threshold has been changed value = float(thresholdBox.get()) try: if (value == defaultThreshold):", "'-' + '-'.join(trialsForOutput) + \".xls\" chosenName = tkFileDialog.asksaveasfilename(initialdir=dirname, initialfile=initialFileName) chosenName = chosenName.replace('/', sep);", "environment to create an enclosed square.\", \"Group where the feature wall is removed,", "of the selected data files occurred as usual, there was an issue \\", "it is not currently \\ in use. 
Since processing will not likely change", "excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate + '-' + '-'.join(trialsForOutput) +", "button for each bird in the data directory for bird in range(len(self.animals)): self.animalVals.append(IntVar())", "a threshold of %0.0f units, \\ please wait...\" % (numFiles, calcForThreshold) startTime =", "- Failed \\ attempt %0.0f/5\" % numErrors, \"Please select a valid directory...\") dataDirname", "not dataDirname: raise ValueError(\"empty string\") break except ValueError: numErrors += 1 tkMessageBox.showinfo(\"Invalid directory", "Button(footerFrame, width=200, text=\"Run Processing\", command=self.run) runButton.pack(fill=Y) runToolTip = ToolTip(runButton, delay=toolTipDelay, text=\"Run analysis based", "at # http://tkinter.unpythonic.net/wiki/ToolTip, Licensed under # GNU General Public License, Ver 2 from", "\"Feature Only\", \"Geometry Only\", \"Affine\"] self.trialKeys = [\"Nrtr\", \"C1\", \"C2\", \"FO\", \"GO\", \"AF\"]", "Constructor def __init__(self, parent): Frame.__init__(self, parent) self.pack(fill=BOTH, expand=True) # run the initial formatting", "titleFrame = Frame(self) titleFrame.pack(fill=X) # Create the title label title_label = Label(titleFrame, text=\"Data", "= buttons.index(buttonNum) groupsForOutput.append(keys[indexOfButton]) return groupsForOutput # function for parsing dataframe based on groups", "data sheet in output.xls pigeon.dataframe.to_excel(allWriter, sheet_name=pigeonName) print \"Progress: %0.0f/%0.0f...\" % (currFile, numFiles) #", "= tkFont.Font(family=\"Helvetica\", size=16) # Create a frame for the title section # ======================================================================", "the GUI def createComponents(self): # Create text fonts for components self.titleFont = tkFont.Font(family=\"Arial\",", "orient=\"vertical\", command=self.animalCanvas.yview) self.animalCanvas.configure(yscrollcommand=self.animalScrollbar.set) 
self.animalScrollbar.pack(side=\"right\", fill=\"y\") self.animalCanvas.pack(side=\"top\") self.animalCanvas.create_window((0, 0), window=self.newFrame, anchor='nw') self.newFrame.bind(\"<Configure>\", self.scrollFunc) self.animals", "= gotrialFrame outputFrames[\"AF-AF Distance\"] = AFtrialFrame outputFrames[trial] = trialFrame return outputFrames # function", "frame for handling all of the birds # ====================================================================== animalsFrame = Frame(self, width=100,", "== \"GO\"): outputFrames[\"GO-Opp Distance\"] = gotrialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) elif (trial == \"AF\"):", "0 break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep, title=\"Please select the data directory.\") if", "wall is removed, but the geometry of the environment \\ remains the same.\",", "drawing a separation line canv = Canvas(titleFrame, width=840, height=10) canv.create_line(0, 10, 840, 10)", "delay=toolTipDelay, text=\"Select all \" + text + \" for analysis.\") deselectAll = Button(frame,", "for analysis.\") return (selectAll, deselectAll) # Callback for select all and de-select all", "progressTime = time.time() # update progress time # find the indices of the", "Style from os import chdir, path, sep import pandas as pd # import", "= trialFrame.sort([\"Trial Type\", \"Pigeon Name\"]) else: if (trial == \"GO\"): outputFrames[\"GO-Opp Distance\"] =", "# Output the desired analyses def run(self): trialsForOutput = self.getGroups(self.trialVals, \"trials\") animalsForOutput =", "select at least one grouping to analyze.\") elif (animalsForOutput == []): tkMessageBox.showinfo(\"No birds", "data directory.\") if not dataDirname: raise ValueError(\"empty string\") break except ValueError: numErrors +=", "enter a valid number.\") thresholdBox.delete(0, END) thresholdBox.insert(0, defaultThreshold) def scrollFunc(self, event): self.animalCanvas.configure(scrollregion=self.animalCanvas.bbox(\"all\")) #", "%1.0f data 
files with a threshold of %0.0f units, \\ please wait...\" %", "progress time # find the indices of the goal locations in (x,y) pigeon.calcDist(calcForThreshold)", "# Create a frame for the title section # ====================================================================== titleFrame = Frame(self)", "pigeonFrame, columns, trial): tempFrame = pigeonFrame.loc[pigeonFrame[\"Experiment Phase\"] == trial][columns] tempFrame = tempFrame.dropna() #", "dash=(2, 4)) canv.pack(fill=X) # create each button separately selectAll = Button(frame, text=\"Select All\",", "self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create a frame for handling all of the", "Frame(self) trialFrame.pack(expand=True, anchor=W, side=LEFT) # Create a checkbox for each test group self.trialLabels", "* from ttk import Frame, Style from os import chdir, path, sep import", "outputFilename + '.' def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data files with a", "== trial][columns] tempFrame = tempFrame.dropna() # tempFrame = tempFrame[~tempFrame[columns[-1]].isin([\"No Pecks\"])==True] return tempFrame #", "\"Exiting program...\" exit() else: numError = 0 break try: dataDirname = tkFileDialog.askdirectory(parent=root, initialdir=sep,", "delay=toolTipDelay, text=self.trialTooltips[num])) # create select/deselect all buttons self.createButtons(trialFrame, self.trialVals, \"experimental phases\") # Create", "'.' def analyzePigeons(calcForThreshold, path): print \"\\nProcessing %1.0f data files with a threshold of", "is not \\ currently in use. 
Saving operation cancelled.\") elif (trialsForOutput == []", "print \"Saving output of chosen groups and pigeons to \", chosenName except: tkMessageBox.showinfo(\"Saving", "threshold: \") # Threshold entry box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold)", "> 5): # display progress progressTime = time.time() # update progress time #", "threshold value \\ for calculating the max distance away from a goal to", "output file name \\ was selected. Saving operation cancelled.\") try: writer.save() print \"Saving", "outputFilename = \"\" pigeonName = \"\" allPigeons = {} allData = {} groupsForOutput", "output name for saving the excel file todaysDate = time.strftime(\"%Y-%m-%d\") initialFileName = todaysDate", "= Pigeon(pigeonData) def printInfo(processingTime, outputFilename): print \"Processing the selected data files took %1.2f", "groups def analyzeGroups(self, trials, animals): outputFrames = {} columns = [\"Pigeon Name\", \"Trial", "pigeon procesing from pigeon import Pigeon # Import tool tip function developed by", "of %0.0f units, \\ please wait...\" % (numFiles, calcForThreshold) startTime = time.time() #", "for the title section # ====================================================================== titleFrame = Frame(self) titleFrame.pack(fill=X) # Create the", "= len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found. Please restart the program.\") # First", "= dirname.replace('/', sep) # Ask user to identify the data directory numErrors =", "glob.glob(\"*.xls\") try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found. 
Please restart the", "False)) reformatButton.pack() reformatTooltip = ToolTip(reformatButton, delay=toolTipDelay, text=\"Click to apply any changes to threshold", "box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip = ToolTip(thresholdBox, delay=toolTipDelay, text=\"Change", "\\ for calculating the max distance away from a goal to be kept", "a dictionary for GUI processing allData[pigeonName] = pigeon.dataframe # also calculate how long", "# Threshold entry box thresholdBox = Entry(buttonsFrame, width=10) thresholdBox.pack() thresholdBox.insert(0, defaultThreshold) thresholdBoxTooltip =", "tkMessageBox.showinfo(\"Saving cancelled\", \"No output file name \\ was selected. Saving operation cancelled.\") try:", "create a processed dataframe for each pigeon/trial def getFrame(self, pigeonFrame, columns, trial): tempFrame", "text=\"Data Processor For Pigeon Experiment\", font=self.titleFont) title_label.pack(fill=X, expand=True) title_labelTooltip = ToolTip(title_label, delay=toolTipDelay +", "directory.\") if not dataDirname: raise ValueError(\"empty string\") break except ValueError: numErrors += 1", "the groups and animals\\ selected above.\") # Create and populate group and trial", "is moved to the end of the \\ long wall.\"] self.trialVals = []", "trials: trialFrame = pd.DataFrame({}) # storage frame for each trial gotrialFrame = pd.DataFrame({})", ".xls allFiles = glob.glob(\"*.xls\") try: numFiles = len(allFiles) except: tkMessageBox.showinfo(\"No excel spreadsheets found.", "something to analyze.\") elif (trialsForOutput == []): tkMessageBox.showinfo(\"No groups selected\", \"Please select at", "Processor\") # create title label root.geometry(\"840x520+300+300\") # set the size of the window", "the output of initial data processing was \\ cancelled.\") # =============================================================================# # ==========main", "into a DataFrame pigeonData = 
pd.read_excel(datafile) # extract pigeon name pigeonNametemp = pigeonData[\"Trial", "storage frame for each trial gotrialFrame = pd.DataFrame({}) AFtrialFrame = pd.DataFrame({}) # loop", "# run the initial formatting on the data folder analyzePigeons(defaultThreshold, path) print \"\\nTips", "the output spreadsheet outputFilename = path.join(dirname, \"output-threshold-%0.0f.xls\" % calcForThreshold) allWriter = pd.ExcelWriter(outputFilename) currFile", "self.createButtons(animalsFrame, self.animalVals, \"animals\") # Create a frame for handling all of the additional", "valid directory...\") dataDirname = dataDirname.replace('/', sep) # cd to data directory chdir(dataDirname) #", "buttonNum in range(len(buttonGroup)): if event == \"Select\": buttonGroup[buttonNum].set(1) else: buttonGroup[buttonNum].set(0) # Output the", "allFiles.index(file) # now read excel file data into a DataFrame pigeonData = pd.read_excel(datafile)", "self.animalVals, \"animals\") # Create a frame for handling all of the additional buttons", "dataDirname.replace('/', sep) # cd to data directory chdir(dataDirname) # list all files of", "self.getFrame(pigeonFrame, columns, trial) trialFrame = trialFrame.append(tempFrame) # add this pigeon to trial frame", "((time.time() - progressTime) > 5): # display progress progressTime = time.time() # update", "the data directory.\") if not dataDirname: raise ValueError(\"empty string\") break except ValueError: numErrors", "Button(frame, text=\"De-Select All\", command=lambda: self.allButtons(vals, \"De-Select\")) deselectAll.pack() deselectTrialToolTip = ToolTip(deselectAll, delay=toolTipDelay, text=\"Deselect all" ]
[ "DEBUG, INFO, WARNING, ERROR, or CRITICAL) for the basic logger.') # Parse what", "import os from blueprints.loggers import basic_logger def log_sample_messages(): ''' Sends a sample message", "in. args = parser.parse_args() # Get the name of the logger. if args.logger_name:", "recieved from queue: %s, message data: %s', queue_name, message_data) logger.info('Message received from queue:", "level to be used for the logger's handler. if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level", "= argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name of your logger.') parser.add_argument('-l', '--logging_level', help='Specify", "message_data, stack_info=True) if __name__ == '__main__': # Setup all the CLI arguments for", "args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s —", "logger.info('Message received from queue: %s', queue_name) logger.warning('Old message format detected. Queue: %s, Message", "'__main__': # Setup all the CLI arguments for this module. parser = argparse.ArgumentParser()", "logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging level (NOTSET, DEBUG, INFO, WARNING, ERROR, or", "'--logging_level', help='Specify a logging level (NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL) for", "message from queue: %s, message_data: %s.', queue_name, message_data, stack_info=True) if __name__ == '__main__':", "argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name of your logger.') parser.add_argument('-l', '--logging_level', help='Specify a", "{'customerId': 42, 'productId': 12345, 'quantity': 5} attempt = 2 logger = basic_logger.get_logger() logger.debug('Message", "queue_name, message_data) logger.error('Could not connect to queue. 
Attempt: %i', attempt, stack_info=True) logger.critical('Error processing", "message_data = {'customerId': 42, 'productId': 12345, 'quantity': 5} attempt = 2 logger =", "of your logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging level (NOTSET, DEBUG, INFO, WARNING,", "attempt = 2 logger = basic_logger.get_logger() logger.debug('Message recieved from queue: %s, message data:", "basic logger.') # Parse what was passed in. args = parser.parse_args() # Get", "%s', queue_name) logger.warning('Old message format detected. Queue: %s, Message data: %s', queue_name, message_data)", "handler. if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s", "2 logger = basic_logger.get_logger() logger.debug('Message recieved from queue: %s, message data: %s', queue_name,", "Attempt: %i', attempt, stack_info=True) logger.critical('Error processing message from queue: %s, message_data: %s.', queue_name,", "'productId': 12345, 'quantity': 5} attempt = 2 logger = basic_logger.get_logger() logger.debug('Message recieved from", "queue_name) logger.warning('Old message format detected. Queue: %s, Message data: %s', queue_name, message_data) logger.error('Could", "or CRITICAL) for the basic logger.') # Parse what was passed in. args", "name of your logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging level (NOTSET, DEBUG, INFO,", "(NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL) for the basic logger.') # Parse", "= __name__ # Get the level to be used for the logger's handler.", "stack_info=True) logger.critical('Error processing message from queue: %s, message_data: %s.', queue_name, message_data, stack_info=True) if", "the basic logger.') # Parse what was passed in. 
args = parser.parse_args() #", "— %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging level: ' +", "%(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging level: ' + os.environ['LOGGING_LEVEL'])", "queue: %s, message data: %s', queue_name, message_data) logger.info('Message received from queue: %s', queue_name)", "what was passed in. args = parser.parse_args() # Get the name of the", "for the logger's handler. if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO'", "logger's handler. if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] =", "if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] = __name__ # Get the level", "queue_name = 'orders' message_data = {'customerId': 42, 'productId': 12345, 'quantity': 5} attempt =", "if __name__ == '__main__': # Setup all the CLI arguments for this module.", "this module. parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name of your logger.')", "<filename>blueprints/loggers/demos/basic_logger_demo.py import argparse import os from blueprints.loggers import basic_logger def log_sample_messages(): ''' Sends", "os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] = __name__ # Get the level to be", "passed in. args = parser.parse_args() # Get the name of the logger. if", "__name__ # Get the level to be used for the logger's handler. if", "your logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging level (NOTSET, DEBUG, INFO, WARNING, ERROR,", "%s, Message data: %s', queue_name, message_data) logger.error('Could not connect to queue. 
Attempt: %i',", "attempt, stack_info=True) logger.critical('Error processing message from queue: %s, message_data: %s.', queue_name, message_data, stack_info=True)", "CRITICAL) for the basic logger.') # Parse what was passed in. args =", "logging level (NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL) for the basic logger.')", "message_data) logger.info('Message received from queue: %s', queue_name) logger.warning('Old message format detected. Queue: %s,", "each logging level. ''' queue_name = 'orders' message_data = {'customerId': 42, 'productId': 12345,", "name: ' + os.environ['LOGGER_NAME']) print('Logging level: ' + os.environ['LOGGING_LEVEL']) print('Format: ' + os.environ['FORMAT_STRING'])", "for this module. parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name of your", "'orders' message_data = {'customerId': 42, 'productId': 12345, 'quantity': 5} attempt = 2 logger", "queue: %s', queue_name) logger.warning('Old message format detected. Queue: %s, Message data: %s', queue_name,", "42, 'productId': 12345, 'quantity': 5} attempt = 2 logger = basic_logger.get_logger() logger.debug('Message recieved", "logger.') # Parse what was passed in. args = parser.parse_args() # Get the", "not connect to queue. Attempt: %i', attempt, stack_info=True) logger.critical('Error processing message from queue:", "for each logging level. ''' queue_name = 'orders' message_data = {'customerId': 42, 'productId':", "processing message from queue: %s, message_data: %s.', queue_name, message_data, stack_info=True) if __name__ ==", "all the CLI arguments for this module. 
parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify", "= '%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: '", "from queue: %s, message_data: %s.', queue_name, message_data, stack_info=True) if __name__ == '__main__': #", "%(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging level: '", "parser.add_argument('-n', '--logger_name', help='Specify the name of your logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging", "Get the level to be used for the logger's handler. if args.logging_level: os.environ['LOGGING_LEVEL']", "queue. Attempt: %i', attempt, stack_info=True) logger.critical('Error processing message from queue: %s, message_data: %s.',", "to be used for the logger's handler. if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else:", "stack_info=True) if __name__ == '__main__': # Setup all the CLI arguments for this", "for the basic logger.') # Parse what was passed in. args = parser.parse_args()", "= args.logger_name else: os.environ['LOGGER_NAME'] = __name__ # Get the level to be used", "'%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: ' +", "%s', queue_name, message_data) logger.error('Could not connect to queue. Attempt: %i', attempt, stack_info=True) logger.critical('Error", "used for the logger's handler. if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] =", "blueprints.loggers import basic_logger def log_sample_messages(): ''' Sends a sample message for each logging", "the name of the logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] =", "sample message for each logging level. 
''' queue_name = 'orders' message_data = {'customerId':", "CLI arguments for this module. parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name", "arguments for this module. parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name of", "message data: %s', queue_name, message_data) logger.info('Message received from queue: %s', queue_name) logger.warning('Old message", "+ os.environ['LOGGER_NAME']) print('Logging level: ' + os.environ['LOGGING_LEVEL']) print('Format: ' + os.environ['FORMAT_STRING']) basic_logger.create_logger() log_sample_messages()", "data: %s', queue_name, message_data) logger.info('Message received from queue: %s', queue_name) logger.warning('Old message format", "Message data: %s', queue_name, message_data) logger.error('Could not connect to queue. Attempt: %i', attempt,", "data: %s', queue_name, message_data) logger.error('Could not connect to queue. Attempt: %i', attempt, stack_info=True)", "from queue: %s, message data: %s', queue_name, message_data) logger.info('Message received from queue: %s',", "Queue: %s, Message data: %s', queue_name, message_data) logger.error('Could not connect to queue. 
Attempt:", "level (NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL) for the basic logger.') #", "os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d —", "os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name:", "— %(message)s' print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging level: ' + os.environ['LOGGING_LEVEL']) print('Format:", "'quantity': 5} attempt = 2 logger = basic_logger.get_logger() logger.debug('Message recieved from queue: %s,", "message_data: %s.', queue_name, message_data, stack_info=True) if __name__ == '__main__': # Setup all the", "else: os.environ['LOGGER_NAME'] = __name__ # Get the level to be used for the", "# Parse what was passed in. args = parser.parse_args() # Get the name", "Setup all the CLI arguments for this module. parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name',", "os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s —", "was passed in. args = parser.parse_args() # Get the name of the logger.", "Sends a sample message for each logging level. ''' queue_name = 'orders' message_data", "the logger's handler. 
if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING']", "'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger", "os from blueprints.loggers import basic_logger def log_sample_messages(): ''' Sends a sample message for", "%i', attempt, stack_info=True) logger.critical('Error processing message from queue: %s, message_data: %s.', queue_name, message_data,", "5} attempt = 2 logger = basic_logger.get_logger() logger.debug('Message recieved from queue: %s, message", "the name of your logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging level (NOTSET, DEBUG,", "argparse import os from blueprints.loggers import basic_logger def log_sample_messages(): ''' Sends a sample", "args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s", "to queue. Attempt: %i', attempt, stack_info=True) logger.critical('Error processing message from queue: %s, message_data:", "logger.critical('Error processing message from queue: %s, message_data: %s.', queue_name, message_data, stack_info=True) if __name__", "name of the logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] = __name__", "Get the name of the logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME']", "the logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] = __name__ # Get", "help='Specify the name of your logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging level (NOTSET,", "''' Sends a sample message for each logging level. 
''' queue_name = 'orders'", "= 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s'", "parser.add_argument('-l', '--logging_level', help='Specify a logging level (NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL)", "== '__main__': # Setup all the CLI arguments for this module. parser =", "from blueprints.loggers import basic_logger def log_sample_messages(): ''' Sends a sample message for each", "%s, message data: %s', queue_name, message_data) logger.info('Message received from queue: %s', queue_name) logger.warning('Old", "12345, 'quantity': 5} attempt = 2 logger = basic_logger.get_logger() logger.debug('Message recieved from queue:", "def log_sample_messages(): ''' Sends a sample message for each logging level. ''' queue_name", "detected. Queue: %s, Message data: %s', queue_name, message_data) logger.error('Could not connect to queue.", "import basic_logger def log_sample_messages(): ''' Sends a sample message for each logging level.", "log_sample_messages(): ''' Sends a sample message for each logging level. ''' queue_name =", "Parse what was passed in. args = parser.parse_args() # Get the name of", "= args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s", "# Get the name of the logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else:", "logger.debug('Message recieved from queue: %s, message data: %s', queue_name, message_data) logger.info('Message received from", "— %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging level:", "a sample message for each logging level. ''' queue_name = 'orders' message_data =", "logger = basic_logger.get_logger() logger.debug('Message recieved from queue: %s, message data: %s', queue_name, message_data)", "the level to be used for the logger's handler. 
if args.logging_level: os.environ['LOGGING_LEVEL'] =", "the CLI arguments for this module. parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the", "logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] = __name__ # Get the", "%s.', queue_name, message_data, stack_info=True) if __name__ == '__main__': # Setup all the CLI", "ERROR, or CRITICAL) for the basic logger.') # Parse what was passed in.", "= parser.parse_args() # Get the name of the logger. if args.logger_name: os.environ['LOGGER_NAME'] =", "logging level. ''' queue_name = 'orders' message_data = {'customerId': 42, 'productId': 12345, 'quantity':", "module. parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name of your logger.') parser.add_argument('-l',", "be used for the logger's handler. if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL']", "from queue: %s', queue_name) logger.warning('Old message format detected. Queue: %s, Message data: %s',", "message format detected. Queue: %s, Message data: %s', queue_name, message_data) logger.error('Could not connect", "if args.logging_level: os.environ['LOGGING_LEVEL'] = args.logging_level else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s —", "queue_name, message_data, stack_info=True) if __name__ == '__main__': # Setup all the CLI arguments", "'--logger_name', help='Specify the name of your logger.') parser.add_argument('-l', '--logging_level', help='Specify a logging level", "basic_logger.get_logger() logger.debug('Message recieved from queue: %s, message data: %s', queue_name, message_data) logger.info('Message received", "message for each logging level. 
''' queue_name = 'orders' message_data = {'customerId': 42,", "help='Specify a logging level (NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL) for the", "args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] = __name__ # Get the level to", "queue_name, message_data) logger.info('Message received from queue: %s', queue_name) logger.warning('Old message format detected. Queue:", "' + os.environ['LOGGER_NAME']) print('Logging level: ' + os.environ['LOGGING_LEVEL']) print('Format: ' + os.environ['FORMAT_STRING']) basic_logger.create_logger()", "= 2 logger = basic_logger.get_logger() logger.debug('Message recieved from queue: %s, message data: %s',", "WARNING, ERROR, or CRITICAL) for the basic logger.') # Parse what was passed", "# Get the level to be used for the logger's handler. if args.logging_level:", "connect to queue. Attempt: %i', attempt, stack_info=True) logger.critical('Error processing message from queue: %s,", "__name__ == '__main__': # Setup all the CLI arguments for this module. parser", "%s', queue_name, message_data) logger.info('Message received from queue: %s', queue_name) logger.warning('Old message format detected.", "of the logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name else: os.environ['LOGGER_NAME'] = __name__ #", "args.logger_name else: os.environ['LOGGER_NAME'] = __name__ # Get the level to be used for", "a logging level (NOTSET, DEBUG, INFO, WARNING, ERROR, or CRITICAL) for the basic", "logger.error('Could not connect to queue. 
Attempt: %i', attempt, stack_info=True) logger.critical('Error processing message from", "queue: %s, message_data: %s.', queue_name, message_data, stack_info=True) if __name__ == '__main__': # Setup", "''' queue_name = 'orders' message_data = {'customerId': 42, 'productId': 12345, 'quantity': 5} attempt", "= basic_logger.get_logger() logger.debug('Message recieved from queue: %s, message data: %s', queue_name, message_data) logger.info('Message", "# Setup all the CLI arguments for this module. parser = argparse.ArgumentParser() parser.add_argument('-n',", "args = parser.parse_args() # Get the name of the logger. if args.logger_name: os.environ['LOGGER_NAME']", "else: os.environ['LOGGING_LEVEL'] = 'INFO' os.environ['FORMAT_STRING'] = '%(asctime)s — %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d", "logger.warning('Old message format detected. Queue: %s, Message data: %s', queue_name, message_data) logger.error('Could not", "print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging level: ' + os.environ['LOGGING_LEVEL']) print('Format: ' +", "level. ''' queue_name = 'orders' message_data = {'customerId': 42, 'productId': 12345, 'quantity': 5}", "received from queue: %s', queue_name) logger.warning('Old message format detected. Queue: %s, Message data:", "= 'orders' message_data = {'customerId': 42, 'productId': 12345, 'quantity': 5} attempt = 2", "os.environ['LOGGER_NAME'] = __name__ # Get the level to be used for the logger's", "%s, message_data: %s.', queue_name, message_data, stack_info=True) if __name__ == '__main__': # Setup all", "%(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging", "%(message)s' print('Logger name: ' + os.environ['LOGGER_NAME']) print('Logging level: ' + os.environ['LOGGING_LEVEL']) print('Format: '", "message_data) logger.error('Could not connect to queue. 
Attempt: %i', attempt, stack_info=True) logger.critical('Error processing message", "INFO, WARNING, ERROR, or CRITICAL) for the basic logger.') # Parse what was", "import argparse import os from blueprints.loggers import basic_logger def log_sample_messages(): ''' Sends a", "= {'customerId': 42, 'productId': 12345, 'quantity': 5} attempt = 2 logger = basic_logger.get_logger()", "basic_logger def log_sample_messages(): ''' Sends a sample message for each logging level. '''", "format detected. Queue: %s, Message data: %s', queue_name, message_data) logger.error('Could not connect to", "— %(name)s — %(levelname)s — %(module)s:%(funcName)s:%(lineno)d — %(message)s' print('Logger name: ' + os.environ['LOGGER_NAME'])", "parser.parse_args() # Get the name of the logger. if args.logger_name: os.environ['LOGGER_NAME'] = args.logger_name", "parser = argparse.ArgumentParser() parser.add_argument('-n', '--logger_name', help='Specify the name of your logger.') parser.add_argument('-l', '--logging_level'," ]
[ "* shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\": #Days when sell", "growths from two groups --==|||| def growth_plot (growth_1, growth_2, file): file_name = file", "EXECUTE THE ORDERS FOR ONE INVESTOR --==|||| def wallet_evolution (operations, ticker): free_cash =", "if op_day < hold_op: operations.append(0) op_day += 1 else: operations.append(next_op) hold_op = round(np.random.uniform(1,", "for the benchmark and the wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth is:", "wallet = {} if ticker in ticker_list: for investor in investors_group: wallet[investor] =", "AND ADD THE DAILY AVERAGE VALUE CALCULATED WITH THE HIGH AND LOW --==||||", "len(free_cash) == 0: #First day and no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1]", "position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\":", "next_op == \"B\": next_op = \"S\" elif next_op == \"S\": next_op = \"B\"", "x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode = 'lines', line = dict(color =", "1), name = \"Investor_\" + str(wallet)) data = data + [investor_data] \"\"\" lt_evolution", "= go.Scatter( x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode = 'lines', line =", "stocks_price[ticker][\"Benchmark\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width = 5 ),", "(wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey got an average growth of: \"", "datetime.datetime(2015, 12, 11) end = datetime.datetime(2019, 12, 10) #Declare the tickers in the", "--==|||| def monkey_population (n, freq_op): monkeys = {} for i in range(n): monkeys[i]", "0: if len(free_cash) == 0: #First day and no buy free_cash.append(initial_cash) 
shares_owned.append(0) wallet_value.append(0)", "False, output_type = 'file', filename = file_name, auto_open = True) return True ##########################################", "FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE A VECTOR WITH THE OPERATIONS OF THE", "i in range(n): monkeys[i] = monkey_broker (trade_days, freq_op) return monkeys #||||==-- CREATE A", "stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex = int(initial_cash / share_price) wallet_value.append(share_price * shares_ex)", "print(\"--- %s seconds to calculate all wallets growth ---\" % (time.time() - start_time))", "and no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0: #Days without stocks", "= growth total_growth.append(growth) return total_growth #||||==-- Plot of wallets for an investors group", "% (time.time() - start_time)) #Calculate the growth for the benchmark and the wallets", "run the program again\") return False #||||==-- Growth percentage of the investment for", "if len(free_cash) == 0: #First day and no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif", "shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex = int(initial_cash / share_price)", "= False, output_type = 'file', filename = file_name, auto_open = True) return True", "ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False, output_type = 'file', filename", "65) print(\"--- %s seconds to populate all monkeys ---\" % (time.time() - start_time))", "of: \" + str(np.average(impatient_growth)) ) print(\"Patient monkey got an average growth of: \"", "output_type = 'file', filename = file_name, auto_open = True) return True #||||==-- Plot", "monkeys impatient_monkeys = monkey_population (200, 8) patient_monkeys = monkey_population (200, 65) 
print(\"--- %s", "\"Prueba2\") #print(\"--- %s seconds to plot patient monkeys wallets---\" % (time.time() - start_time))", "0 #Build vector with operations for i in range(trade_days-1): if op_day < hold_op:", "plotly import plotly.graph_objects as go import plotly.figure_factory as ff ########################################## ######## INPUT DATA", "operations for the monkeys impatient_monkeys = monkey_population (200, 8) patient_monkeys = monkey_population (200,", "round(np.random.uniform(1, freq_op)) #days between operations operations = [] #Vector with operations executed op_day", "start ---\" % (time.time() - start_time)) #Get the prices and calculate the days", "#print (\"Monkey number: \" + str(investor)) #print (\"Ends period with: \" + str(wallet[investor][\"Total\"][-1])", "operation is a BUY by setting a SELL the last day (if needed)", "group --==|||| def benchmark_growth (ticker): if ticker in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1]", "stocks_price = get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get prices", "A VECTOR WITH LONG TERM OPERATION --==|||| def lt_broker (trade_days): operations = [0]", "def lt_broker (trade_days): operations = [0] * (trade_days) operations[0] = \"B\" operations[-1] =", "for ticker in ticker_list: price = web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] = (price[\"High\"]", "== 0: #First day and no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] ==", "'yahoo', start, end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"]", "coding: utf-8 -*- \"\"\" Created on Tue Dec 10 21:44:03 2019 @author: Christian", "#||||==-- CREATE A VECTOR WITH LONG TERM OPERATION --==|||| def lt_broker (trade_days): operations", "Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False, output_type = 'file', filename", "groups 
--==|||| def growth_plot (growth_1, growth_2, file): file_name = file + \".html\" #", "#First day and no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0: #Days", "free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\": #Days when sell shares share_price", "import numpy as np import plotly import plotly.graph_objects as go import plotly.figure_factory as", "1', 'Group 2'] # Create distplot with custom bin_size fig = ff.create_distplot(hist_data, group_labels,", "layout=layout) plotly.offline.plot(fig, show_link = False, output_type = 'file', filename = file_name, auto_open =", "vector with operations for i in range(trade_days-1): if op_day < hold_op: operations.append(0) op_day", "wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number: \" + str(investor)) #print (\"Ends", "for wallet in wallets: growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0]", "lt_evolution = go.Scatter( x = stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line", "free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\": #Days", "price[\"Benchmark\"] = price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price #||||==--", "Growth percentage of the investment for an invetsors group --==|||| def benchmark_growth (ticker):", "######## INPUT DATA ######## ########################################## #Define start and end date for the data", "= data + [lt_evolution] \"\"\" benchmark = go.Scatter( x = stocks_price[ticker].index, y =", "buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0: #Days without stocks 
free_cash.append(free_cash[-1]) shares_owned.append(0)", "= file_name, auto_open = True) return True #||||==-- Plot histogram for growths from", "average growth of: \" + str(np.average(impatient_growth)) ) print(\"Patient monkey got an average growth", "2019 @author: Christian \"\"\" ########################################## ######## IMPORT LIBRARIES ######## ########################################## import datetime #import", "growth_2] group_labels = ['Group 1', 'Group 2'] # Create distplot with custom bin_size", "#print(\"--- %s seconds to plot impatient monkeys wallets---\" % (time.time() - start_time)) #wallets_plot", "hold_op = round(np.random.uniform(1, freq_op)) #days between operations operations = [] #Vector with operations", "program again\") return False #||||==-- Growth percentage of the investment for an invetsors", "* shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price *", "group --==|||| def wallet_growth (wallets): total_growth = [] for wallet in wallets: growth", "print(\"--- %s seconds to populate all monkeys ---\" % (time.time() - start_time)) #Generate", "initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price #||||==-- EXECUTE THE ORDERS FOR", "wallet_growth (wallets_patients) print(\"Impatient monkey got an average growth of: \" + str(np.average(impatient_growth)) )", "== \"S\": #Days when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0)", "width = 5 ), name = ticker) data = data + [benchmark] layout", "= data + [benchmark] layout = go.Layout( title = file, xaxis = dict(title='Time'),", "get_prices (ticker_list): stocks_price = {} for ticker in ticker_list: price = web.DataReader(ticker, 'yahoo',", "if ticker in ticker_list: for investor in investors_group: wallet[investor] = wallet_evolution (investors_group[investor], ticker)", "include it and run 
the program again\") return False #||||==-- Growth percentage of", "(trade_days, freq_op): #trade_days is the days of the experiment #freq_op is the average", "/ stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker not in list, include it and", "return wallet else: print (\"Ticker not in list, include it and run the", "= \"B\" #B for BUY // S for SELL hold_op = round(np.random.uniform(1, freq_op))", "range(trade_days-1): if op_day < hold_op: operations.append(0) op_day += 1 else: operations.append(next_op) hold_op =", "y = stocks_price[ticker][\"Benchmark\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width =", "#||||==-- GET THE PRICES AND ADD THE DAILY AVERAGE VALUE CALCULATED WITH THE", "str(np.average(patient_growth)) ) print(\"--- %s seconds to calculate all wallets growth ---\" % (time.time()", "title = file, xaxis = dict(title='Time'), yaxis = dict(title='Monetary Units')) fig = go.Figure(data=data,", "ticker) data = data + [lt_evolution] \"\"\" benchmark = go.Scatter( x = stocks_price[ticker].index,", "cash\": free_cash, \"Wallet value\": wallet_value, \"Total\": total_value} #||||==-- Execution of the orders for", "% (time.time() - start_time)) #Generate the dictionaries with the evolutoin of the wallets", "growth_plot (growth_1, growth_2, file): file_name = file + \".html\" # Group data together", "VECTOR WITH THE OPERATIONS OF THE MONKEYS --==|||| def monkey_broker (trade_days, freq_op): #trade_days", "#Other tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL, PG, TSLA #Define the initial", "and end date for the data start = datetime.datetime(2015, 12, 11) end =", "wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \" + str(hpq_growth) ) impatient_growth", "stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode = 'lines', line = dict(color = 'rgb(130,130,130)', width", "= 5 ), name = ticker) data = data + [benchmark] layout =", "\" + str(wallet[investor][\"Total\"][-1]) ) return wallet else: 
print (\"Ticker not in list, include", "FOR ONE INVESTOR --==|||| def wallet_evolution (operations, ticker): free_cash = [] shares_owned =", "= (price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash / price[\"AVG\"][0]", "return total_growth #||||==-- Plot of wallets for an investors group --==|||| def wallets_plot", "import plotly.figure_factory as ff ########################################## ######## INPUT DATA ######## ########################################## #Define start and", "12, 11) end = datetime.datetime(2019, 12, 10) #Declare the tickers in the analysis", "wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth #||||==--", "plotly.offline.plot(fig, show_link = False, output_type = 'file', filename = file_name, auto_open = True)", "#||||==-- CREATE A DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS --==|||| def monkey_population", "(ticker_list): stocks_price = {} for ticker in ticker_list: price = web.DataReader(ticker, 'yahoo', start,", "= shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex) total_value = [x +", "web import numpy as np import plotly import plotly.graph_objects as go import plotly.figure_factory", "(trade_days, freq_op) return monkeys #||||==-- CREATE A VECTOR WITH LONG TERM OPERATION --==||||", "ticker_list = [\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL, PG, TSLA", "bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False, output_type", "(wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds to plot impatient monkeys wallets---\" % (time.time()", "auto_open = True) return True #||||==-- Plot histogram for growths from two groups", "= go.Scatter( x = stocks_price[ticker].index, y = 
wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line =", "operations = [] #Vector with operations executed op_day = 0 #Build vector with", "[] shares_owned = [] wallet_value = [] for op in operations: if op", "monkey got an average growth of: \" + str(np.average(patient_growth)) ) print(\"--- %s seconds", "growth of: \" + str(np.average(patient_growth)) ) print(\"--- %s seconds to calculate all wallets", "THE DAILY AVERAGE VALUE CALCULATED WITH THE HIGH AND LOW --==|||| def get_prices", "[growth_1, growth_2] group_labels = ['Group 1', 'Group 2'] # Create distplot with custom", "= data + [investor_data] \"\"\" lt_evolution = go.Scatter( x = stocks_price[ticker].index, y =", "* shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\": #Days when buy shares share_price", "- stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker not in list,", "\"B\" #B for BUY // S for SELL hold_op = round(np.random.uniform(1, freq_op)) #days", "= int(initial_cash / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex =", "new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\": #Days when", "PRICES AND ADD THE DAILY AVERAGE VALUE CALCULATED WITH THE HIGH AND LOW", "+ share_price * shares_ex) total_value = [x + y for x, y in", "wallet in wallets: growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"]", "[x + y for x, y in zip(free_cash, wallet_value)] return {\"Free cash\": free_cash,", "growth else: print (\"Ticker not in list, include it and run the program", "shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0: #Days without stocks free_cash.append(free_cash[-1]) 
shares_owned.append(0) wallet_value.append(0) elif", "wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth #||||==-- Plot of wallets for an", "free_cash, \"Wallet value\": wallet_value, \"Total\": total_value} #||||==-- Execution of the orders for an", "share_price * shares_ex) total_value = [x + y for x, y in zip(free_cash,", "freq_op): monkeys = {} for i in range(n): monkeys[i] = monkey_broker (trade_days, freq_op)", "in zip(free_cash, wallet_value)] return {\"Free cash\": free_cash, \"Wallet value\": wallet_value, \"Total\": total_value} #||||==--", "total_value} #||||==-- Execution of the orders for an investors group --==|||| def wallets_evolution", "evolution for every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds to plot", "= 5 ), name = ticker) data = data + [lt_evolution] \"\"\" benchmark", "= [growth_1, growth_2] group_labels = ['Group 1', 'Group 2'] # Create distplot with", "dictionaries with the evolutoin of the wallets wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients", "start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds to plot patient monkeys wallets---\"", "21:44:03 2019 @author: Christian \"\"\" ########################################## ######## IMPORT LIBRARIES ######## ########################################## import datetime", "monkeys wallets ---\" % (time.time() - start_time)) #Calculate the growth for the benchmark", "populate all monkeys ---\" % (time.time() - start_time)) #Generate the dictionaries with the", "show_link = False, output_type = 'file', filename = file_name, auto_open = True) return", "(price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker]", "WITH THE HIGH AND LOW --==|||| def get_prices (ticker_list): stocks_price = {} for", ") / wallets[wallet][\"Total\"][0] 
wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth #||||==-- Plot of wallets", "ticker in ticker_list: for investor in investors_group: wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print", "list, include it and run the program again\") return False #||||==-- Growth percentage", "the investment for an invetsors group --==|||| def wallet_growth (wallets): total_growth = []", "y = investors_wallets[wallet][\"Total\"], mode = 'lines', line = dict(color = 'rgb(130,130,130)', width =", "stocks_price #||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR --==|||| def wallet_evolution (operations, ticker):", "--==|||| def growth_plot (growth_1, growth_2, file): file_name = file + \".html\" # Group", "\"HPQ\", \"Prueba2\") #print(\"--- %s seconds to plot patient monkeys wallets---\" % (time.time() -", "######## ########################################## #||||==-- CREATE A VECTOR WITH THE OPERATIONS OF THE MONKEYS --==||||", "data = data + [benchmark] layout = go.Layout( title = file, xaxis =", "impatient_monkeys = monkey_population (200, 8) patient_monkeys = monkey_population (200, 65) print(\"--- %s seconds", "is a BUY by setting a SELL the last day (if needed) if", "mode = 'lines', line = dict(color = 'rgb(30,30,30)', width = 5 ), name", "(impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds to calculate all", "= wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds to calculate all monkeys wallets ---\"", "name = ticker) data = data + [benchmark] layout = go.Layout( title =", "in range(trade_days-1): if op_day < hold_op: operations.append(0) op_day += 1 else: operations.append(next_op) hold_op", "share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex = int(free_cash[-1] / share_price)", "OPERATIONS OF MULTIPLE MONKEYS --==|||| def monkey_population (n, freq_op): monkeys = {} 
for", "---\" % (time.time() - start_time)) #Calculate the growth for the benchmark and the", "DAILY AVERAGE VALUE CALCULATED WITH THE HIGH AND LOW --==|||| def get_prices (ticker_list):", "wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth #||||==-- Plot of", "an investors group --==|||| def wallets_evolution (investors_group, ticker): wallet = {} if ticker", "wallets wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s", "= \"B\" #Avoid last operation is a BUY by setting a SELL the", "[] for op in operations: if op == 0: if len(free_cash) == 0:", "#Days when hold position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif", "monkey_population (200, 65) print(\"--- %s seconds to populate all monkeys ---\" % (time.time()", "the wallets wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"---", "an invetsors group --==|||| def wallet_growth (wallets): total_growth = [] for wallet in", "\" + str(investor)) #print (\"Ends period with: \" + str(wallet[investor][\"Total\"][-1]) ) return wallet", "pandas_datareader.data as web import numpy as np import plotly import plotly.graph_objects as go", "\"\"\" ########################################## ######## IMPORT LIBRARIES ######## ########################################## import datetime #import pandas as pd", "all wallets growth ---\" % (time.time() - start_time)) growth_plot (impatient_growth, patient_growth, \"growth\") #Plot", "AVERAGE VALUE CALCULATED WITH THE HIGH AND LOW --==|||| def get_prices (ticker_list): stocks_price", "plot impatient monkeys wallets---\" % (time.time() - start_time)) #wallets_plot 
(wallets_patients, \"HPQ\", \"Prueba2\") #print(\"---", "wallet_value = [] for op in operations: if op == 0: if len(free_cash)", "return operations #||||==-- CREATE A DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS --==||||", "SELL hold_op = round(np.random.uniform(1, freq_op)) #days between operations operations = [] #Vector with", "got an average growth of: \" + str(np.average(patient_growth)) ) print(\"--- %s seconds to", "price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price #||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR", "Execution of the orders for an investors group --==|||| def wallets_evolution (investors_group, ticker):", "to plot impatient monkeys wallets---\" % (time.time() - start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\")", "= stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)',", "#Avoid last operation is a BUY by setting a SELL the last day", "* initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price #||||==-- EXECUTE THE ORDERS", "plotly.figure_factory as ff ########################################## ######## INPUT DATA ######## ########################################## #Define start and end", "share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex)", "orders for an investors group --==|||| def wallets_evolution (investors_group, ticker): wallet = {}", "= wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width = 5", "free_cash = [] shares_owned = [] wallet_value = [] for op in operations:", "op == 0: if len(free_cash) == 0: #First day and no buy free_cash.append(initial_cash)", "def growth_plot (growth_1, growth_2, file): file_name = file + \".html\" # Group data", "available stocks_price = get_prices 
(ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get", "THE PRICES AND ADD THE DAILY AVERAGE VALUE CALCULATED WITH THE HIGH AND", "if op == 0: if len(free_cash) == 0: #First day and no buy", "Created on Tue Dec 10 21:44:03 2019 @author: Christian \"\"\" ########################################## ######## IMPORT", "prices and calculate the days of tradding data available stocks_price = get_prices (ticker_list)", "False #||||==-- Growth percentage of the investment for an invetsors group --==|||| def", "########################################## #Define start and end date for the data start = datetime.datetime(2015, 12,", "shares_owned[-1] == 0: #Days without stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0:", "start_time)) #Generate the dictionaries with the operations for the monkeys impatient_monkeys = monkey_population", "benchmark and the wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \" +", "impatient_growth = wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey got an average", "True) return True ########################################## ######## SIMULATION EXECUTION ######## ########################################## import time start_time =", "operations #||||==-- CREATE A DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS --==|||| def", "an investors group --==|||| def wallets_plot (investors_wallets, ticker, file): file_name = file +", "wallet_value[-1]) else: shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1])", "0: #Days without stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0: #Days when", "#wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds to plot impatient monkeys wallets---\" 
%", "no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0: #Days without stocks free_cash.append(free_cash[-1])", "in the analysis ticker_list = [\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN, WM, IBM,", "= round(np.random.uniform(1, freq_op)) #days between operations operations = [] #Vector with operations executed", "datetime #import pandas as pd import pandas_datareader.data as web import numpy as np", "prices ---\" % (time.time() - start_time)) #Generate the dictionaries with the operations for", "True #||||==-- Plot histogram for growths from two groups --==|||| def growth_plot (growth_1,", "as web import numpy as np import plotly import plotly.graph_objects as go import", "the analysis ticker_list = [\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL,", "#days between operations operations = [] #Vector with operations executed op_day = 0", "file_name, auto_open = True) return True ########################################## ######## SIMULATION EXECUTION ######## ########################################## import", "= investors_wallets[wallet][\"Total\"], mode = 'lines', line = dict(color = 'rgb(130,130,130)', width = 1),", ") / stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker not in list, include it", "time start_time = time.time() print(\"--- %s seconds at start ---\" % (time.time() -", "mode = 'lines', line = dict(color = 'rgb(130,130,130)', width = 1), name =", "LIBRARIES ######## ########################################## import datetime #import pandas as pd import pandas_datareader.data as web", "the initial amount of money available per investor initial_cash = 10000 ########################################## ########", "go.Scatter( x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode = 'lines', line = dict(color", "= ticker) data = data + [lt_evolution] \"\"\" benchmark = go.Scatter( x =", "* (trade_days) operations[0] = \"B\" operations[-1] = \"S\" 
return operations #||||==-- GET THE", "== \"B\": #Days when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0:", "IBM, CL, PG, TSLA #Define the initial amount of money available per investor", "price = web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) / 2", "file + \".html\" data = [] for wallet in investors_wallets: investor_data = go.Scatter(", "str(wallet)) data = data + [investor_data] \"\"\" lt_evolution = go.Scatter( x = stocks_price[ticker].index,", "def wallet_growth (wallets): total_growth = [] for wallet in wallets: growth = (", "operations: if op == 0: if len(free_cash) == 0: #First day and no", "wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\": #Days when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)]", "(patient_monkeys, \"HPQ\") print(\"--- %s seconds to calculate all monkeys wallets ---\" % (time.time()", "shares_owned.append(shares_ex) elif op == \"S\": #Days when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex", "impatient monkeys wallets---\" % (time.time() - start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s", "#||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR --==|||| def wallet_evolution (operations, ticker): free_cash", "wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds to calculate", "= monkey_population (200, 65) print(\"--- %s seconds to populate all monkeys ---\" %", "\"B\": #Days when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex", "file + \".html\" # Group data together hist_data = [growth_1, growth_2] group_labels =", "total_growth #||||==-- Plot of wallets for an investors group --==|||| def wallets_plot (investors_wallets,", "ticker): wallet = {} if ticker in ticker_list: for investor in 
investors_group: wallet[investor]", "start_time)) growth_plot (impatient_growth, patient_growth, \"growth\") #Plot the evolution for every wallet #wallets_plot (wallets_impatients,", "= ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker", "share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex = int(initial_cash / share_price) wallet_value.append(share_price", "for every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds to plot impatient", "date for the data start = datetime.datetime(2015, 12, 11) end = datetime.datetime(2019, 12,", "\"HPQ\", \"Prueba1\") #print(\"--- %s seconds to plot impatient monkeys wallets---\" % (time.time() -", "growth for the benchmark and the wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth", "-*- \"\"\" Created on Tue Dec 10 21:44:03 2019 @author: Christian \"\"\" ##########################################", "next_op == \"S\": next_op = \"B\" #Avoid last operation is a BUY by", "elif op == \"B\": #Days when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash)", "the days of tradding data available stocks_price = get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count()", "#||||==-- Plot histogram for growths from two groups --==|||| def growth_plot (growth_1, growth_2,", "(trade_days) operations[0] = \"B\" operations[-1] = \"S\" return operations #||||==-- GET THE PRICES", "analysis ticker_list = [\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL, PG,", "= time.time() print(\"--- %s seconds at start ---\" % (time.time() - start_time)) #Get", "OPERATIONS OF THE MONKEYS --==|||| def monkey_broker (trade_days, freq_op): #trade_days is the days", "the program again\") return False #||||==-- Growth percentage of the investment for an", "days of the experiment #freq_op is the average 
hold time for a position", "growth = ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth else: print", "last operation is a BUY by setting a SELL the last day (if", "the growth for the benchmark and the wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark", "(impatient_growth, patient_growth, \"growth\") #Plot the evolution for every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\")", "= [x + y for x, y in zip(free_cash, wallet_value)] return {\"Free cash\":", "for op in operations: if op == 0: if len(free_cash) == 0: #First", "[] for wallet in wallets: growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) /", "stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\": #Days when buy shares", "of tradding data available stocks_price = get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s", "the tickers in the analysis ticker_list = [\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN,", "#||||==-- CREATE A VECTOR WITH THE OPERATIONS OF THE MONKEYS --==|||| def monkey_broker", "and the wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \" + str(hpq_growth)", "dict(color = 'rgb(130,130,130)', width = 1), name = \"Investor_\" + str(wallet)) data =", "price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price #||||==-- EXECUTE THE", "BUY // S for SELL hold_op = round(np.random.uniform(1, freq_op)) #days between operations operations", "price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash /", "= int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op", "'rgb(30,30,30)', width = 5 ), name = 
ticker) data = data + [lt_evolution]", "\"\"\" lt_evolution = go.Scatter( x = stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines',", "@author: Christian \"\"\" ########################################## ######## IMPORT LIBRARIES ######## ########################################## import datetime #import pandas", "THE OPERATIONS OF THE MONKEYS --==|||| def monkey_broker (trade_days, freq_op): #trade_days is the", "a SELL the last day (if needed) if next_op == \"S\": operations.append(\"S\") else:", "= wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds to", "= get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get prices ---\"", "op_day = 0 if next_op == \"B\": next_op = \"S\" elif next_op ==", "experiment #freq_op is the average hold time for a position next_op = \"B\"", "bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False, output_type = 'file', filename = file_name,", "the dictionaries with the evolutoin of the wallets wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\")", "seconds to get prices ---\" % (time.time() - start_time)) #Generate the dictionaries with", "operations #||||==-- GET THE PRICES AND ADD THE DAILY AVERAGE VALUE CALCULATED WITH", "CREATE A DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS --==|||| def monkey_population (n,", "= dict(color = 'rgb(130,130,130)', width = 1), name = \"Investor_\" + str(wallet)) data", "\"\"\" Created on Tue Dec 10 21:44:03 2019 @author: Christian \"\"\" ########################################## ########", "= [\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL, PG, TSLA #Define", "in operations: if op == 0: if len(free_cash) == 0: #First day and", "pd import pandas_datareader.data as web import numpy as np import plotly import plotly.graph_objects", "0 if next_op == 
\"B\": next_op = \"S\" elif next_op == \"S\": next_op", "the benchmark and the wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \"", "= \"S\" return operations #||||==-- GET THE PRICES AND ADD THE DAILY AVERAGE", "########################################## import datetime #import pandas as pd import pandas_datareader.data as web import numpy", "last day (if needed) if next_op == \"S\": operations.append(\"S\") else: operations.append(0) return operations", "wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds to calculate all monkeys wallets", "go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False, output_type = 'file', filename = file_name, auto_open", "elif next_op == \"S\": next_op = \"B\" #Avoid last operation is a BUY", "#Calculate the growth for the benchmark and the wallets hpq_growth = benchmark_growth (\"HPQ\")", "end = datetime.datetime(2019, 12, 10) #Declare the tickers in the analysis ticker_list =", "= file + \".html\" # Group data together hist_data = [growth_1, growth_2] group_labels", "operations[0] = \"B\" operations[-1] = \"S\" return operations #||||==-- GET THE PRICES AND", "seconds to plot impatient monkeys wallets---\" % (time.time() - start_time)) #wallets_plot (wallets_patients, \"HPQ\",", "ONE INVESTOR --==|||| def wallet_evolution (operations, ticker): free_cash = [] shares_owned = []", "WITH THE OPERATIONS OF MULTIPLE MONKEYS --==|||| def monkey_population (n, freq_op): monkeys =", "growth_plot (impatient_growth, patient_growth, \"growth\") #Plot the evolution for every wallet #wallets_plot (wallets_impatients, \"HPQ\",", "distplot with custom bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link", "for investor in investors_group: wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number: \"", "investor in investors_group: 
wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number: \" +", "if next_op == \"B\": next_op = \"S\" elif next_op == \"S\": next_op =", "when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] +", "Group data together hist_data = [growth_1, growth_2] group_labels = ['Group 1', 'Group 2']", "= 0 if next_op == \"B\": next_op = \"S\" elif next_op == \"S\":", "filename = file_name, auto_open = True) return True ########################################## ######## SIMULATION EXECUTION ########", "- start_time)) #Calculate the growth for the benchmark and the wallets hpq_growth =", "Plot of wallets for an investors group --==|||| def wallets_plot (investors_wallets, ticker, file):", "operations.append(0) return operations #||||==-- CREATE A DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS", "(\"Ticker not in list, include it and run the program again\") return False", "with the operations for the monkeys impatient_monkeys = monkey_population (200, 8) patient_monkeys =", "= wallet_growth (wallets_patients) print(\"Impatient monkey got an average growth of: \" + str(np.average(impatient_growth))", "########################################## ######## INPUT DATA ######## ########################################## #Define start and end date for the", "stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width", "THE OPERATIONS OF MULTIPLE MONKEYS --==|||| def monkey_population (n, freq_op): monkeys = {}", "in ticker_list: for investor in investors_group: wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print (\"Monkey", "share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\": #Days", 
"'rgb(30,30,30)', width = 5 ), name = ticker) data = data + [benchmark]", "#Get the prices and calculate the days of tradding data available stocks_price =", "+ y for x, y in zip(free_cash, wallet_value)] return {\"Free cash\": free_cash, \"Wallet", "monkey_broker (trade_days, freq_op) return monkeys #||||==-- CREATE A VECTOR WITH LONG TERM OPERATION", "start_time)) #Calculate the growth for the benchmark and the wallets hpq_growth = benchmark_growth", "= stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get prices ---\" % (time.time() - start_time))", "######## ########################################## import datetime #import pandas as pd import pandas_datareader.data as web import", "seconds to populate all monkeys ---\" % (time.time() - start_time)) #Generate the dictionaries", "the last day (if needed) if next_op == \"S\": operations.append(\"S\") else: operations.append(0) return", "shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex) total_value = [x + y", "histogram for growths from two groups --==|||| def growth_plot (growth_1, growth_2, file): file_name", "width = 1), name = \"Investor_\" + str(wallet)) data = data + [investor_data]", "an invetsors group --==|||| def benchmark_growth (ticker): if ticker in ticker_list: growth =", "ff ########################################## ######## INPUT DATA ######## ########################################## #Define start and end date for", "it and run the program again\") return False #||||==-- Growth percentage of the", "the evolutoin of the wallets wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution", "SELL the last day (if needed) if next_op == \"S\": operations.append(\"S\") else: operations.append(0)", "[0] * (trade_days) operations[0] = \"B\" operations[-1] = \"S\" return operations #||||==-- GET", "= True) return True #||||==-- Plot histogram for growths from two 
groups --==||||", "#freq_op is the average hold time for a position next_op = \"B\" #B", "data + [investor_data] \"\"\" lt_evolution = go.Scatter( x = stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"],", "the experiment #freq_op is the average hold time for a position next_op =", "all monkeys ---\" % (time.time() - start_time)) #Generate the dictionaries with the evolutoin", "+ str(investor)) #print (\"Ends period with: \" + str(wallet[investor][\"Total\"][-1]) ) return wallet else:", "\"S\": #Days when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0)", "range(n): monkeys[i] = monkey_broker (trade_days, freq_op) return monkeys #||||==-- CREATE A VECTOR WITH", "(time.time() - start_time)) #Generate the dictionaries with the evolutoin of the wallets wallets_impatients", "ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth else:", "ticker, file): file_name = file + \".html\" data = [] for wallet in", "ticker_list: price = web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) /", "(200, 65) print(\"--- %s seconds to populate all monkeys ---\" % (time.time() -", "/ price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price #||||==-- EXECUTE THE ORDERS FOR ONE", "data = data + [investor_data] \"\"\" lt_evolution = go.Scatter( x = stocks_price[ticker].index, y", "in range(n): monkeys[i] = monkey_broker (trade_days, freq_op) return monkeys #||||==-- CREATE A VECTOR", "the data start = datetime.datetime(2015, 12, 11) end = datetime.datetime(2019, 12, 10) #Declare", "= datetime.datetime(2019, 12, 10) #Declare the tickers in the analysis ticker_list = [\"HPQ\"]", "#Days when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) 
wallet_value.append(0) free_cash.append(free_cash[-1]", "end date for the data start = datetime.datetime(2015, 12, 11) end = datetime.datetime(2019,", "Plot histogram for growths from two groups --==|||| def growth_plot (growth_1, growth_2, file):", "investors_group: wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number: \" + str(investor)) #print", "\".html\" data = [] for wallet in investors_wallets: investor_data = go.Scatter( x =", "shares_owned[-1] > 0: #Days when hold position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1]", "(time.time() - start_time)) #Calculate the growth for the benchmark and the wallets hpq_growth", "average growth of: \" + str(np.average(patient_growth)) ) print(\"--- %s seconds to calculate all", "price return stocks_price #||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR --==|||| def wallet_evolution", ") impatient_growth = wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey got an", ") return wallet else: print (\"Ticker not in list, include it and run", "(time.time() - start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds to plot patient", "10000 ########################################## ######## FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE A VECTOR WITH THE", "A VECTOR WITH THE OPERATIONS OF THE MONKEYS --==|||| def monkey_broker (trade_days, freq_op):", "#||||==-- Execution of the orders for an investors group --==|||| def wallets_evolution (investors_group,", "= stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex) total_value", "for x, y in zip(free_cash, wallet_value)] return {\"Free cash\": free_cash, \"Wallet value\": wallet_value,", "ticker in ticker_list: 
price = web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] = (price[\"High\"] +", "freq_op)) #days between operations operations = [] #Vector with operations executed op_day =", "{\"Free cash\": free_cash, \"Wallet value\": wallet_value, \"Total\": total_value} #||||==-- Execution of the orders", "[investor_data] \"\"\" lt_evolution = go.Scatter( x = stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode =", "+ [lt_evolution] \"\"\" benchmark = go.Scatter( x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode", "calculate the days of tradding data available stocks_price = get_prices (ticker_list) trade_days =", "= ticker) data = data + [benchmark] layout = go.Layout( title = file,", "\"S\": operations.append(\"S\") else: operations.append(0) return operations #||||==-- CREATE A DICTIONARY WITH THE OPERATIONS", "np import plotly import plotly.graph_objects as go import plotly.figure_factory as ff ########################################## ########", "{} for i in range(n): monkeys[i] = monkey_broker (trade_days, freq_op) return monkeys #||||==--", "- wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth #||||==-- Plot", "(wallets): total_growth = [] for wallet in wallets: growth = ( wallets[wallet][\"Total\"][-1] -", "\"Investor_\" + str(wallet)) data = data + [investor_data] \"\"\" lt_evolution = go.Scatter( x", "wallet_evolution (operations, ticker): free_cash = [] shares_owned = [] wallet_value = [] for", "= [] for wallet in investors_wallets: investor_data = go.Scatter( x = stocks_price[ticker].index, y", "else: operations.append(0) return operations #||||==-- CREATE A DICTIONARY WITH THE OPERATIONS OF MULTIPLE", "AMZN, WM, IBM, CL, PG, TSLA #Define the initial amount of money available", "Tue Dec 10 21:44:03 2019 @author: Christian \"\"\" ########################################## ######## IMPORT 
LIBRARIES ########", "% (time.time() - start_time)) growth_plot (impatient_growth, patient_growth, \"growth\") #Plot the evolution for every", "data = data + [lt_evolution] \"\"\" benchmark = go.Scatter( x = stocks_price[ticker].index, y", "start, end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"] *", "stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker not in list, include", "% (time.time() - start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds to plot", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Dec 10 21:44:03 2019", "== \"S\": next_op = \"B\" #Avoid last operation is a BUY by setting", "stocks_price = {} for ticker in ticker_list: price = web.DataReader(ticker, 'yahoo', start, end)", "#B for BUY // S for SELL hold_op = round(np.random.uniform(1, freq_op)) #days between", "stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex) total_value =", "free_cash.append(free_cash[-1] + share_price * shares_ex) total_value = [x + y for x, y", "* shares_ex) total_value = [x + y for x, y in zip(free_cash, wallet_value)]", "#wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds to plot patient monkeys wallets---\" %", "DATA ######## ########################################## #Define start and end date for the data start =", "5 ), name = ticker) data = data + [lt_evolution] \"\"\" benchmark =", "curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False, output_type = 'file', filename = file_name, auto_open", "= True) return True ########################################## ######## SIMULATION EXECUTION ######## ########################################## import time start_time", "), name = ticker) data = data + [lt_evolution] \"\"\" benchmark = go.Scatter(", "freq_op): 
#trade_days is the days of the experiment #freq_op is the average hold", "data + [lt_evolution] \"\"\" benchmark = go.Scatter( x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"],", "print(\"--- %s seconds to calculate all monkeys wallets ---\" % (time.time() - start_time))", "operations[-1] = \"S\" return operations #||||==-- GET THE PRICES AND ADD THE DAILY", "the investment for an invetsors group --==|||| def benchmark_growth (ticker): if ticker in", "= ['Group 1', 'Group 2'] # Create distplot with custom bin_size fig =", "str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey got", "10) #Declare the tickers in the analysis ticker_list = [\"HPQ\"] #Other tickers: ^GSPC,", "x = stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line = dict(color =", "= [0] * (trade_days) operations[0] = \"B\" operations[-1] = \"S\" return operations #||||==--", "elif shares_owned[-1] == 0: #Days without stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] >", "'file', filename = file_name, auto_open = True) return True ########################################## ######## SIMULATION EXECUTION", "in investors_wallets: investor_data = go.Scatter( x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode =", "patient_growth, \"growth\") #Plot the evolution for every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"---", "PG, TSLA #Define the initial amount of money available per investor initial_cash =", "wallets_evolution (investors_group, ticker): wallet = {} if ticker in ticker_list: for investor in", "days of tradding data available stocks_price = get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"---", "all monkeys wallets ---\" % (time.time() - start_time)) #Calculate the 
growth for the", "(time.time() - start_time)) #Get the prices and calculate the days of tradding data", "+ str(np.average(patient_growth)) ) print(\"--- %s seconds to calculate all wallets growth ---\" %", "--==|||| def lt_broker (trade_days): operations = [0] * (trade_days) operations[0] = \"B\" operations[-1]", "with operations executed op_day = 0 #Build vector with operations for i in", "--==|||| def wallets_plot (investors_wallets, ticker, file): file_name = file + \".html\" data =", "two groups --==|||| def growth_plot (growth_1, growth_2, file): file_name = file + \".html\"", "(investors_group[investor], ticker) #print (\"Monkey number: \" + str(investor)) #print (\"Ends period with: \"", "str(investor)) #print (\"Ends period with: \" + str(wallet[investor][\"Total\"][-1]) ) return wallet else: print", "benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \" + str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients)", "%s seconds to plot impatient monkeys wallets---\" % (time.time() - start_time)) #wallets_plot (wallets_patients,", "{} for ticker in ticker_list: price = web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] =", "--==|||| def wallets_evolution (investors_group, ticker): wallet = {} if ticker in ticker_list: for", "(investors_wallets, ticker, file): file_name = file + \".html\" data = [] for wallet", "number: \" + str(investor)) #print (\"Ends period with: \" + str(wallet[investor][\"Total\"][-1]) ) return", "go.Layout( title = file, xaxis = dict(title='Time'), yaxis = dict(title='Monetary Units')) fig =", "(operations, ticker): free_cash = [] shares_owned = [] wallet_value = [] for op", "#Generate the dictionaries with the operations for the monkeys impatient_monkeys = monkey_population (200,", "available per investor initial_cash = 10000 ########################################## ######## FUNCTIONS DECLARATION ######## ########################################## #||||==--", "wallets ---\" % (time.time() - start_time)) 
#Calculate the growth for the benchmark and", "( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth", "of the orders for an investors group --==|||| def wallets_evolution (investors_group, ticker): wallet", "CREATE A VECTOR WITH LONG TERM OPERATION --==|||| def lt_broker (trade_days): operations =", "= monkey_population (200, 8) patient_monkeys = monkey_population (200, 65) print(\"--- %s seconds to", "for an invetsors group --==|||| def benchmark_growth (ticker): if ticker in ticker_list: growth", "benchmark_growth (ticker): if ticker in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] )", "shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\": #Days when sell shares", "+ str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey", "#Declare the tickers in the analysis ticker_list = [\"HPQ\"] #Other tickers: ^GSPC, HPQ,", "day (if needed) if next_op == \"S\": operations.append(\"S\") else: operations.append(0) return operations #||||==--", "fig.show() plotly.offline.plot(fig, show_link = False, output_type = 'file', filename = file_name, auto_open =", "= [] for op in operations: if op == 0: if len(free_cash) ==", "investor_data = go.Scatter( x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode = 'lines', line", "hold position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op ==", "start_time)) #Get the prices and calculate the days of tradding data available stocks_price", "to calculate all wallets growth ---\" % (time.time() - start_time)) growth_plot (impatient_growth, patient_growth,", 
"(200, 8) patient_monkeys = monkey_population (200, 65) print(\"--- %s seconds to populate all", "else: shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex)", "12, 10) #Declare the tickers in the analysis ticker_list = [\"HPQ\"] #Other tickers:", "start and end date for the data start = datetime.datetime(2015, 12, 11) end", "of money available per investor initial_cash = 10000 ########################################## ######## FUNCTIONS DECLARATION ########", "+ str(np.average(impatient_growth)) ) print(\"Patient monkey got an average growth of: \" + str(np.average(patient_growth))", "'file', filename = file_name, auto_open = True) return True #||||==-- Plot histogram for", "calculate all wallets growth ---\" % (time.time() - start_time)) growth_plot (impatient_growth, patient_growth, \"growth\")", "op in operations: if op == 0: if len(free_cash) == 0: #First day", "return growth else: print (\"Ticker not in list, include it and run the", "#Build vector with operations for i in range(trade_days-1): if op_day < hold_op: operations.append(0)", "= [] shares_owned = [] wallet_value = [] for op in operations: if", "of the experiment #freq_op is the average hold time for a position next_op", "(growth_1, growth_2, file): file_name = file + \".html\" # Group data together hist_data", "% (time.time() - start_time)) #Generate the dictionaries with the operations for the monkeys", "print(\"Benchmark growth is: \" + str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients) patient_growth =", "total_growth = [] for wallet in wallets: growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0]", "wallet_value.append(new_value) elif op == \"B\": #Days when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if", "if ticker in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] - 
stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0]", "shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\": #Days when buy shares share_price =", "go.Scatter( x = stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line = dict(color", "MULTIPLE MONKEYS --==|||| def monkey_population (n, freq_op): monkeys = {} for i in", "dict(title='Time'), yaxis = dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False,", "for BUY // S for SELL hold_op = round(np.random.uniform(1, freq_op)) #days between operations", "price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price", "/ share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex = int(free_cash[-1] /", "---\" % (time.time() - start_time)) #Get the prices and calculate the days of", "average hold time for a position next_op = \"B\" #B for BUY //", "\"growth\") #Plot the evolution for every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s", "#||||==-- Growth percentage of the investment for an invetsors group --==|||| def wallet_growth", "of wallets for an investors group --==|||| def wallets_plot (investors_wallets, ticker, file): file_name", "custom bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False,", "the wallets hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \" + str(hpq_growth) )", "hist_data = [growth_1, growth_2] group_labels = ['Group 1', 'Group 2'] # Create distplot", "dictionaries with the operations for the monkeys impatient_monkeys = monkey_population (200, 8) patient_monkeys", "growth_2, file): file_name = file + \".html\" # Group data together hist_data 
=", "the average hold time for a position next_op = \"B\" #B for BUY", "auto_open = True) return True ########################################## ######## SIMULATION EXECUTION ######## ########################################## import time", "######## SIMULATION EXECUTION ######## ########################################## import time start_time = time.time() print(\"--- %s seconds", "ticker) data = data + [benchmark] layout = go.Layout( title = file, xaxis", "in list, include it and run the program again\") return False #||||==-- Growth", "#Generate the dictionaries with the evolutoin of the wallets wallets_impatients = wallets_evolution (impatient_monkeys,", "(\"Monkey number: \" + str(investor)) #print (\"Ends period with: \" + str(wallet[investor][\"Total\"][-1]) )", "between operations operations = [] #Vector with operations executed op_day = 0 #Build", "########################################## import time start_time = time.time() print(\"--- %s seconds at start ---\" %", "from two groups --==|||| def growth_plot (growth_1, growth_2, file): file_name = file +", "HIGH AND LOW --==|||| def get_prices (ticker_list): stocks_price = {} for ticker in", "investment for an invetsors group --==|||| def wallet_growth (wallets): total_growth = [] for", "Dec 10 21:44:03 2019 @author: Christian \"\"\" ########################################## ######## IMPORT LIBRARIES ######## ##########################################", "def monkey_population (n, freq_op): monkeys = {} for i in range(n): monkeys[i] =", "--==|||| def wallet_evolution (operations, ticker): free_cash = [] shares_owned = [] wallet_value =", "(trade_days): operations = [0] * (trade_days) operations[0] = \"B\" operations[-1] = \"S\" return", "percentage of the investment for an invetsors group --==|||| def wallet_growth (wallets): total_growth", "- start_time)) #Get the prices and calculate the days of tradding data available", "total_growth.append(growth) return total_growth #||||==-- Plot of wallets for an 
investors group --==|||| def", "def monkey_broker (trade_days, freq_op): #trade_days is the days of the experiment #freq_op is", "IMPORT LIBRARIES ######## ########################################## import datetime #import pandas as pd import pandas_datareader.data as", "group --==|||| def wallets_evolution (investors_group, ticker): wallet = {} if ticker in ticker_list:", "start_time = time.time() print(\"--- %s seconds at start ---\" % (time.time() - start_time))", "== 0: #Days without stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0: #Days", "monkeys wallets---\" % (time.time() - start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds", "money available per investor initial_cash = 10000 ########################################## ######## FUNCTIONS DECLARATION ######## ##########################################", "import time start_time = time.time() print(\"--- %s seconds at start ---\" % (time.time()", "is: \" + str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients)", "LONG TERM OPERATION --==|||| def lt_broker (trade_days): operations = [0] * (trade_days) operations[0]", "position next_op = \"B\" #B for BUY // S for SELL hold_op =", "seconds to calculate all wallets growth ---\" % (time.time() - start_time)) growth_plot (impatient_growth,", "HPQ, AMZN, WM, IBM, CL, PG, TSLA #Define the initial amount of money", "dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False, output_type = 'file',", "get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get prices ---\" %", "INPUT DATA ######## ########################################## #Define start and end date for the data start", "print(\"Impatient monkey got an average growth of: \" + str(np.average(impatient_growth)) ) print(\"Patient 
monkey", "#Days without stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0: #Days when hold", "if next_op == \"S\": operations.append(\"S\") else: operations.append(0) return operations #||||==-- CREATE A DICTIONARY", "op_day = 0 #Build vector with operations for i in range(trade_days-1): if op_day", "to populate all monkeys ---\" % (time.time() - start_time)) #Generate the dictionaries with", "as np import plotly import plotly.graph_objects as go import plotly.figure_factory as ff ##########################################", "with custom bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link =", "( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker not", "VALUE CALCULATED WITH THE HIGH AND LOW --==|||| def get_prices (ticker_list): stocks_price =", "+ str(wallet)) data = data + [investor_data] \"\"\" lt_evolution = go.Scatter( x =", "> 0: #Days when hold position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1])", "free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1]", "LOW --==|||| def get_prices (ticker_list): stocks_price = {} for ticker in ticker_list: price", "initial_cash = 10000 ########################################## ######## FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE A VECTOR", "[benchmark] layout = go.Layout( title = file, xaxis = dict(title='Time'), yaxis = dict(title='Monetary", "[] wallet_value = [] for op in operations: if op == 0: if", "== \"S\": operations.append(\"S\") else: operations.append(0) return operations #||||==-- CREATE A DICTIONARY 
WITH THE", "next_op = \"B\" #Avoid last operation is a BUY by setting a SELL", "freq_op)) op_day = 0 if next_op == \"B\": next_op = \"S\" elif next_op", "= price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price #||||==-- EXECUTE", "stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0: #Days when hold position free_cash.append(free_cash[-1])", "wallet_value, \"Total\": total_value} #||||==-- Execution of the orders for an investors group --==||||", "--==|||| def benchmark_growth (ticker): if ticker in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] -", "ticker): free_cash = [] shares_owned = [] wallet_value = [] for op in", "print (\"Ticker not in list, include it and run the program again\") return", "(n, freq_op): monkeys = {} for i in range(n): monkeys[i] = monkey_broker (trade_days,", "setting a SELL the last day (if needed) if next_op == \"S\": operations.append(\"S\")", "shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\": #Days when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)]", "value\": wallet_value, \"Total\": total_value} #||||==-- Execution of the orders for an investors group", "CL, PG, TSLA #Define the initial amount of money available per investor initial_cash", "hold time for a position next_op = \"B\" #B for BUY // S", "11) end = datetime.datetime(2019, 12, 10) #Declare the tickers in the analysis ticker_list", "= {} for i in range(n): monkeys[i] = monkey_broker (trade_days, freq_op) return monkeys", "end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash", "tradding data available stocks_price = get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds", "next_op = \"B\" #B for BUY // S for SELL hold_op = round(np.random.uniform(1,", "invetsors group --==|||| def 
benchmark_growth (ticker): if ticker in ticker_list: growth = (", "tickers in the analysis ticker_list = [\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN, WM,", "= stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode = 'lines', line = dict(color = 'rgb(130,130,130)',", "as pd import pandas_datareader.data as web import numpy as np import plotly import", "#Days when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex =", "shares_owned = [] wallet_value = [] for op in operations: if op ==", "(if needed) if next_op == \"S\": operations.append(\"S\") else: operations.append(0) return operations #||||==-- CREATE", "MONKEYS --==|||| def monkey_broker (trade_days, freq_op): #trade_days is the days of the experiment", "wallet_value)] return {\"Free cash\": free_cash, \"Wallet value\": wallet_value, \"Total\": total_value} #||||==-- Execution of", "investors group --==|||| def wallets_evolution (investors_group, ticker): wallet = {} if ticker in", "else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day = 0 if next_op == \"B\":", "def benchmark_growth (ticker): if ticker in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0]", "import pandas_datareader.data as web import numpy as np import plotly import plotly.graph_objects as", "ORDERS FOR ONE INVESTOR --==|||| def wallet_evolution (operations, ticker): free_cash = [] shares_owned", "\"Wallet value\": wallet_value, \"Total\": total_value} #||||==-- Execution of the orders for an investors", "shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex) total_value = [x", "stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width", "= [] #Vector with operations executed op_day = 0 #Build vector with operations", "with the evolutoin of the wallets 
wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients =", "start = datetime.datetime(2015, 12, 11) end = datetime.datetime(2019, 12, 10) #Declare the tickers", "= wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey got an average growth", "web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"] =", "\"S\" return operations #||||==-- GET THE PRICES AND ADD THE DAILY AVERAGE VALUE", "######## FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE A VECTOR WITH THE OPERATIONS OF", "round(np.random.uniform(1, freq_op)) op_day = 0 if next_op == \"B\": next_op = \"S\" elif", "def get_prices (ticker_list): stocks_price = {} for ticker in ticker_list: price = web.DataReader(ticker,", "= [] for wallet in wallets: growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] )", "file_name = file + \".html\" # Group data together hist_data = [growth_1, growth_2]", "OPERATION --==|||| def lt_broker (trade_days): operations = [0] * (trade_days) operations[0] = \"B\"", "shares_ex = int(initial_cash / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex", "else: print (\"Ticker not in list, include it and run the program again\")", "data + [benchmark] layout = go.Layout( title = file, xaxis = dict(title='Time'), yaxis", "growth total_growth.append(growth) return total_growth #||||==-- Plot of wallets for an investors group --==||||", "\"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds to calculate all monkeys", "group --==|||| def wallets_plot (investors_wallets, ticker, file): file_name = file + \".html\" data", "of: \" + str(np.average(patient_growth)) ) print(\"--- %s seconds to calculate all wallets growth", "---\" % (time.time() - start_time)) growth_plot 
(impatient_growth, patient_growth, \"growth\") #Plot the evolution for", "wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds", "########################################## ######## FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE A VECTOR WITH THE OPERATIONS", "\" + str(np.average(patient_growth)) ) print(\"--- %s seconds to calculate all wallets growth ---\"", ") print(\"--- %s seconds to calculate all wallets growth ---\" % (time.time() -", "every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds to plot impatient monkeys", "^GSPC, HPQ, AMZN, WM, IBM, CL, PG, TSLA #Define the initial amount of", "sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price", "'lines', line = dict(color = 'rgb(30,30,30)', width = 5 ), name = ticker)", "= stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex = int(initial_cash / share_price) wallet_value.append(share_price *", "invetsors group --==|||| def wallet_growth (wallets): total_growth = [] for wallet in wallets:", "print(\"Patient monkey got an average growth of: \" + str(np.average(patient_growth)) ) print(\"--- %s", "operations.append(\"S\") else: operations.append(0) return operations #||||==-- CREATE A DICTIONARY WITH THE OPERATIONS OF", "investors_wallets: investor_data = go.Scatter( x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode = 'lines',", "to calculate all monkeys wallets ---\" % (time.time() - start_time)) #Calculate the growth", "patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey got an average growth of: \" +", "== 0: if len(free_cash) == 0: #First day and no buy free_cash.append(initial_cash) shares_owned.append(0)", 
"data available stocks_price = get_prices (ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to", "True) return True #||||==-- Plot histogram for growths from two groups --==|||| def", "filename = file_name, auto_open = True) return True #||||==-- Plot histogram for growths", "for growths from two groups --==|||| def growth_plot (growth_1, growth_2, file): file_name =", "and calculate the days of tradding data available stocks_price = get_prices (ticker_list) trade_days", "op_day < hold_op: operations.append(0) op_day += 1 else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op))", "op == \"B\": #Days when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) ==", "ticker) #print (\"Monkey number: \" + str(investor)) #print (\"Ends period with: \" +", "total_value = [x + y for x, y in zip(free_cash, wallet_value)] return {\"Free", "wallet else: print (\"Ticker not in list, include it and run the program", "as go import plotly.figure_factory as ff ########################################## ######## INPUT DATA ######## ########################################## #Define", "is the days of the experiment #freq_op is the average hold time for", "%s seconds to get prices ---\" % (time.time() - start_time)) #Generate the dictionaries", "get prices ---\" % (time.time() - start_time)) #Generate the dictionaries with the operations", "operations = [0] * (trade_days) operations[0] = \"B\" operations[-1] = \"S\" return operations", "wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price", "data = [] for wallet in investors_wallets: investor_data = go.Scatter( x = stocks_price[ticker].index,", "[] for wallet in investors_wallets: investor_data = go.Scatter( x = stocks_price[ticker].index, y =", "), name = ticker) data = data + [benchmark] layout = go.Layout( 
title", "ADD THE DAILY AVERAGE VALUE CALCULATED WITH THE HIGH AND LOW --==|||| def", "fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False, output_type = 'file', filename =", "monkeys ---\" % (time.time() - start_time)) #Generate the dictionaries with the evolutoin of", "file): file_name = file + \".html\" data = [] for wallet in investors_wallets:", "\"S\": next_op = \"B\" #Avoid last operation is a BUY by setting a", "ticker_list: for investor in investors_group: wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number:", "--==|||| def wallet_growth (wallets): total_growth = [] for wallet in wallets: growth =", "%s seconds to populate all monkeys ---\" % (time.time() - start_time)) #Generate the", "amount of money available per investor initial_cash = 10000 ########################################## ######## FUNCTIONS DECLARATION", "trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get prices ---\" % (time.time() -", "OF MULTIPLE MONKEYS --==|||| def monkey_population (n, freq_op): monkeys = {} for i", "to get prices ---\" % (time.time() - start_time)) #Generate the dictionaries with the", "investors_wallets[wallet][\"Total\"], mode = 'lines', line = dict(color = 'rgb(130,130,130)', width = 1), name", "%s seconds to calculate all wallets growth ---\" % (time.time() - start_time)) growth_plot", "8) patient_monkeys = monkey_population (200, 65) print(\"--- %s seconds to populate all monkeys", "again\") return False #||||==-- Growth percentage of the investment for an invetsors group", "\"HPQ\") print(\"--- %s seconds to calculate all monkeys wallets ---\" % (time.time() -", "- start_time)) growth_plot (impatient_growth, patient_growth, \"growth\") #Plot the evolution for every wallet #wallets_plot", "in ticker_list: price = web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"])", "= 0 #Build vector with operations for i 
in range(trade_days-1): if op_day <", "\"\"\" benchmark = go.Scatter( x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode = 'lines',", "wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient monkey got an average growth of:", "utf-8 -*- \"\"\" Created on Tue Dec 10 21:44:03 2019 @author: Christian \"\"\"", "i in range(trade_days-1): if op_day < hold_op: operations.append(0) op_day += 1 else: operations.append(next_op)", "wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number: \" + str(investor)) #print (\"Ends period with:", "next_op = \"S\" elif next_op == \"S\": next_op = \"B\" #Avoid last operation", "def wallets_evolution (investors_group, ticker): wallet = {} if ticker in ticker_list: for investor", "the prices and calculate the days of tradding data available stocks_price = get_prices", "investor initial_cash = 10000 ########################################## ######## FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE A", "when buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex = int(initial_cash", "wallets_evolution (patient_monkeys, \"HPQ\") print(\"--- %s seconds to calculate all monkeys wallets ---\" %", "#Define start and end date for the data start = datetime.datetime(2015, 12, 11)", "GET THE PRICES AND ADD THE DAILY AVERAGE VALUE CALCULATED WITH THE HIGH", "monkeys[i] = monkey_broker (trade_days, freq_op) return monkeys #||||==-- CREATE A VECTOR WITH LONG", "time.time() print(\"--- %s seconds at start ---\" % (time.time() - start_time)) #Get the", "BUY by setting a SELL the last day (if needed) if next_op ==", "a position next_op = \"B\" #B for BUY // S for SELL hold_op", "wallet_value.append(0) elif shares_owned[-1] == 0: #Days without stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1]", "def wallet_evolution 
(operations, ticker): free_cash = [] shares_owned = [] wallet_value = []", "= 'rgb(30,30,30)', width = 5 ), name = ticker) data = data +", "= {} for ticker in ticker_list: price = web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"]", "int(initial_cash / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex = int(free_cash[-1]", "2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price return stocks_price", "y for x, y in zip(free_cash, wallet_value)] return {\"Free cash\": free_cash, \"Wallet value\":", "= file + \".html\" data = [] for wallet in investors_wallets: investor_data =", "= benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \" + str(hpq_growth) ) impatient_growth = wallet_growth", "operations for i in range(trade_days-1): if op_day < hold_op: operations.append(0) op_day += 1", "wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\": #Days when", "benchmark = go.Scatter( x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode = 'lines', line", "hold_op: operations.append(0) op_day += 1 else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day =", "+ [benchmark] layout = go.Layout( title = file, xaxis = dict(title='Time'), yaxis =", "VECTOR WITH LONG TERM OPERATION --==|||| def lt_broker (trade_days): operations = [0] *", "---\" % (time.time() - start_time)) #Generate the dictionaries with the evolutoin of the", "= dict(title='Time'), yaxis = dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link =", "growth is: \" + str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients) patient_growth = wallet_growth", "/ 2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker] = price return", 
"output_type = 'file', filename = file_name, auto_open = True) return True ########################################## ########", "S for SELL hold_op = round(np.random.uniform(1, freq_op)) #days between operations operations = []", "\"Prueba1\") #print(\"--- %s seconds to plot impatient monkeys wallets---\" % (time.time() - start_time))", "= 10000 ########################################## ######## FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE A VECTOR WITH", "(\"Ends period with: \" + str(wallet[investor][\"Total\"][-1]) ) return wallet else: print (\"Ticker not", "patient_monkeys = monkey_population (200, 65) print(\"--- %s seconds to populate all monkeys ---\"", "(ticker): if ticker in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) /", "True ########################################## ######## SIMULATION EXECUTION ######## ########################################## import time start_time = time.time() print(\"---", "\".html\" # Group data together hist_data = [growth_1, growth_2] group_labels = ['Group 1',", "= 'file', filename = file_name, auto_open = True) return True ########################################## ######## SIMULATION", "########################################## ######## SIMULATION EXECUTION ######## ########################################## import time start_time = time.time() print(\"--- %s", "[lt_evolution] \"\"\" benchmark = go.Scatter( x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode =", "zip(free_cash, wallet_value)] return {\"Free cash\": free_cash, \"Wallet value\": wallet_value, \"Total\": total_value} #||||==-- Execution", "for i in range(n): monkeys[i] = monkey_broker (trade_days, freq_op) return monkeys #||||==-- CREATE", "for the monkeys impatient_monkeys = monkey_population (200, 8) patient_monkeys = monkey_population (200, 65)", "(wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds to plot patient 
monkeys wallets---\" % (time.time()", "+ \".html\" data = [] for wallet in investors_wallets: investor_data = go.Scatter( x", "= file_name, auto_open = True) return True ########################################## ######## SIMULATION EXECUTION ######## ##########################################", "the operations for the monkeys impatient_monkeys = monkey_population (200, 8) patient_monkeys = monkey_population", "---\" % (time.time() - start_time)) #Generate the dictionaries with the operations for the", "A DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS --==|||| def monkey_population (n, freq_op):", "the orders for an investors group --==|||| def wallets_evolution (investors_group, ticker): wallet =", "= \"S\" elif next_op == \"S\": next_op = \"B\" #Avoid last operation is", "/ wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth #||||==-- Plot of wallets for", "+ str(wallet[investor][\"Total\"][-1]) ) return wallet else: print (\"Ticker not in list, include it", "next_op == \"S\": operations.append(\"S\") else: operations.append(0) return operations #||||==-- CREATE A DICTIONARY WITH", "'rgb(130,130,130)', width = 1), name = \"Investor_\" + str(wallet)) data = data +", "for an investors group --==|||| def wallets_evolution (investors_group, ticker): wallet = {} if", "xaxis = dict(title='Time'), yaxis = dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link", "growth of: \" + str(np.average(impatient_growth)) ) print(\"Patient monkey got an average growth of:", "the dictionaries with the operations for the monkeys impatient_monkeys = monkey_population (200, 8)", "= dict(color = 'rgb(30,30,30)', width = 5 ), name = ticker) data =", "for wallet in investors_wallets: investor_data = go.Scatter( x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"],", "%s seconds to calculate all monkeys wallets ---\" % (time.time() - start_time)) 
#Calculate", "// S for SELL hold_op = round(np.random.uniform(1, freq_op)) #days between operations operations =", "stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker not in list, include it and run", "= stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)',", "0: #Days when hold position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value)", "= file, xaxis = dict(title='Time'), yaxis = dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout)", "#Vector with operations executed op_day = 0 #Build vector with operations for i", "########################################## ######## IMPORT LIBRARIES ######## ########################################## import datetime #import pandas as pd import", "import datetime #import pandas as pd import pandas_datareader.data as web import numpy as", "stocks_price[ticker] = price return stocks_price #||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR --==||||", "op == \"S\": #Days when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1]", "= 'file', filename = file_name, auto_open = True) return True #||||==-- Plot histogram", "= 'lines', line = dict(color = 'rgb(130,130,130)', width = 1), name = \"Investor_\"", "<reponame>cferrisroig/monkey-broker<filename>monkey-broker.py # -*- coding: utf-8 -*- \"\"\" Created on Tue Dec 10 21:44:03", "investment for an invetsors group --==|||| def benchmark_growth (ticker): if ticker in ticker_list:", "= web.DataReader(ticker, 'yahoo', start, end) price[\"AVG\"] = (price[\"High\"] + price[\"Low\"]) / 2 price[\"Benchmark\"]", "in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth", "len(free_cash) == 0: shares_ex = int(initial_cash / 
share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash -", "monkeys = {} for i in range(n): monkeys[i] = monkey_broker (trade_days, freq_op) return", "monkey_population (n, freq_op): monkeys = {} for i in range(n): monkeys[i] = monkey_broker", "(\"HPQ\") print(\"Benchmark growth is: \" + str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients) patient_growth", "wallets growth ---\" % (time.time() - start_time)) growth_plot (impatient_growth, patient_growth, \"growth\") #Plot the", "monkey got an average growth of: \" + str(np.average(impatient_growth)) ) print(\"Patient monkey got", "operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day = 0 if next_op == \"B\": next_op", "go.Scatter( x = stocks_price[ticker].index, y = stocks_price[ticker][\"Benchmark\"], mode = 'lines', line = dict(color", "ticker in ticker_list: growth = ( stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return", "import plotly.graph_objects as go import plotly.figure_factory as ff ########################################## ######## INPUT DATA ########", "executed op_day = 0 #Build vector with operations for i in range(trade_days-1): if", "monkey_broker (trade_days, freq_op): #trade_days is the days of the experiment #freq_op is the", "return {\"Free cash\": free_cash, \"Wallet value\": wallet_value, \"Total\": total_value} #||||==-- Execution of the", "THE MONKEYS --==|||| def monkey_broker (trade_days, freq_op): #trade_days is the days of the", "wallets for an investors group --==|||| def wallets_plot (investors_wallets, ticker, file): file_name =", "\" + str(hpq_growth) ) impatient_growth = wallet_growth (wallets_impatients) patient_growth = wallet_growth (wallets_patients) print(\"Impatient", "monkeys #||||==-- CREATE A VECTOR WITH LONG TERM OPERATION --==|||| def lt_broker (trade_days):", "shares_ex) total_value = [x + y for x, y in zip(free_cash, wallet_value)] 
return", "-*- coding: utf-8 -*- \"\"\" Created on Tue Dec 10 21:44:03 2019 @author:", "return False #||||==-- Growth percentage of the investment for an invetsors group --==||||", "stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get prices ---\" % (time.time() - start_time)) #Generate", "0: shares_ex = int(initial_cash / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else:", "= ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False, output_type = 'file',", "- start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds to plot patient monkeys", "\" + str(np.average(impatient_growth)) ) print(\"Patient monkey got an average growth of: \" +", "monkey_population (200, 8) patient_monkeys = monkey_population (200, 65) print(\"--- %s seconds to populate", "free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0: #Days without stocks free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0)", "per investor initial_cash = 10000 ########################################## ######## FUNCTIONS DECLARATION ######## ########################################## #||||==-- CREATE", "stocks_price[ticker][\"AVG\"][-1] - stocks_price[ticker][\"AVG\"][0] ) / stocks_price[ticker][\"AVG\"][0] return growth else: print (\"Ticker not in", "\"S\" elif next_op == \"S\": next_op = \"B\" #Avoid last operation is a", "data start = datetime.datetime(2015, 12, 11) end = datetime.datetime(2019, 12, 10) #Declare the", "the monkeys impatient_monkeys = monkey_population (200, 8) patient_monkeys = monkey_population (200, 65) print(\"---", "'lines', line = dict(color = 'rgb(130,130,130)', width = 1), name = \"Investor_\" +", "wallet_value.append(0) elif shares_owned[-1] > 0: #Days when hold position free_cash.append(free_cash[-1]) new_value = 
stocks_price[ticker][\"AVG\"][len(shares_owned)]", "######## ########################################## import time start_time = time.time() print(\"--- %s seconds at start ---\"", "0: #First day and no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0:", "is the average hold time for a position next_op = \"B\" #B for", "= {} if ticker in ticker_list: for investor in investors_group: wallet[investor] = wallet_evolution", "with operations for i in range(trade_days-1): if op_day < hold_op: operations.append(0) op_day +=", "int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op ==", "name = \"Investor_\" + str(wallet)) data = data + [investor_data] \"\"\" lt_evolution =", "of the investment for an invetsors group --==|||| def benchmark_growth (ticker): if ticker", "SIMULATION EXECUTION ######## ########################################## import time start_time = time.time() print(\"--- %s seconds at", "DICTIONARY WITH THE OPERATIONS OF MULTIPLE MONKEYS --==|||| def monkey_population (n, freq_op): monkeys", "wallets: growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth", "== 0: shares_ex = int(initial_cash / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash - wallet_value[-1])", "%s seconds at start ---\" % (time.time() - start_time)) #Get the prices and", "shares_ex) free_cash.append(initial_cash - wallet_value[-1]) else: shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex)", "period with: \" + str(wallet[investor][\"Total\"][-1]) ) return wallet else: print (\"Ticker not in", "growth ---\" % (time.time() - start_time)) growth_plot (impatient_growth, patient_growth, \"growth\") #Plot the evolution", "without stocks 
free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0: #Days when hold position", "x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode = 'lines', line = dict(color =", "return True #||||==-- Plot histogram for growths from two groups --==|||| def growth_plot", "#||||==-- Growth percentage of the investment for an invetsors group --==|||| def benchmark_growth", "= go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False, output_type = 'file', filename = file_name,", "tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL, PG, TSLA #Define the initial amount", "+ price[\"Low\"]) / 2 price[\"Benchmark\"] = price[\"AVG\"] * initial_cash / price[\"AVG\"][0] stocks_price[ticker] =", "by setting a SELL the last day (if needed) if next_op == \"S\":", "in investors_group: wallet[investor] = wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number: \" + str(investor))", "file_name = file + \".html\" data = [] for wallet in investors_wallets: investor_data", "the evolution for every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds to", "elif op == \"S\": #Days when sell shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex =", "an average growth of: \" + str(np.average(patient_growth)) ) print(\"--- %s seconds to calculate", "#import pandas as pd import pandas_datareader.data as web import numpy as np import", "for the data start = datetime.datetime(2015, 12, 11) end = datetime.datetime(2019, 12, 10)", "= price return stocks_price #||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR --==|||| def", "(ticker_list) trade_days = stocks_price[ticker_list[0]][\"AVG\"].count() print(\"--- %s seconds to get prices ---\" % (time.time()", "10 21:44:03 2019 @author: Christian \"\"\" ########################################## ######## IMPORT LIBRARIES ######## ########################################## import", "elif 
shares_owned[-1] > 0: #Days when hold position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] *", "wallets_plot (investors_wallets, ticker, file): file_name = file + \".html\" data = [] for", "Create distplot with custom bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig,", "group_labels = ['Group 1', 'Group 2'] # Create distplot with custom bin_size fig", "\"Total\": total_value} #||||==-- Execution of the orders for an investors group --==|||| def", "return stocks_price #||||==-- EXECUTE THE ORDERS FOR ONE INVESTOR --==|||| def wallet_evolution (operations,", "= stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op == \"B\": #Days when buy", "at start ---\" % (time.time() - start_time)) #Get the prices and calculate the", "--==|||| def monkey_broker (trade_days, freq_op): #trade_days is the days of the experiment #freq_op", "- wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\": #Days when sell shares share_price =", "wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width = 5 ),", "DECLARATION ######## ########################################## #||||==-- CREATE A VECTOR WITH THE OPERATIONS OF THE MONKEYS", "seconds at start ---\" % (time.time() - start_time)) #Get the prices and calculate", "datetime.datetime(2019, 12, 10) #Declare the tickers in the analysis ticker_list = [\"HPQ\"] #Other", "#print (\"Ends period with: \" + str(wallet[investor][\"Total\"][-1]) ) return wallet else: print (\"Ticker", "5 ), name = ticker) data = data + [benchmark] layout = go.Layout(", "hpq_growth = benchmark_growth (\"HPQ\") print(\"Benchmark growth is: \" + str(hpq_growth) ) impatient_growth =", "+= 1 else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day = 0 
if next_op", "EXECUTION ######## ########################################## import time start_time = time.time() print(\"--- %s seconds at start", "group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False, output_type = 'file', filename =", "= \"Investor_\" + str(wallet)) data = data + [investor_data] \"\"\" lt_evolution = go.Scatter(", "1 else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day = 0 if next_op ==", "\"B\" operations[-1] = \"S\" return operations #||||==-- GET THE PRICES AND ADD THE", "= stocks_price[ticker][\"Benchmark\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width = 5", "an average growth of: \" + str(np.average(impatient_growth)) ) print(\"Patient monkey got an average", "shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0: #Days when hold position free_cash.append(free_cash[-1]) new_value =", "= go.Layout( title = file, xaxis = dict(title='Time'), yaxis = dict(title='Monetary Units')) fig", "fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show() plotly.offline.plot(fig, show_link = False, output_type =", "print(\"--- %s seconds at start ---\" % (time.time() - start_time)) #Get the prices", "/ share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif op == \"S\":", "in wallets: growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] =", "calculate all monkeys wallets ---\" % (time.time() - start_time)) #Calculate the growth for", "for a position next_op = \"B\" #B for BUY // S for SELL", "file): file_name = file + \".html\" # Group data together hist_data = [growth_1,", "data together hist_data = [growth_1, growth_2] group_labels = ['Group 1', 'Group 2'] #", "(time.time() - start_time)) growth_plot (impatient_growth, patient_growth, 
\"growth\") #Plot the evolution for every wallet", "WITH LONG TERM OPERATION --==|||| def lt_broker (trade_days): operations = [0] * (trade_days)", "percentage of the investment for an invetsors group --==|||| def benchmark_growth (ticker): if", "dict(color = 'rgb(30,30,30)', width = 5 ), name = ticker) data = data", "together hist_data = [growth_1, growth_2] group_labels = ['Group 1', 'Group 2'] # Create", "[\"HPQ\"] #Other tickers: ^GSPC, HPQ, AMZN, WM, IBM, CL, PG, TSLA #Define the", "+ [investor_data] \"\"\" lt_evolution = go.Scatter( x = stocks_price[ticker].index, y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode", "numpy as np import plotly import plotly.graph_objects as go import plotly.figure_factory as ff", "yaxis = dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False, output_type", "#Plot the evolution for every wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds", "op_day += 1 else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day = 0 if", "WM, IBM, CL, PG, TSLA #Define the initial amount of money available per", "--==|||| def get_prices (ticker_list): stocks_price = {} for ticker in ticker_list: price =", "= ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return", "#||||==-- Plot of wallets for an investors group --==|||| def wallets_plot (investors_wallets, ticker,", "free_cash.append(free_cash[-1]) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] > 0: #Days when hold position free_cash.append(free_cash[-1]) new_value", "THE ORDERS FOR ONE INVESTOR --==|||| def wallet_evolution (operations, ticker): free_cash = []", "(investors_group, ticker): wallet = {} if ticker in ticker_list: for investor in investors_group:", "if len(free_cash) == 0: shares_ex = int(initial_cash / 
share_price) wallet_value.append(share_price * shares_ex) free_cash.append(initial_cash", "of the investment for an invetsors group --==|||| def wallet_growth (wallets): total_growth =", "wallet in investors_wallets: investor_data = go.Scatter( x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode", "operations executed op_day = 0 #Build vector with operations for i in range(trade_days-1):", "'Group 2'] # Create distplot with custom bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05,", "go import plotly.figure_factory as ff ########################################## ######## INPUT DATA ######## ########################################## #Define start", "str(wallet[investor][\"Total\"][-1]) ) return wallet else: print (\"Ticker not in list, include it and", "the days of the experiment #freq_op is the average hold time for a", "= \"B\" operations[-1] = \"S\" return operations #||||==-- GET THE PRICES AND ADD", "needed) if next_op == \"S\": operations.append(\"S\") else: operations.append(0) return operations #||||==-- CREATE A", "when hold position free_cash.append(free_cash[-1]) new_value = stocks_price[ticker][\"AVG\"][len(shares_owned)] * shares_owned[-1] shares_owned.append(shares_owned[-1]) wallet_value.append(new_value) elif op", "line = dict(color = 'rgb(30,30,30)', width = 5 ), name = ticker) data", "file_name, auto_open = True) return True #||||==-- Plot histogram for growths from two", "WITH THE OPERATIONS OF THE MONKEYS --==|||| def monkey_broker (trade_days, freq_op): #trade_days is", "plotly.graph_objects as go import plotly.figure_factory as ff ########################################## ######## INPUT DATA ######## ##########################################", "= 1), name = \"Investor_\" + str(wallet)) data = data + [investor_data] \"\"\"", "wallets---\" % (time.time() - start_time)) #wallets_plot (wallets_patients, \"HPQ\", \"Prueba2\") #print(\"--- %s seconds to", "for i in range(trade_days-1): if op_day < 
hold_op: operations.append(0) op_day += 1 else:", "TERM OPERATION --==|||| def lt_broker (trade_days): operations = [0] * (trade_days) operations[0] =", "wallet_growth (wallets): total_growth = [] for wallet in wallets: growth = ( wallets[wallet][\"Total\"][-1]", "investors group --==|||| def wallets_plot (investors_wallets, ticker, file): file_name = file + \".html\"", "got an average growth of: \" + str(np.average(impatient_growth)) ) print(\"Patient monkey got an", "CREATE A VECTOR WITH THE OPERATIONS OF THE MONKEYS --==|||| def monkey_broker (trade_days,", "operations.append(0) op_day += 1 else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day = 0", "- start_time)) #Generate the dictionaries with the evolutoin of the wallets wallets_impatients =", "line = dict(color = 'rgb(130,130,130)', width = 1), name = \"Investor_\" + str(wallet))", "\"B\" #Avoid last operation is a BUY by setting a SELL the last", "for an investors group --==|||| def wallets_plot (investors_wallets, ticker, file): file_name = file", "start_time)) #Generate the dictionaries with the evolutoin of the wallets wallets_impatients = wallets_evolution", "[] #Vector with operations executed op_day = 0 #Build vector with operations for", "Christian \"\"\" ########################################## ######## IMPORT LIBRARIES ######## ########################################## import datetime #import pandas as", "= round(np.random.uniform(1, freq_op)) op_day = 0 if next_op == \"B\": next_op = \"S\"", "growth = ( wallets[wallet][\"Total\"][-1] - wallets[wallet][\"Total\"][0] ) / wallets[wallet][\"Total\"][0] wallets[wallet][\"Growth\"] = growth total_growth.append(growth)", "with: \" + str(wallet[investor][\"Total\"][-1]) ) return wallet else: print (\"Ticker not in list,", "initial amount of money available per investor initial_cash = 10000 ########################################## ######## FUNCTIONS", "y in zip(free_cash, wallet_value)] return {\"Free cash\": 
free_cash, \"Wallet value\": wallet_value, \"Total\": total_value}", "(wallets_patients) print(\"Impatient monkey got an average growth of: \" + str(np.average(impatient_growth)) ) print(\"Patient", "def wallets_plot (investors_wallets, ticker, file): file_name = file + \".html\" data = []", "False, output_type = 'file', filename = file_name, auto_open = True) return True #||||==--", "['Group 1', 'Group 2'] # Create distplot with custom bin_size fig = ff.create_distplot(hist_data,", "% (time.time() - start_time)) #Get the prices and calculate the days of tradding", "THE HIGH AND LOW --==|||| def get_prices (ticker_list): stocks_price = {} for ticker", "y = wallet_evolution(lt_broker(trade_days),ticker)[\"Total\"], mode = 'lines', line = dict(color = 'rgb(30,30,30)', width =", "pandas as pd import pandas_datareader.data as web import numpy as np import plotly", "######## IMPORT LIBRARIES ######## ########################################## import datetime #import pandas as pd import pandas_datareader.data", "CALCULATED WITH THE HIGH AND LOW --==|||| def get_prices (ticker_list): stocks_price = {}", "wallet #wallets_plot (wallets_impatients, \"HPQ\", \"Prueba1\") #print(\"--- %s seconds to plot impatient monkeys wallets---\"", "+ \".html\" # Group data together hist_data = [growth_1, growth_2] group_labels = ['Group", "return True ########################################## ######## SIMULATION EXECUTION ######## ########################################## import time start_time = time.time()", "on Tue Dec 10 21:44:03 2019 @author: Christian \"\"\" ########################################## ######## IMPORT LIBRARIES", "2'] # Create distplot with custom bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal')", "= go.Scatter( x = stocks_price[ticker].index, y = investors_wallets[wallet][\"Total\"], mode = 'lines', line =", "name = ticker) data = data + [lt_evolution] \"\"\" benchmark = go.Scatter( x", "operations operations = [] 
#Vector with operations executed op_day = 0 #Build vector", "(time.time() - start_time)) #Generate the dictionaries with the operations for the monkeys impatient_monkeys", "str(np.average(impatient_growth)) ) print(\"Patient monkey got an average growth of: \" + str(np.average(patient_growth)) )", "return operations #||||==-- GET THE PRICES AND ADD THE DAILY AVERAGE VALUE CALCULATED", "file, xaxis = dict(title='Time'), yaxis = dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig,", "\"B\": next_op = \"S\" elif next_op == \"S\": next_op = \"B\" #Avoid last", "and run the program again\") return False #||||==-- Growth percentage of the investment", "freq_op) return monkeys #||||==-- CREATE A VECTOR WITH LONG TERM OPERATION --==|||| def", "{} if ticker in ticker_list: for investor in investors_group: wallet[investor] = wallet_evolution (investors_group[investor],", "lt_broker (trade_days): operations = [0] * (trade_days) operations[0] = \"B\" operations[-1] = \"S\"", "= dict(title='Monetary Units')) fig = go.Figure(data=data, layout=layout) plotly.offline.plot(fig, show_link = False, output_type =", "return monkeys #||||==-- CREATE A VECTOR WITH LONG TERM OPERATION --==|||| def lt_broker", "== \"B\": next_op = \"S\" elif next_op == \"S\": next_op = \"B\" #Avoid", "a BUY by setting a SELL the last day (if needed) if next_op", "- start_time)) #Generate the dictionaries with the operations for the monkeys impatient_monkeys =", "of the wallets wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys, \"HPQ\")", "print(\"--- %s seconds to get prices ---\" % (time.time() - start_time)) #Generate the", "# Group data together hist_data = [growth_1, growth_2] group_labels = ['Group 1', 'Group", "shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] - wallet_value[-1]) shares_owned.append(shares_ex) elif", 
"as ff ########################################## ######## INPUT DATA ######## ########################################## #Define start and end date", "import plotly import plotly.graph_objects as go import plotly.figure_factory as ff ########################################## ######## INPUT", "TSLA #Define the initial amount of money available per investor initial_cash = 10000", "wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex) total_value = [x + y for x,", ") print(\"Patient monkey got an average growth of: \" + str(np.average(patient_growth)) ) print(\"---", "########################################## #||||==-- CREATE A VECTOR WITH THE OPERATIONS OF THE MONKEYS --==|||| def", "width = 5 ), name = ticker) data = data + [lt_evolution] \"\"\"", "seconds to calculate all monkeys wallets ---\" % (time.time() - start_time)) #Calculate the", "= 'lines', line = dict(color = 'rgb(30,30,30)', width = 5 ), name =", "layout = go.Layout( title = file, xaxis = dict(title='Time'), yaxis = dict(title='Monetary Units'))", "MONKEYS --==|||| def monkey_population (n, freq_op): monkeys = {} for i in range(n):", "for an invetsors group --==|||| def wallet_growth (wallets): total_growth = [] for wallet", "x, y in zip(free_cash, wallet_value)] return {\"Free cash\": free_cash, \"Wallet value\": wallet_value, \"Total\":", "#trade_days is the days of the experiment #freq_op is the average hold time", "hold_op = round(np.random.uniform(1, freq_op)) op_day = 0 if next_op == \"B\": next_op =", "######## ########################################## #Define start and end date for the data start = datetime.datetime(2015,", "= 'rgb(130,130,130)', width = 1), name = \"Investor_\" + str(wallet)) data = data", "#Define the initial amount of money available per investor initial_cash = 10000 ##########################################", "= datetime.datetime(2015, 12, 11) end = datetime.datetime(2019, 12, 10) #Declare the tickers in", "# Create distplot with custom 
bin_size fig = ff.create_distplot(hist_data, group_labels, bin_size=.05, curve_type='normal') fig.show()", "INVESTOR --==|||| def wallet_evolution (operations, ticker): free_cash = [] shares_owned = [] wallet_value", "AND LOW --==|||| def get_prices (ticker_list): stocks_price = {} for ticker in ticker_list:", "for SELL hold_op = round(np.random.uniform(1, freq_op)) #days between operations operations = [] #Vector", "= wallet_evolution (investors_group[investor], ticker) #print (\"Monkey number: \" + str(investor)) #print (\"Ends period", "< hold_op: operations.append(0) op_day += 1 else: operations.append(next_op) hold_op = round(np.random.uniform(1, freq_op)) op_day", "wallets[wallet][\"Growth\"] = growth total_growth.append(growth) return total_growth #||||==-- Plot of wallets for an investors", "= monkey_broker (trade_days, freq_op) return monkeys #||||==-- CREATE A VECTOR WITH LONG TERM", "not in list, include it and run the program again\") return False #||||==--", "evolutoin of the wallets wallets_impatients = wallets_evolution (impatient_monkeys, \"HPQ\") wallets_patients = wallets_evolution (patient_monkeys,", "Growth percentage of the investment for an invetsors group --==|||| def wallet_growth (wallets):", "day and no buy free_cash.append(initial_cash) shares_owned.append(0) wallet_value.append(0) elif shares_owned[-1] == 0: #Days without", "= [] wallet_value = [] for op in operations: if op == 0:", "time for a position next_op = \"B\" #B for BUY // S for", "OF THE MONKEYS --==|||| def monkey_broker (trade_days, freq_op): #trade_days is the days of", "- wallet_value[-1]) else: shares_ex = int(free_cash[-1] / share_price) wallet_value.append(share_price * shares_ex) free_cash.append(free_cash[-1] -", "buy shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] if len(free_cash) == 0: shares_ex = int(initial_cash /", "shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price * shares_ex) total_value = [x + y 
for", "shares share_price = stocks_price[ticker][\"AVG\"][len(free_cash)] shares_ex = shares_owned[-1] shares_owned.append(0) wallet_value.append(0) free_cash.append(free_cash[-1] + share_price *" ]
[ "Bold and Italics Text/textWidgetBoldItalicsText.py # Python Tkinter Text Bold and Italics Text #", "Add Image global my_image my_image = PhotoImage(file=\"Python Tkinter Text Bold and Italics Text/images/softwares.png\")", "fill=Y) my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() #", "from tkinter import * from tkinter import filedialog from tkinter import font root", "Text Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r # read and", "def italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if", "Write Only w (over-Written) # Write and Read w+ (over written) # Append", "my_frame.pack(pady=10) # Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40,", "File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root,", "= Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button", "height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview) open_button", "\"sel.first\", \"sel.last\") def italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags =", "\"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) 
italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\",", "= Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save File\", command=save_txt)", "and Read w+ (over written) # Append Only a (end of file) #", "Files\", \"*.txt\"),)) text_file = open(text_file, \"r\") stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def", "= Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5)", "text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it) italics_button.pack(pady=5) my_label = Label(root, text=\"\")", "Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0,", "Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r # read and write r+ (beginning", "text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save", "# Texto de Python Tkinter Texto en negrita y en cursiva from tkinter", "Italics Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"w\")", "= Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our", "font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\",", "italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", 
font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\"", "font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview) open_button =", "stuff) text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\",", "image=my_image) my_label.config(text=position) def select_text(): selected = my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text,", "Append Only a (end of file) # Append and Read a+ (end of", "de Python Tkinter Texto en negrita y en cursiva from tkinter import *", "# read and write r+ (beginning of file) # Write Only w (over-Written)", "= text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold", "italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\")", "font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\",", "and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r # read and write r+", "Text File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button =", "from tkinter import filedialog from tkinter import font root = Tk() root.title('Python Tkinter", "Tkinter Text Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r # read", "Text Bold and Italics Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file", "Only w 
(over-Written) # Write and Read w+ (over written) # Append Only", "if \"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame", "Italics Text # Texto de Python Tkinter Texto en negrita y en cursiva", "#-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y)", "yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text File\",", "Only a (end of file) # Append and Read a+ (end of file)", "Text # Texto de Python Tkinter Texto en negrita y en cursiva from", "filedialog from tkinter import font root = Tk() root.title('Python Tkinter Text Bold and", "file) # Append and Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file", "open(text_file, \"r\") stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python", "= open(text_file, \"r\") stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt(): text_file =", "Text Bold and Italics Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text():", "select_text(): selected = my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\",", "text=\"Open 
Text File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button", "file) # Write Only w (over-Written) # Write and Read w+ (over written)", "END)) def add_image(): # Add Image global my_image my_image = PhotoImage(file=\"Python Tkinter Text", "def add_image(): # Add Image global my_image my_image = PhotoImage(file=\"Python Tkinter Text Bold", "\"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image(): # Add Image global", "# Add Image global my_image my_image = PhotoImage(file=\"Python Tkinter Text Bold and Italics", "global my_image my_image = PhotoImage(file=\"Python Tkinter Text Bold and Italics Text/images/softwares.png\") position =", "current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\",", "scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root,", "filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"r\") stuff = text_file.read() my_text.insert(END, stuff) text_file.close()", "# Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40, height=10,", "text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button =", "\"*.txt\"),)) text_file = open(text_file, \"r\") stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt():", "Configure our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20) save_button", "PhotoImage(file=\"Python Tkinter Text Bold and Italics 
Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position)", "my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def", "my_frame = Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text", "my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\"))", "current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10)", "<filename>Python Tkinter Text Bold and Italics Text/textWidgetBoldItalicsText.py # Python Tkinter Text Bold and", "from tkinter import font root = Tk() root.title('Python Tkinter Text Bold and Italics", "my_text.tag_names(\"sel.first\") if \"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------#", "= font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in current_tags:", "font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\",", "tkinter import font root = Tk() root.title('Python Tkinter Text Bold and Italics Text')", "r # read and write r+ (beginning 
of file) # Write Only w", "= Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it) italics_button.pack(pady=5) my_label =", "* from tkinter import filedialog from tkinter import font root = Tk() root.title('Python", "root.geometry(\"600x600\") # Read Only r # read and write r+ (beginning of file)", "and write r+ (beginning of file) # Write Only w (over-Written) # Write", "my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags", "my_label.config(text=position) def select_text(): selected = my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\"))", "my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags =", "text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\",", "Write and Read w+ (over written) # Append Only a (end of file)", "Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button = Button(root,", "Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button", "Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text", 
"bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it) italics_button.pack(pady=5) my_label = Label(root, text=\"\") my_label.pack(pady=5) root.mainloop()", "my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll =", "my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text File\", command=open_txt)", "font root = Tk() root.title('Python Tkinter Text Bold and Italics Text') root.iconbitmap('Python Tkinter", "title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END))", "def select_text(): selected = my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\")", "# Write Only w (over-Written) # Write and Read w+ (over written) #", "# Append Only a (end of file) # Append and Read a+ (end", "\"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it(): italic_font", "text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title =\"Open Text File\",", "= Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\",", "(over-Written) # Write and Read w+ (over written) # Append Only a (end", "text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root,", "current_tags = my_text.tag_names(\"sel.first\") if \"italic\" 
in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\",", "selected = my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font)", "command=select_text) select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it)", "# Write and Read w+ (over written) # Append Only a (end of", "Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar", "a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold", "= open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image(): # Add Image global my_image my_image", "\"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) # Create Scrolbar", "Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it) italics_button.pack(pady=5) my_label = Label(root,", "=\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def", "import font root = Tk() root.title('Python Tkinter Text Bold and Italics Text') root.iconbitmap('Python", "and Italics Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file 
= open(text_file,", "bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in", "Text/textWidgetBoldItalicsText.py # Python Tkinter Text Bold and Italics Text # Texto de Python", "Tkinter Text Bold and Italics Text/textWidgetBoldItalicsText.py # Python Tkinter Text Bold and Italics", "Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"r\") stuff", "cursiva from tkinter import * from tkinter import filedialog from tkinter import font", "text_file.write(my_text.get(1.0, END)) def add_image(): # Add Image global my_image my_image = PhotoImage(file=\"Python Tkinter", "= my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags", "= Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5)", "Text Bold and Italics Text # Texto de Python Tkinter Texto en negrita", "of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics", "position = my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected = my_text.selection_get() my_label.config(text=selected) def", "= font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\" in current_tags:", "Bold and Italics Text # Texto de Python 
Tkinter Texto en negrita y", "Italics Text') root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only", "a (end of file) # Append and Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------#", "Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button =", "\"sel.last\") def italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\")", "Italics Text/textWidgetBoldItalicsText.py # Python Tkinter Text Bold and Italics Text # Texto de", "Python Tkinter Texto en negrita y en cursiva from tkinter import * from", "# Append and Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file =", "save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\",", "select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it) italics_button.pack(pady=5)", "(over written) # Append Only a (end of file) # Append and Read", "w+ (over written) # Append Only a (end of file) # Append and", "else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll", "my_image = 
PhotoImage(file=\"Python Tkinter Text Bold and Italics Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position,", "Tkinter Text Bold and Italics Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def", "image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder)", "negrita y en cursiva from tkinter import * from tkinter import filedialog from", "root = Tk() root.title('Python Tkinter Text Bold and Italics Text') root.iconbitmap('Python Tkinter Text", "of file) # Append and Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt():", "Tkinter Texto en negrita y en cursiva from tkinter import * from tkinter", "r+ (beginning of file) # Write Only w (over-Written) # Write and Read", "Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\",", "in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it(): italic_font =", "Bold and Italics Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file =", "Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20)", "Append and Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python", "Text\", command=select_text) 
select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\",", "File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root,", "italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\" in", "tkinter import filedialog from tkinter import font root = Tk() root.title('Python Tkinter Text", "Text') root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r", "Image global my_image my_image = PhotoImage(file=\"Python Tkinter Text Bold and Italics Text/images/softwares.png\") position", "Only r # read and write r+ (beginning of file) # Write Only", "my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else:", "root.title('Python Tkinter Text Bold and Italics Text') root.iconbitmap('Python Tkinter Text Bold and Italics", "open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image(): # Add Image global my_image my_image =", "Bold and Italics Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected", "written) # Append Only a (end of file) # Append and Read a+", "import * from tkinter import filedialog from tkinter import font root = Tk()", "\"r\") stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter", "my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected = 
my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font =", "title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"r\") stuff =", "and Italics Text') root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read", "Texto en negrita y en cursiva from tkinter import * from tkinter import", "Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame,", "Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button", "en negrita y en cursiva from tkinter import * from tkinter import filedialog", "# Python Tkinter Text Bold and Italics Text # Texto de Python Tkinter", "filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title =\"Open Text File\", filetypes=((\"Text Files\",", "Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16),", "selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text", "filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image(): # Add", "width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview)", "w (over-Written) # Write and Read w+ (over written) # Append Only a", "(beginning of file) # Write Only w (over-Written) # Write and Read w+", "Text Bold and Italics Text') root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico') 
root.geometry(\"600x600\")", "Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"r\") stuff = text_file.read() my_text.insert(END,", "Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected = my_text.selection_get() my_label.config(text=selected)", "image_button = Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\", command=select_text)", "bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it) italics_button.pack(pady=5) my_label", "Texto de Python Tkinter Texto en negrita y en cursiva from tkinter import", "(end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and", "bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\")", "current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it(): italic_font = font.Font(my_text,", "Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r # read and write r+ (beginning of", "= my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\")", "Italics Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"r\")", "else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def 
italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font)", "select_button = Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\", command=bolder) bold_button.pack(pady=5)", "Read w+ (over written) # Append Only a (end of file) # Append", "en cursiva from tkinter import * from tkinter import filedialog from tkinter import", "my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\",", "if \"bold\" in current_tags: my_text.tag_remove(\"bold\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it():", "# Configure our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20)", "def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title =\"Open", "in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root)", "Files\", \"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image(): # Add Image", "Tkinter Text Bold and Italics Text') root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico')", "Read Only r # read and write r+ (beginning of file) # Write", "and Italics Text/textWidgetBoldItalicsText.py # Python Tkinter Text Bold and Italics Text # Texto", "my_image my_image = PhotoImage(file=\"Python Tkinter Text Bold and Italics Text/images/softwares.png\") 
position = my_text.index(INSERT)", "Bold and Italics Text') root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") #", "open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title =\"Open Text", "font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\" in current_tags: my_text.tag_remove(\"italic\",", "16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root,", "= Tk() root.title('Python Tkinter Text Bold and Italics Text') root.iconbitmap('Python Tkinter Text Bold", "and Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter", "\"sel.first\", \"sel.last\") else: my_text.tag_add(\"bold\", \"sel.first\", \"sel.last\") def italics_it(): italic_font = font.Font(my_text, my_text.cget(\"font\")) italic_font.configure(slant=\"italic\")", "command=bolder) bold_button.pack(pady=5) italics_button = Button(root, text=\"italics\", command=italics_it) italics_button.pack(pady=5) my_label = Label(root, text=\"\") my_label.pack(pady=5)", "= my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected = my_text.selection_get() my_label.config(text=selected) def bolder():", "=\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"r\") stuff = text_file.read()", "import filedialog from tkinter import font root = Tk() root.title('Python Tkinter Text Bold", "Python Tkinter Text Bold and Italics 
Text # Texto de Python Tkinter Texto", "Tk() root.title('Python Tkinter Text Bold and Italics Text') root.iconbitmap('Python Tkinter Text Bold and", "#----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title", "our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20) save_button =", "text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack()", "open_button = Button(root, text=\"Open Text File\", command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save File\",", "file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\",", "Text Bold and Italics Text/textWidgetBoldItalicsText.py # Python Tkinter Text Bold and Italics Text", "and Italics Text # Texto de Python Tkinter Texto en negrita y en", "# Read Only r # read and write r+ (beginning of file) #", "def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if", "command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select Text\", command=select_text) select_button.pack(pady=5) bold_button = Button(root, text=\"Bold\",", "\"sel.first\", \"sel.last\") 
#-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll = Scrollbar(my_frame)", "File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image(): #", "Text File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image():", "bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\") if \"bold\"", "def open_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title =\"Open", "write r+ (beginning of file) # Write Only w (over-Written) # Write and", "tkinter import * from tkinter import filedialog from tkinter import font root =", "= filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title =\"Open Text File\", filetypes=((\"Text", "text_file = open(text_file, \"w\") text_file.write(my_text.get(1.0, END)) def add_image(): # Add Image global my_image", "Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll)", "Tkinter Text Bold and Italics Text # Texto de Python Tkinter Texto en", "(end of file) # Append and Read a+ (end of file) #----------------------------------------------------------------------------Function--------------------------------------------------------------------------# def", "stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text", "Italics Text/images/softwares.png\") position = 
my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected = my_text.selection_get()", "open_button.pack(pady=20) save_button = Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\",", "save_button = Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\", command=add_image)", "my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else:", "my_text.cget(\"font\")) italic_font.configure(slant=\"italic\") my_text.tag_configure(\"italic\", font=italic_font) current_tags = my_text.tag_names(\"sel.first\") if \"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\",", "add_image(): # Add Image global my_image my_image = PhotoImage(file=\"Python Tkinter Text Bold and", "and Italics Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected =", "\"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT,", "command=open_txt) open_button.pack(pady=20) save_button = Button(root, text=\"Save File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add", "root.iconbitmap('Python Tkinter Text Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r #", "my_text.index(INSERT) my_text.image_create(position, image=my_image) my_label.config(text=position) def select_text(): selected = my_text.selection_get() my_label.config(text=selected) def bolder(): bold_font", "text=\"Save 
File\", command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button =", "Bold and Italics Text/icons/panda.ico') root.geometry(\"600x600\") # Read Only r # read and write", "y en cursiva from tkinter import * from tkinter import filedialog from tkinter", "my_text.insert(END, stuff) text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics", "of file) # Write Only w (over-Written) # Write and Read w+ (over", "read and write r+ (beginning of file) # Write Only w (over-Written) #", "= PhotoImage(file=\"Python Tkinter Text Bold and Italics Text/images/softwares.png\") position = my_text.index(INSERT) my_text.image_create(position, image=my_image)", "my_label.config(text=selected) def bolder(): bold_font = font.Font(my_text, my_text.cget(\"font\")) bold_font.configure(weight=\"bold\") my_text.tag_configure(\"bold\", font=bold_font) current_tags = my_text.tag_names(\"sel.first\")", "= my_text.tag_names(\"sel.first\") if \"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\")", "Tkinter Text Bold and Italics Text/\", title =\"Open Text File\", filetypes=((\"Text Files\", \"*.txt\"),))", "text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title", "save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and Italics Text/\", title =\"Open Text", "\"w\") text_file.write(my_text.get(1.0, END)) def add_image(): # Add Image global my_image my_image = PhotoImage(file=\"Python", "\"italic\" in current_tags: my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") 
#-------------------------------------------------Frame-------------------------------------------------------# my_frame =", "\"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) # Create", "my_text = Text(my_frame, width=40, height=10, font=(\"Helvetica\", 16), selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure", "= Frame(root) my_frame.pack(pady=10) # Create Scrolbar text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) my_text =", "selectbackground=\"green\", selectforeground=\"black\", yscrollcommand=text_scroll) my_text.pack() # Configure our scrollbar text_scroll.config(command=my_text.yview) open_button = Button(root, text=\"Open", "command=save_txt) save_button.pack(pady=20) image_button = Button(root, text=\"Add Image\", command=add_image) image_button.pack(pady=5) select_button = Button(root, text=\"Select", "my_text.tag_remove(\"italic\", \"sel.first\", \"sel.last\") else: my_text.tag_add(\"italic\", \"sel.first\", \"sel.last\") #-------------------------------------------------Frame-------------------------------------------------------# my_frame = Frame(root) my_frame.pack(pady=10) #", "text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt(): text_file = filedialog.askopenfilename(initialdir=\"Python Tkinter Text Bold and", "text_file = open(text_file, \"r\") stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def save_txt(): text_file", "File\", filetypes=((\"Text Files\", \"*.txt\"),)) text_file = open(text_file, \"r\") stuff = text_file.read() my_text.insert(END, stuff)" ]
[ "* from mantle import And, XOr from simulator import testvectors main = DefineCircuit('main',", "magma import * from mantle import And, XOr from simulator import testvectors main", "\"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t = And(2)(main.a,main.b)", "simulator import testvectors main = DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\",", "And, XOr from simulator import testvectors main = DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit),", "import And, XOr from simulator import testvectors main = DefineCircuit('main', \"a\", In(Bit), \"b\",", "\"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t = And(2)(main.a,main.b) d = XOr(2)(t,main.c) wire(d,main.d)", "from mantle import And, XOr from simulator import testvectors main = DefineCircuit('main', \"a\",", "DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t =", "\"d\", Out(Bit), 'CLK', In(Bit)) t = And(2)(main.a,main.b) d = XOr(2)(t,main.c) wire(d,main.d) EndCircuit() print(testvectors(main))", "import * from mantle import And, XOr from simulator import testvectors main =", "In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t = And(2)(main.a,main.b) d = XOr(2)(t,main.c) wire(d,main.d) EndCircuit()", "from simulator import testvectors main = DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit),", "mantle import And, XOr from simulator import testvectors main = DefineCircuit('main', \"a\", In(Bit),", "In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t = And(2)(main.a,main.b) d = XOr(2)(t,main.c)", "= DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t", "In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t = And(2)(main.a,main.b) d", "import os os.environ['MANTLE'] = 'lattice' from magma import * from mantle import And,", "'lattice' from magma import * from mantle import And, XOr from simulator import", 
"main = DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit))", "from magma import * from mantle import And, XOr from simulator import testvectors", "testvectors main = DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK',", "os os.environ['MANTLE'] = 'lattice' from magma import * from mantle import And, XOr", "import testvectors main = DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit),", "\"b\", In(Bit), \"c\", In(Bit), \"d\", Out(Bit), 'CLK', In(Bit)) t = And(2)(main.a,main.b) d =", "= 'lattice' from magma import * from mantle import And, XOr from simulator", "XOr from simulator import testvectors main = DefineCircuit('main', \"a\", In(Bit), \"b\", In(Bit), \"c\",", "os.environ['MANTLE'] = 'lattice' from magma import * from mantle import And, XOr from" ]
[ "bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import nms from model.roi_layers import nms import", "max_scores = np.amax(cls_prob, axis=1) # keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals =", "import cfg from model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch", "keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals,", "ignore bg # sort scores max_scores = np.amax(cls_prob, axis=1) # keep top scores", "return blob[keep_index, :], keep_index def backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass", "def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP def forward(self,", "def backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass def reshape(self, bottom, top):", "__init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP def forward(self, rois,", "rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:, 1:]", "Network withFeature Mimicking # Copyright (c) 2018 University of Illinois # Licensed under", "cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0] * self._top) rois", "from model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform", "Attention Network withFeature Mimicking # Copyright (c) 2018 University of Illinois # Licensed", "= np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index", 
"class_agnostic self._top = cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0]", "rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:,", "by <NAME> # ------------------------------------------------------- import torch import torch.nn as nn import numpy as", "# sort scores max_scores = np.amax(cls_prob, axis=1) # keep top scores keep_index =", "# ------------------------------------------------------- import torch import torch.nn as nn import numpy as np import", "backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass def reshape(self, bottom, top): \"\"\"Reshaping", "False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top =", "req, out_grad, in_data, out_data, in_grad, aux): pass def reshape(self, bottom, top): \"\"\"Reshaping happens", ":] cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg # sort scores max_scores =", "torch.nn as nn import numpy as np import math import yaml from model.utils.config", "bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import", "= False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top", "= cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg # sort scores max_scores = np.amax(cls_prob, axis=1)", "pass def reshape(self, bottom, top): \"\"\"Reshaping happens during the call to forward.\"\"\" pass", "# -------------------------------------------------------- # Spatial Attention Network withFeature Mimicking # Copyright (c) 2018 University", "numpy as np import math import yaml from model.utils.config 
import cfg from model.rpn.generate_anchors", "-------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified Modified by <NAME> # ------------------------------------------------------- import", "import nms import pdb DEBUG = False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer,", "im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg # sort scores max_scores", "proposals = clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32,", "from model.roi_layers import nms import pdb DEBUG = False class _DCRProposalLayer(nn.Module): def __init__(self,", "and modified Modified by <NAME> # ------------------------------------------------------- import torch import torch.nn as nn", "# Reorganized and modified Modified by <NAME> # ------------------------------------------------------- import torch import torch.nn", "import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper", "1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index def backward(self,", "clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return", "DEBUG = False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic", "Modified by <NAME> # ------------------------------------------------------- import torch import torch.nn as nn import numpy", "bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = im_info.cpu().detach().numpy()[0, :] 
cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] #", "from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import nms from model.roi_layers", "np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1),", "model.utils.config import cfg from model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes,", "im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index,", "np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index def", "import absolute_import # -------------------------------------------------------- # Spatial Attention Network withFeature Mimicking # Copyright (c)", "_DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP def", "nms from model.roi_layers import nms import pdb DEBUG = False class _DCRProposalLayer(nn.Module): def", "University of Illinois # Licensed under The MIT License [see LICENSE for details]", ".bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import nms from model.roi_layers import", "from model.utils.config import cfg from model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv,", "------------------------------------------------------- import torch import torch.nn as nn import numpy as np import math", "np import math import yaml from model.utils.config import cfg from model.rpn.generate_anchors import generate_anchors", "= bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = 
im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore", "Spatial Attention Network withFeature Mimicking # Copyright (c) 2018 University of Illinois #", "as nn import numpy as np import math import yaml from model.utils.config import", "blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index def backward(self, req, out_grad,", "License [see LICENSE for details] # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified", "withFeature Mimicking # Copyright (c) 2018 University of Illinois # Licensed under The", "bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg", "<NAME> # ------------------------------------------------------- import torch import torch.nn as nn import numpy as np", "of Illinois # Licensed under The MIT License [see LICENSE for details] #", "-------------------------------------------------------- # Spatial Attention Network withFeature Mimicking # Copyright (c) 2018 University of", "for details] # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified Modified by <NAME>", "Illinois # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------", "np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index def backward(self, req, out_grad, in_data, out_data,", "top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2])", "# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps", "cfg from 
model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from", "import numpy as np import math import yaml from model.utils.config import cfg from", "= np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index def backward(self, req, out_grad, in_data,", "int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info =", "keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds =", "# keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals =", "out_grad, in_data, out_data, in_grad, aux): pass def reshape(self, bottom, top): \"\"\"Reshaping happens during", "# Copyright (c) 2018 University of Illinois # Licensed under The MIT License", "in_grad, aux): pass def reshape(self, bottom, top): \"\"\"Reshaping happens during the call to", "absolute_import # -------------------------------------------------------- # Spatial Attention Network withFeature Mimicking # Copyright (c) 2018", "from __future__ import absolute_import # -------------------------------------------------------- # Spatial Attention Network withFeature Mimicking #", "* self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = im_info.cpu().detach().numpy()[0,", "im_info = im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg # sort", "im_info): num_keep_index = int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:,", "= cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0] * self._top)", "= 
im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg # sort scores", "import math import yaml from model.utils.config import cfg from model.rpn.generate_anchors import generate_anchors #", "Mimicking # Copyright (c) 2018 University of Illinois # Licensed under The MIT", "import torch import torch.nn as nn import numpy as np import math import", "nms import pdb DEBUG = False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__()", "import nms from model.roi_layers import nms import pdb DEBUG = False class _DCRProposalLayer(nn.Module):", "dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index def backward(self, req,", "sort scores max_scores = np.amax(cls_prob, axis=1) # keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index]", "self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index", "cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg # sort scores max_scores = np.amax(cls_prob, axis=1) #", "scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds", "rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = im_info.cpu().detach().numpy()[0, :] cls_prob", "2018 University of Illinois # Licensed under The MIT License [see LICENSE for", "as np import math import yaml from model.utils.config import cfg from model.rpn.generate_anchors import", "axis=1) # keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals", "[see LICENSE for details] # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and 
modified Modified", "details] # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified Modified by <NAME> #", "-------------------------------------------------------- # Reorganized and modified Modified by <NAME> # ------------------------------------------------------- import torch import", "from model.nms.nms_wrapper import nms from model.roi_layers import nms import pdb DEBUG = False", "model.roi_layers import nms import pdb DEBUG = False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic):", "bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob =", "blob[keep_index, :], keep_index def backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass def", "= clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))", "scores max_scores = np.amax(cls_prob, axis=1) # keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals", "model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps # from", "= rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = im_info.cpu().detach().numpy()[0, :] cls_prob =", "MIT License [see LICENSE for details] # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and", "torch import torch.nn as nn import numpy as np import math import yaml", "clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import nms", "4:8] im_info = im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:, 
1:] # ignore bg #", "np.amax(cls_prob, axis=1) # keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas)", "Copyright (c) 2018 University of Illinois # Licensed under The MIT License [see", "= int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info", "batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False))) return blob[keep_index, :],", "pdb DEBUG = False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic =", "self).__init__() self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor, im_info):", "clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import nms from", "bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob = np.hstack((batch_inds,", "The MIT License [see LICENSE for details] # -------------------------------------------------------- # -------------------------------------------------------- # Reorganized", "__future__ import absolute_import # -------------------------------------------------------- # Spatial Attention Network withFeature Mimicking # Copyright", "= np.amax(cls_prob, axis=1) # keep top scores keep_index = np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois,", "import generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred,", "under The MIT License [see LICENSE for details] # -------------------------------------------------------- # -------------------------------------------------------- #", "bbox_overlaps # from model.nms.nms_wrapper 
import nms from model.roi_layers import nms import pdb DEBUG", "forward(self, rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:,", "import yaml from model.utils.config import cfg from model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform", "class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP", "clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import nms from model.roi_layers import nms import pdb", "bg # sort scores max_scores = np.amax(cls_prob, axis=1) # keep top scores keep_index", ":], keep_index def backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass def reshape(self,", "cls_prob, bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas", "# ignore bg # sort scores max_scores = np.amax(cls_prob, axis=1) # keep top", "1:] # ignore bg # sort scores max_scores = np.amax(cls_prob, axis=1) # keep", "out_data, in_grad, aux): pass def reshape(self, bottom, top): \"\"\"Reshaping happens during the call", "num_keep_index = int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8]", "# -------------------------------------------------------- # Reorganized and modified Modified by <NAME> # ------------------------------------------------------- import torch", "keep_index def backward(self, req, out_grad, in_data, out_data, in_grad, aux): pass def reshape(self, bottom,", "proposals.astype(np.float32, copy=False))) return blob[keep_index, :], keep_index def backward(self, req, out_grad, in_data, out_data, in_grad,", "self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = 
im_info.cpu().detach().numpy()[0, :]", "= class_agnostic self._top = cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index =", "model.nms.nms_wrapper import nms from model.roi_layers import nms import pdb DEBUG = False class", "1:] bbox_deltas = bbox_pred_tensor.cpu().detach().numpy()[:, 4:8] im_info = im_info.cpu().detach().numpy()[0, :] cls_prob = cls_prob.cpu().detach().numpy()[:, 1:]", "(c) 2018 University of Illinois # Licensed under The MIT License [see LICENSE", "def forward(self, rois, cls_prob, bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0] * self._top) rois =", "# Licensed under The MIT License [see LICENSE for details] # -------------------------------------------------------- #", "Reorganized and modified Modified by <NAME> # ------------------------------------------------------- import torch import torch.nn as", "math import yaml from model.utils.config import cfg from model.rpn.generate_anchors import generate_anchors # from", "# from model.nms.nms_wrapper import nms from model.roi_layers import nms import pdb DEBUG =", "import bbox_pred, clip_boxes, bbox_overlaps # from model.nms.nms_wrapper import nms from model.roi_layers import nms", "super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor,", "= np.argsort(-max_scores)[:num_keep_index] proposals = bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0],", "nn import numpy as np import math import yaml from model.utils.config import cfg", "aux): pass def reshape(self, bottom, top): \"\"\"Reshaping happens during the call to forward.\"\"\"", "class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic = class_agnostic self._top = cfg.DCR.TOP def forward(self, rois, cls_prob,", "self._top = cfg.DCR.TOP def forward(self, rois, cls_prob, bbox_pred_tensor, im_info): 
num_keep_index = int(rois.shape[0] *", "bbox_pred_tensor, im_info): num_keep_index = int(rois.shape[0] * self._top) rois = rois[0].cpu().detach().numpy()[:, 1:] bbox_deltas =", "cls_prob = cls_prob.cpu().detach().numpy()[:, 1:] # ignore bg # sort scores max_scores = np.amax(cls_prob,", "modified Modified by <NAME> # ------------------------------------------------------- import torch import torch.nn as nn import", "from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes, bbox_overlaps #", "proposals = bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)", "generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import bbox_pred, clip_boxes,", "copy=False))) return blob[keep_index, :], keep_index def backward(self, req, out_grad, in_data, out_data, in_grad, aux):", "Licensed under The MIT License [see LICENSE for details] # -------------------------------------------------------- # --------------------------------------------------------", "import pdb DEBUG = False class _DCRProposalLayer(nn.Module): def __init__(self, class_agnostic): super(_DCRProposalLayer, self).__init__() self.class_agnostic", "# -------------------------------------------------------- # -------------------------------------------------------- # Reorganized and modified Modified by <NAME> # -------------------------------------------------------", "= bbox_pred(rois, bbox_deltas) proposals = clip_boxes(proposals, im_info[:2]) batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) blob", "yaml from model.utils.config import cfg from model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform import", "LICENSE for details] # -------------------------------------------------------- # 
-------------------------------------------------------- # Reorganized and modified Modified by", "import torch.nn as nn import numpy as np import math import yaml from", "model.rpn.generate_anchors import generate_anchors # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch from .bbox.bbox_transform import", "# Spatial Attention Network withFeature Mimicking # Copyright (c) 2018 University of Illinois", "in_data, out_data, in_grad, aux): pass def reshape(self, bottom, top): \"\"\"Reshaping happens during the" ]
[ "(100 * self.current * self.percent()) #------------------------------------------------------------ if __name__ == '__main__': import time prg", "self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def percent(self): if self.total", "from DTL.qt import QtGui from DTL.api import apiUtils from DTL.gui import Core, Dialog", "#------------------------------------------------------------ def value(self, recursive=True): return (100 * self.current * self.percent()) #------------------------------------------------------------ if __name__", "Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total',", "self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def percent(self): if self.total > 0 : return", "apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message)", "#------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1)", "class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 
'total', total) apiUtils.synthesize(self, 'current',", "import apiUtils from DTL.gui import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def", "return 0 #------------------------------------------------------------ def value(self, recursive=True): return (100 * self.current * self.percent()) #------------------------------------------------------------", "if self.total > 0 : return 1.0 / self.total else: return 0 #------------------------------------------------------------", "DTL.gui import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1, current=0,", "Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self,", "'total', total) apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update()", "value(self, recursive=True): return (100 * self.current * self.percent()) #------------------------------------------------------------ if __name__ == '__main__':", "return (100 * self.current * self.percent()) #------------------------------------------------------------ if __name__ == '__main__': import time", "#------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): 
#------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total)", "from DTL.api import apiUtils from DTL.gui import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog):", "#------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self,", "percent(self): if self.total > 0 : return 1.0 / self.total else: return 0", "* self.current * self.percent()) #------------------------------------------------------------ if __name__ == '__main__': import time prg =", "'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget,", "def percent(self): if self.total > 0 : return 1.0 / self.total else: return", "self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def", "DTL.qt import QtGui from DTL.api import apiUtils from DTL.gui import Core, Dialog #------------------------------------------------------------", "def value(self, recursive=True): return (100 * self.current * self.percent()) #------------------------------------------------------------ if __name__ ==", "self.total else: return 0 
#------------------------------------------------------------ def value(self, recursive=True): return (100 * self.current *", "<filename>DTL/gui/widgets/progresswidget.py from DTL.qt import QtGui from DTL.api import apiUtils from DTL.gui import Core,", "QtGui from DTL.api import apiUtils from DTL.gui import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class", "def increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def percent(self): if self.total > 0", "import QtGui from DTL.api import apiUtils from DTL.gui import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------", "#------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def percent(self): if self.total >", "current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value())", "else: return 0 #------------------------------------------------------------ def value(self, recursive=True): return (100 * self.current * self.percent())", "DTL.api import apiUtils from DTL.gui import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------", ": return 1.0 / self.total else: return 0 #------------------------------------------------------------ def value(self, recursive=True): return", "0 #------------------------------------------------------------ def 
value(self, recursive=True): return (100 * self.current * self.percent()) #------------------------------------------------------------ if", "0 : return 1.0 / self.total else: return 0 #------------------------------------------------------------ def value(self, recursive=True):", "import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'):", "#------------------------------------------------------------ def percent(self): if self.total > 0 : return 1.0 / self.total else:", "self.total > 0 : return 1.0 / self.total else: return 0 #------------------------------------------------------------ def", "self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current +", "time prg = ProgressWidget(total=5, message='Test Loading...') for i in range(5): time.sleep(1) prg.setMessage(str(i)) prg.increment()", "update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------", "self.update() #------------------------------------------------------------ def percent(self): if self.total > 0 : return 1.0 / self.total", "__name__ == '__main__': import time prg = ProgressWidget(total=5, message='Test Loading...') for i in", "apiUtils from DTL.gui import Core, Dialog #------------------------------------------------------------ 
#------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self,", "apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show()", "1) self.update() #------------------------------------------------------------ def percent(self): if self.total > 0 : return 1.0 /", "message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update()", "1.0 / self.total else: return 0 #------------------------------------------------------------ def value(self, recursive=True): return (100 *", "self.center() self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self):", "total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1)", "== '__main__': import time prg = ProgressWidget(total=5, message='Test Loading...') for i in range(5):", "self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def", "total) apiUtils.synthesize(self, 
'current', current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------", "'__main__': import time prg = ProgressWidget(total=5, message='Test Loading...') for i in range(5): time.sleep(1)", "* self.percent()) #------------------------------------------------------------ if __name__ == '__main__': import time prg = ProgressWidget(total=5, message='Test", "+ 1) self.update() #------------------------------------------------------------ def percent(self): if self.total > 0 : return 1.0", "prg = ProgressWidget(total=5, message='Test Loading...') for i in range(5): time.sleep(1) prg.setMessage(str(i)) prg.increment() prg.close()", "ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current)", "message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center()", "self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------", "/ self.total else: return 0 #------------------------------------------------------------ def value(self, recursive=True): return (100 * self.current", "'current', current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() 
#------------------------------------------------------------ def update(self):", "self.current * self.percent()) #------------------------------------------------------------ if __name__ == '__main__': import time prg = ProgressWidget(total=5,", "return 1.0 / self.total else: return 0 #------------------------------------------------------------ def value(self, recursive=True): return (100", "#------------------------------------------------------------ if __name__ == '__main__': import time prg = ProgressWidget(total=5, message='Test Loading...') for", "super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def percent(self): if", "self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def percent(self):", "recursive=True): return (100 * self.current * self.percent()) #------------------------------------------------------------ if __name__ == '__main__': import", "from DTL.gui import Core, Dialog #------------------------------------------------------------ #------------------------------------------------------------ class ProgressWidget(Dialog): #------------------------------------------------------------ def onFinalize(self, total=1,", "def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current + 1) self.update()", "current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message', message) 
self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message)", "self.percent()) #------------------------------------------------------------ if __name__ == '__main__': import time prg = ProgressWidget(total=5, message='Test Loading...')", "increment(self): self.setCurrent(self.current + 1) self.update() #------------------------------------------------------------ def percent(self): if self.total > 0 :", "self.show() self.update() #------------------------------------------------------------ def update(self): self.ui_ProgressBar.setValue(self.value()) self.ui_Label.setText(self.message) super(ProgressWidget, self).update() #------------------------------------------------------------ def increment(self): self.setCurrent(self.current", "apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message', message) self.ui_ProgressBar.setValue(1) self.ui_Label.setText(self.message) self.center() self.show() self.update() #------------------------------------------------------------ def", "if __name__ == '__main__': import time prg = ProgressWidget(total=5, message='Test Loading...') for i", "import time prg = ProgressWidget(total=5, message='Test Loading...') for i in range(5): time.sleep(1) prg.setMessage(str(i))", "def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message',", "> 0 : return 1.0 / self.total else: return 0 #------------------------------------------------------------ def value(self,", "onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current) apiUtils.synthesize(self, 'message', message)", "#------------------------------------------------------------ def onFinalize(self, total=1, current=0, message='Loading...'): apiUtils.synthesize(self, 'total', total) apiUtils.synthesize(self, 'current', current) 
apiUtils.synthesize(self," ]
[ "in fs_form_list] @property def site_name(self): if self.site is not None: return u'{}'.format(self.site.name)\\ @property", "blank=True) project = models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today)", "models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\")", "None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage: return", "blank=True) date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user", "image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs): #", "FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta: proxy = True def save(self, *args, **kwargs):", "self.project_fxf: return self.project_fxf else: return self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id = self.site_fxf_id", "if question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer = '' if g_question+\"/\"+question in json_answer:", "len(formhub_nodes) == 1: formhub_node = formhub_nodes[0] else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes", "def check_version(xml, n): for i in range(n, 0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\"", "self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 
'xf': ValidationError(_('Duplicate Schedule", "**kwargs): # site = kwargs.get('instance') # created = kwargs.get('created') # if created: #", "Already Used in Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists():", "is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS)", "== \"formhub\"] if len(formhub_nodes) > 1: raise Exception( u\"Multiple formhub nodes within main", "data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class", "None: update_data = {} created = False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False)", "order = IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2,", "= models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready =", "= first_children['type'] answer= '' if question in json_answer: if first_children['type'] == 'note': answer=", "else: self.has_start_time = False def get_survey(self): if not hasattr(self, \"_survey\"): try: builder =", "verbose_name_plural = _(\"FieldSight Form Groups\") ordering = (\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\",", "question_type == 'photo' or question_type == 'audio' or question_type == 'video': answer =", 
"site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf", "pms in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages:", "not self.stage else False def sub_stage_count(self): if not self.stage: return Stage.objects.filter(stage=self).count() return 0", "file_name: file_name = self.file_name() file_name, file_ext = os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes =", "import models from django.db.models import Max from django.db.models.signals import post_save, pre_delete from django.utils.translation", "FieldSightXF.objects.filter(stage=self).count() > 0 else False def form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return", "'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for", "nodes with the id '%s'\" % self.id_string) survey_node = survey_nodes[0] formhub_nodes = [n", "choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False)", "if len(formhub_nodes) == 0: # append the calculate bind node calculate_node = doc.createElement(\"bind\")", "if self.stage: self.group = self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self): return \"Stage\" if", "return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id')", "is_scheduled=False, 
is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == self.pk:", "models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False)", "models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class", "= first_children['name'] group_answer = json_answer[r_question] answer = '' if r_question+\"/\"+question in gnr_answer: if", "[(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2,", "if not self.is_scheduled and not self.is_staged: return \"general\" def form_type_id(self): if self.is_scheduled and", "for first_children in g_object['children']: question = first_children['name'] question_type = first_children['type'] if question_type ==", "-1: self.has_start_time = True else: self.has_start_time = False def get_survey(self): if not hasattr(self,", "self).save(*args, **kwargs) def get_display_name(self): return \"Stage\" if not self.stage else \"SubStage\" def is_main_stage(self):", "if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status @property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod", "= models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\"", "base_url = DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def parse_repeat(r_object): 
r_question = r_object['name'] data.append(r_question) if", "json_answer[r_question]: for first_children in r_object['children']: question_type = first_children['type'] question = first_children['name'] group_answer =", "schedule_name(self): if self.schedule: return self.schedule.name def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not", "@property def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged)", "is not None and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using,", "models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created =", "self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if not self.version: self.version =", "getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image", "\"Stage\" if not self.stage else \"SubStage\" def is_main_stage(self): return True if not self.stage", "return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image =", "= (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title", "self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) # Call the \"real\" save() method. 
@property", "null=True, related_name='site_instances') project = models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL)", "get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site is not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1]", "created and not instance.is_staged: send_message_project_form(instance) elif created and instance.site is not None and", "models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml = models.TextField() id_string = models.CharField(editable=False, max_length=255) title", "= models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True)", "db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form Group\") verbose_name_plural = _(\"FieldSight Form Groups\")", "ordering = ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return '{0}", "import re from django.contrib.auth.models import User from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import", "if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return", "getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site is not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else:", "= first_children['name'] answer = '' if 'label' in first_children: question = first_children['label'] 
row={'type':question_type,", "Node.ELEMENT_NODE and n.tagName == \"formhub\"] if len(formhub_nodes) > 1: raise Exception( u\"Multiple formhub", "related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self): if self.instance: return u\"%s ---------------%s\" % (str(self.instance.id)", "blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False)", "self._set_uuid_in_xml() if not self.version: self.version = self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self): return", "# project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages: # continue #", "from __future__ import unicode_literals import datetime import os import json import re from", "*args, **kwargs): if self.stage: self.group = self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self): return", "== \"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes) != 1: raise Exception(u\"Multiple instance nodes", "= \\ builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return", "None: return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self):", "= _(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\",", "raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')), }) @staticmethod def get_xform_id_list(site_id): fs_form_list =", "import GenericRelation from django.contrib.postgres.fields import 
ArrayField from django.core.exceptions import ValidationError from django.core.urlresolvers import", "django.utils.translation import ugettext_lazy as _ from django.dispatch import receiver from jsonfield import JSONField", "self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf: return self.project_fxf", "\"\"\" if not file_name: file_name = self.file_name() file_name, file_ext = os.path.splitext(file_name) doc =", "self.is_staged: return \"staged\" if self.is_survey: return \"survey\" if not self.is_scheduled and not self.is_staged:", "= models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified", "models.CharField(max_length=256) description = models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0,", "in r_object['children']: question_type = first_children['type'] question = first_children['name'] answer = '' if 'label'", "for i in range(n, 0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m", "import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form,", "calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" %", "related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global", "'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add 
bind to automatically set UUID node in", "}) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,", "if not self.is_scheduled and not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise", "ValidationError({ 'xf': ValidationError(_('Form Already Used in Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False,", "return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms',", "_(\"XForms\") ordering = (\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\":", "def __unicode__(self): if self.site_fxf is None: return u\"%s\" % str(self.submitted_by) + \"---\" +", "choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date']", "blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True)", "or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer = json_answer[question]", "# append the calculate bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" %", "@property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property def form_status(self): 
status", "fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self): if self.instance: return u\"%s", "related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self): if self.instance: return", "= kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data)", "schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end)", "\"staged\" if self.is_survey: return \"survey\" if not self.is_scheduled and not self.is_staged: return \"general\"", "stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms #", "= models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description = models.TextField(default=u'', null=True)", "class Meta: verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model):", "# if pss.tags and site.type: # if site.type.id not in pss.tags: # continue", "DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def parse_repeat(r_object): r_question = r_object['name'] data.append(r_question) if r_question in", "+ 1 else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order", "description = models.TextField(default=u'', null=True) xml = models.TextField() id_string = 
models.CharField(editable=False, max_length=255) title =", "survey = create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) #", "FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project,", "Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf,", "DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm,", "return Stage.objects.filter(stage=self).count() return 0 def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() > 0 else", "unique_together = ('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add bind to automatically set", "raise Exception( u\"Multiple survey nodes with the id '%s'\" % self.id_string) survey_node =", "in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == \"formhub\"] if len(formhub_nodes) >", "re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True)", "parse_group(\"\",first_children) else: question = first_children['name'] question_type = first_children['type'] answer= '' if question in", "= 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer = json_answer[question] if 'label' in first_children: 
question", "self.id_string) survey_node = survey_nodes[0] formhub_nodes = [n for n in survey_node.childNodes if n.nodeType", "site_main_stage.save() # for pss in project_sub_stages: # if pss.tags and site.type: # if", "ArrayField from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.db import models", "pss.tags: # continue # site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id,", "created = True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi, created class FInstanceManager(models.Manager):", "models.BooleanField(default=False) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs =", "self.is_staged and self.stage: return self.stage.id return None def stage_name(self): if self.stage: return self.stage.name", "[] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property def form_status(self):", "'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name =", "(3, 'Approved'), ] class FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True)", "stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else: if not Stage.objects.filter(project=project).exists():", "def __unicode__(self): return getattr(self, \"name\", \"\") class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created", "blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, 
choices=FORM_STATUS) fsform =", "instance.project is not None and created and not instance.is_staged: send_message_project_form(instance) elif created and", "= re.compile('version=\"(.*)\">') m = p.search(xml) if m: return m.group(1) version = check_version(xml) if", "= 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time = True else: self.has_start_time = False", "clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER", "pass else: fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None", "verbose_name_plural = _(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end') def form_exists(self): return True if", "return \"general\" def form_type_id(self): if self.is_scheduled and self.schedule: return self.schedule.id if self.is_staged and", "if len(model_nodes) != 1: raise Exception(u\"xml contains multiple model nodes\") model_node = model_nodes[0]", "= [n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName ==", "== \"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children) else: question = first_children['name'] question_type", "= project.stages.filter(stage__isnull=True) # for pms in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True)", "import receiver from jsonfield import JSONField from pyxform import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json", "# description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf =", 
"models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance,", "fxf_id = self.site_fxf_id else: fxf_id = self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) + \"#/\"", "null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level", "= first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for first_children in parent_object: if", "**kwargs): if self.stage: self.group = self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self): return \"Stage\"", "= survey_nodes[0] formhub_nodes = [n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE", "= Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 def __unicode__(self):", "p = re.compile('version=\"(.*)\">') m = p.search(xml) if m: return m.group(1) version = check_version(xml)", "related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True,", "= models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project =", "FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return", "None: return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site is not None: 
return u'{}'.format(self.site.name)", "= mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order", "len(formhub_nodes) == 0: # append the calculate bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute(", "if first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo' or first_children['type'] ==", "related_name=\"deploy_data\", null=True) def upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model):", "xml = models.TextField() id_string = models.CharField(editable=False, max_length=255) title = models.CharField(editable=False, max_length=255) uuid =", "{1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is None:", "self.form_exists() else None @property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None def", "file_name = self.file_name() file_name, file_ext = os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\")", "self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status @property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def", "ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey survey = property(get_survey)", "onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL =", "reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if 
self.is_scheduled: return \"scheduled\"", "first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children) else: question = first_children['name']", "\"day\", \"\") class Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site =", "def get_version(self): return self.instance.json['__version__'] def save(self, *args, **kwargs): self.version = self.get_version if self.project_fxf", "parse_group(g_question+\"/\",first_children) continue answer = '' if g_question+\"/\"+question in json_answer: if question_type == 'note':", "message = models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status =", "pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31, blank=True, null=True) text = models.TextField(blank=True,", "created = False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created =", "not instance.is_staged: send_message_project_form(instance) elif created and instance.site is not None and not instance.is_staged:", "name = models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified =", "= self.instance.xform.user.username def parse_repeat(r_object): r_question = r_object['name'] data.append(r_question) if r_question in json_answer: for", "return \"Stage\" if not self.stage else \"SubStage\" def is_main_stage(self): return True if not", "reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class 
InstanceImages(models.Model): instance_status", "FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), }) if not self.is_scheduled", "= models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site) # def", "node.nodeType == Node.ELEMENT_NODE and node.tagName == \"uuid\"] if len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if", "if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate", "+ self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def instance_json(self): return", "project = models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf =", "= json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def parse_repeat(r_object): r_question = r_object['name']", "= re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1) return", "from xml.dom import Node from onadata.apps.fieldsight.models import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import", "return getattr(self, \"name\", \"\") class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True)", "Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def 
flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site,", "date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\",", "'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time = True else: self.has_start_time = False def", "return self._survey survey = property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site,", "fxf_id = self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) + \"#/\" + str(self.instance.id) def get_abr_form_status(self):", "xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property def form_status(self): status = 0", "__unicode__(self): if self.site_fxf is None: return u\"%s\" % str(self.submitted_by) + \"---\" + self.project_fxf.xf.title", "in r_object['children']: question_type = first_children['type'] question = first_children['name'] group_answer = json_answer[r_question] answer =", "= '' if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row)", "\"general\" def form_type_id(self): if self.is_scheduled and self.schedule: return self.schedule.id if self.is_staged and self.stage:", "pss.stage_forms # site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() #", "onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from", "= 
survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for node in formhub_node.childNodes if node.nodeType", "\\ builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey", "is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects = FInstanceDeletedManager()", "+ self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer = self.instance.json json_question", "return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1:", "not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), }) if not", "\"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",) def", "= models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by", "blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) #", "SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True,", "from django.contrib.postgres.fields import ArrayField from django.core.exceptions import ValidationError from django.core.urlresolvers 
import reverse from", "def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({", "else: question = first_children['name'] question_type = first_children['type'] answer= '' if question in json_answer:", "= super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance, update_data=None): if update_data is", "file_ext = os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) != 1:", "project = site.project # project_main_stages = project.stages.filter(stage__isnull=True) # for pms in project_main_stages: #", "pyxform import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from xml.dom import Node from", "if self.form_exists() else None @property def form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status", "onadata.apps.fieldsight.models import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import send_message,", "from django.contrib.sites.models import Site as DjangoSite from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0,", "update_data = {} created = False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except", "blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project", "models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization = 
models.ForeignKey(Organization, null=True, blank=True)", "= ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage' verbose_name =", "= SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey", "return u\"%s\" % str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def", "= Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else: if", "def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return []", "max_length=255) uuid = models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'') @property def get_version(self): import", "null=True) text = models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf =", "answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer = json_answer[question] if 'label' in first_children:", "models.CharField(max_length=255, default=u'') @property def get_version(self): import re n = XML_VERSION_MAX_ITER xml = self.xml", "return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False def form_name(self): if not FieldSightXF.objects.filter(stage=self).count():", "project = models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days", "models.DateTimeField(auto_now=True) xls = 
models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml", "null=True, blank=True) title = models.CharField(max_length=31, blank=True, null=True) text = models.TextField(blank=True, null=True) stage =", "file_name or node.attributes.get('id'))] if len(survey_nodes) != 1: raise Exception( u\"Multiple survey nodes with", "def url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string } ) def", "return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1:", "formhub nodes within main instance node\") elif len(formhub_nodes) == 1: formhub_node = formhub_nodes[0]", "order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() # for pss in project_sub_stages:", "date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False)", "= '' if g_question+\"/\"+question in json_answer: if question_type == 'note': answer= '' elif", "= models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31, blank=True, null=True) text", "== 'audio' or question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer", "doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for node in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE", "re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml) if m1: return m1.group(1) p1 =", "form_count(self): return 
self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id):", "survey_node.firstChild) uuid_nodes = [node for node in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and", "order = mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order'))", "'xf': ValidationError(_('Duplicate Schedule Data')), }) if not self.is_scheduled and not self.is_staged: if self.site:", "first_children['type'] answer= '' if question in json_answer: if first_children['type'] == 'note': answer= ''", "logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"),", "django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import ArrayField from django.core.exceptions import ValidationError from django.core.urlresolvers", "return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model):", "== \"uuid\"] if len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: # append", "# schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data = JSONField(default={}) date =", "= Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else: mo =", "@property def form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status", "super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict = 
super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod", "fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created = True fspi = FieldSightParsedInstance(instance=instance)", "django.contrib.sites.models import Site as DjangoSite from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0, 'Global'),", "= json_answer[g_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row)", "> 0 else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property", "verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",) def url(self): return reverse(", "instance.is_staged: send_message_project_form(instance) elif created and instance.site is not None and not instance.is_staged: send_message(instance)", "async=False) except FieldSightParsedInstance.DoesNotExist: created = True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi,", "models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data'", "'Approved'), ] class FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True) date_created", "models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\") verbose_name_plural =", "stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages: # continue # site_main_stage = Stage(name=pms.name, 
order=pms.order,", "first_children in parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children)", "return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together = ('xform',", "flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site, project, stage): if site:", "'date_range_end') def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False def form(self):", "instance.site is not None and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance,", "= models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31, blank=True, null=True) text = models.TextField(blank=True, null=True)", "= self.xml p = re.compile('version=\"(.*)\">') m = p.search(xml) if m: return m.group(1) version", "null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances')", "null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,)", "send_message_project_form, check_version from onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models", "return u\"%s\" % str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) 
+", "verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\":", "False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property def xf(self): return", "verbose_name = _(\"FieldSight Form Group\") verbose_name_plural = _(\"FieldSight Form Groups\") ordering = (\"-date_modified\",)", "Add bind to automatically set UUID node in XML. \"\"\" if not file_name:", "not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already", "date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s,", "project, stage): if site: if not Stage.objects.filter(site=site).exists(): return 1 elif stage is not", "= re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput =", "self.schedule: return self.schedule.name def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk ==", "null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial,", "return self.project_fxf.id else: return self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf: return self.project_fxf else:", "FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled: 
if", "(\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\", \"\") class Stage(models.Model): name = models.CharField(max_length=256) description", "def get_responces(self): data=[] json_answer = self.instance.json json_question = json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain media_folder", "inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True) json", "models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" ,", "instance, created, **kwargs): if instance.project is not None and created and not instance.is_staged:", "first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for first_children", "app devs to make them easier. 
from django.contrib.sites.models import Site as DjangoSite from", "first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question =", "1: formhub_node = formhub_nodes[0] else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node", "from django.db.models.signals import post_save, pre_delete from django.utils.translation import ugettext_lazy as _ from django.dispatch", "pre_delete from django.utils.translation import ugettext_lazy as _ from django.dispatch import receiver from jsonfield", "self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')), }) @staticmethod def get_xform_id_list(site_id): fs_form_list", "pass elif instance.is_staged: pass else: fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance):", "fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data = JSONField(default={}) date", "p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if m: return m.group(1)", "models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs =", "Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes) != 1: raise", "schedule_form in schedule_forms: # schedule = schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) # s", "node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") 
calculate_node.setAttribute(\"calculate\", \"'%s'\"", "site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged", "self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id = self.site_fxf_id else: fxf_id = self.project_fxf_id return", "related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True)", "from pyxform.xform2json import create_survey_element_from_xml from xml.dom import Node from onadata.apps.fieldsight.models import Site, Project,", "survey nodes with the id '%s'\" % self.id_string) survey_node = survey_nodes[0] formhub_nodes =", "get domain to give complete url for app devs to make them easier.", "self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self):", "if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname,", "raise Exception( u\"Multiple formhub nodes within main instance node\") elif len(formhub_nodes) == 1:", ", null=True, blank=True) def __unicode__(self): if self.instance: return u\"%s ---------------%s\" % (str(self.instance.id) ,self.offline_site_id)", "if self.site_fxf: fxf_id = self.site_fxf_id else: fxf_id = self.project_fxf_id return \"/forms/forms/\" + str(fxf_id)", "FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms # site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage,", "from django.contrib.auth.models import User from django.contrib.contenttypes.fields 
import GenericRelation from django.contrib.postgres.fields import ArrayField from", "in json_answer: for gnr_answer in json_answer[r_question]: for first_children in r_object['children']: question_type = first_children['type']", "and instance.site is not None and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender,", "mo.get('order__max', 0) return order + 1 else: if not Stage.objects.filter(project=project).exists(): return 1 elif", "__unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created,", "return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if m1:", "\"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\"", "'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'),", "max_length=256, blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"schedules\",", "= first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by)", "= models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey =", "\"name\", \"\") class Days(models.Model): day = models.CharField(max_length=9) index = 
models.IntegerField() def __unicode__(self): return", "def parse_individual_questions(parent_object): for first_children in parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type']", "self.site_fxf is None: return u\"%s\" % str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return u\"%s\"", "\"site\": self.site.username, \"id_string\": self.id_string } ) def getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,)", "xform = models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True) json =", "models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description", "question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']}", "group_answer = json_answer[r_question] answer = '' if r_question+\"/\"+question in gnr_answer: if first_children['type'] ==", "project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog')", "p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if m1: return m1.group(1)", "raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not", "self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): 
starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring)", "else: answer = gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question,", "db_table = 'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\"))", "def schedule_name(self): if self.schedule: return self.schedule.name def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if", "= GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form Group\") verbose_name_plural", "else: if not Stage.objects.filter(project=project).exists(): return 1 elif stage is not None: if not", "create_messages(sender, instance, created, **kwargs): if instance.project is not None and created and not", "from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain to give", "self.instance.xform.user.username def parse_repeat(r_object): r_question = r_object['name'] data.append(r_question) if r_question in json_answer: for gnr_answer", "to automatically set UUID node in XML. 
\"\"\" if not file_name: file_name =", "if m: return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml)", "get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class", "instance node\") elif len(formhub_nodes) == 1: formhub_node = formhub_nodes[0] else: formhub_node = survey_node.insertBefore(", "= models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level =", "= models.TextField() id_string = models.CharField(editable=False, max_length=255) title = models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32,", "= (\"order\",) def save(self, *args, **kwargs): if self.stage: self.group = self.stage.group super(Stage, self).save(*args,", "Group\") verbose_name_plural = _(\"FieldSight Form Groups\") ordering = (\"-date_modified\",) def __unicode__(self): return getattr(self,", "blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"schedules\", null=True,", "null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global =", "return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id):", "len(instance_nodes) != 1: raise Exception(u\"Multiple instance nodes without the id \" u\"attribute, can't", 
"doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise Exception(u\"xml contains multiple model nodes\") model_node =", "return True if not self.stage else False def sub_stage_count(self): if not self.stage: return", "import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import", "= first_children['name'] question_type = first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer =", "= inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True)", "in json_answer: if question_type == 'note': answer= '' elif question_type == 'photo' or", "media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if 'label' in first_children: question = first_children['label']", "FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None return", "= ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return '{0} form", "get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model):", "else: self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) # Call the \"real\" save() method.", "'' elif question_type == 'photo' or question_type == 'audio' or question_type == 'video':", "automatically set UUID node in XML. 
\"\"\" if not file_name: file_name = self.file_name()", "set_uuid SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'),", "Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property", "self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if", "title = models.CharField(max_length=31, blank=True, null=True) text = models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\",", "re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub(", "# site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms # site_form = FieldSightXF(is_staged=True,", "= project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form in schedule_forms: # schedule = schedule_form.schedule", "None: self.site.update_status() if self.form_status is None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else: self.form_status", "= models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table =", "and not instance.is_staged: send_message_project_form(instance) elif created and instance.site is not None and not", "FInstanceManager() deleted_objects = FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__'] def", "= models.ForeignKey(Site, related_name=\"schedules\", null=True, 
blank=True) project = models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start =", "__future__ import unicode_literals import datetime import os import json import re from django.contrib.auth.models", "'photo' or question_type == 'audio' or question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder", "active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property", "**kwargs): if instance.project is not None: pass elif instance.is_staged: pass else: fxf =", "models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form =", "r_object['children']: question_type = first_children['type'] question = first_children['name'] answer = '' if 'label' in", "user = models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date'] def", "prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput)", "= models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class", "return None @property def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}- {}'.format(self.xf,", "models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by =", "False def 
form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if", "**kwargs) def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance,", "related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs):", "not self.stage: return Stage.objects.filter(stage=self).count() return 0 def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() >", "models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator", "\"name\", \"\") class Stage(models.Model): name = models.CharField(max_length=256) description = models.TextField(blank=True, null=True) group =", "+ \"---\" + self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer =", "is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog')", "g_object['children']: question = first_children['name'] question_type = first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children) continue", "models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material", "# site_form = FieldSightXF(is_staged=True, 
default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms", "save(self, *args, **kwargs): if self.xls and not self.xml: survey = create_survey_from_xls(self.xls) self.json =", "is_staged=False, site=self.site, project=self.project)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')), })", "send_delete_message(sender, instance, using, **kwargs): if instance.project is not None: pass elif instance.is_staged: pass", "def save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self):", "first child whose id attribute matches our id_string survey_nodes = [node for node", "def form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not", "null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight", "default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form", "is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False)", "= models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def", "calculate bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\")", "sub_stage_count(self): if not self.stage: return Stage.objects.filter(stage=self).count() 
return 0 def form_exists(self): return True if", "'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if 'label' in", "= _(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end') def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count()", "u\"Multiple survey nodes with the id '%s'\" % self.id_string) survey_node = survey_nodes[0] formhub_nodes", "\"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def", "and self.stage: return self.stage.id return None def stage_name(self): if self.stage: return self.stage.name def", "Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages: # continue # site_main_stage = Stage(name=pms.name,", "\"\"\" Add bind to automatically set UUID node in XML. 
\"\"\" if not", "first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for first_children in r_object['children']: question_type = first_children['type']", "\"\"\") m = p.search(xml) if m: return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\"", "set_uuid(self) # self._set_uuid_in_xml() if not self.version: self.version = self.get_version super(XformHistory, self).save(*args, **kwargs) def", "self.form_status = self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) # Call the", "None def save(self, *args, **kwargs): if self.xls and not self.xml: survey = create_survey_from_xls(self.xls)", "= models.IntegerField(default=0) weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class", "or first_children['type'] == 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question]", "= models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0,", "xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\")", "models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\", null=True,", "date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage = 
models.OneToOneField(Stage, blank=True,", "= prev_groupname+g_object['name'] for first_children in g_object['children']: question = first_children['name'] question_type = first_children['type'] if", "model_node = model_nodes[0] instance_nodes = [node for node in model_node.childNodes if node.nodeType ==", "date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\",", "'xf': ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk ==", "in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for first_children in", "instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta: proxy =", "# project_main_stages = project.stages.filter(stage__isnull=True) # for pms in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk,", "xf=general_form.xf, fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form in", "in json_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo' or", "inlineOutput) self.xml = inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls =", "None and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs): if", "def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is None: return '{0} form", "project_main_stages = 
project.stages.filter(stage__isnull=True) # for pms in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False,", "submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects =", "\"\"\".format(i)) m1 = p.search(xml) if m1: return m1.group(1) return None def save(self, *args,", "import ugettext_lazy as _ from django.dispatch import receiver from jsonfield import JSONField from", "getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if", "= models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\",", "return self.schedule.id if self.is_staged and self.stage: return self.stage.id return None def stage_name(self): if", "models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False)", "if self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf: return", "project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\")", "not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled:", "return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def", "for node in 
model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and", "((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\")", "verbose_name='Education Images',) # @receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance')", "jsonfield import JSONField from pyxform import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from", "make them easier. from django.contrib.sites.models import Site as DjangoSite from onadata.libs.utils.model_tools import set_uuid", "Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\")", "r_object['name'] data.append(r_question) if r_question in json_answer: for gnr_answer in json_answer[r_question]: for first_children in", "1: raise Exception( u\"Multiple survey nodes with the id '%s'\" % self.id_string) survey_node", "self.fsform: self.fsform.pk return None @property def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return u'{}-", "# FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form) # # schedule_forms =", "'label' in first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']}", "FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == 
self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form", "first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if", "@property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None def __unicode__(self): return getattr(self,", "# if site.type.id not in pss.tags: # continue # site_sub_stage = Stage(name=pss.name, order=pss.order,", "return \"/forms/forms/\" + str(fxf_id) + \"#/\" + str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def", "None class Meta: proxy = True def save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data',", "project.stages.filter(stage__isnull=True) # for pms in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) #", "to give complete url for app devs to make them easier. 
from django.contrib.sites.models", "def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for first_children in g_object['children']: question = first_children['name']", "= self.site_fxf_id else: fxf_id = self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) + \"#/\" +", "raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), }) if not self.is_scheduled and not self.is_staged:", "survey_nodes[0] formhub_nodes = [n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and", "\"\"\".format(i)) m = p.search(xml) if m: return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\"", "fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image", "class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf =", "site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start", "= GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural =", "logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural", "question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for first_children in parent_object:", "general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # 
xf=general_form.xf, fsform=general_form) # # schedule_forms", "node.attributes.get('id'))] if len(survey_nodes) != 1: raise Exception( u\"Multiple survey nodes with the id", "stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for", "main instance node\") elif len(formhub_nodes) == 1: formhub_node = formhub_nodes[0] else: formhub_node =", "def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in fs_form_list] @property def", "if m1: return m1.group(1) return None def save(self, *args, **kwargs): if self.xls and", "m = p.search(xml) if m: return m.group(1) version = check_version(xml) if version: return", "answer = '' if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer}", "self.schedule.id if self.is_staged and self.stage: return self.stage.id return None def stage_name(self): if self.stage:", "shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True,", "0) return order + 1 else: if not Stage.objects.filter(project=project).exists(): return 1 elif stage", "= models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status =", "self.is_survey: return \"survey\" if not self.is_scheduled and not self.is_staged: return \"general\" def form_type_id(self):", "not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None", "if len(uuid_nodes) == 0: 
formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: # append the calculate", "models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True)", "output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput", "FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), })", "django.db import models from django.db.models import Max from django.db.models.signals import post_save, pre_delete from", "None @property def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site,", "models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage'", "= None class Meta: proxy = True def save(self, *args, **kwargs): self._update_fs_data =", "models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization =", "else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site,", "project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight", "s.save() # FieldSightXF.objects.create(is_scheduled=True, 
default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed", "% str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) + \"---\" +", "= models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created", "choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta:", "models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\")", "# project = site.project # project_main_stages = project.stages.filter(stage__isnull=True) # for pms in project_main_stages:", "project = models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created", "related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\")", "= 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status @property def form_count(self): return", "models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects", "else: return self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id = 
self.site_fxf_id else: fxf_id =", "not None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self): if self.fsform: self.fsform.pk return", "media_folder = self.instance.xform.user.username def parse_repeat(r_object): r_question = r_object['name'] data.append(r_question) if r_question in json_answer:", "# @receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance') # created", "id_string = models.CharField(editable=False, max_length=255) title = models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'') version", "0) return order + 1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max',", "encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey survey = property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id =", "def __unicode__(self): if self.instance: return u\"%s ---------------%s\" % (str(self.instance.id) ,self.offline_site_id) return u\"%s\" %", "the calculate bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\",", "first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer = json_answer[question] if", "'{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\",", "GenericRelation from django.contrib.postgres.fields import ArrayField from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse", "import send_message, 
send_message_project_form, check_version from onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml", "'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder", "if self.schedule: return self.schedule.name def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk", "choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule' verbose_name", "models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site =", "= model_nodes[0] instance_nodes = [node for node in model_node.childNodes if node.nodeType == Node.ELEMENT_NODE", "= models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True,", "= models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects = FInstanceManager()", "{1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is None: return u\"%s\" % str(self.submitted_by) +", "None: self.site.update_current_progress() elif self.site is not None: self.site.update_status() if self.form_status is None: if", "schedule = schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start,", "order = mo.get('order__max', 0) return order + 1 else: if not 
Stage.objects.filter(project=project).exists(): return", "question = first_children['name'] question_type = first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer", "== 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if 'label'", "\"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",) def url(self): return", "if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')),", "first_children['name'] question_type = first_children['type'] answer= '' if question in json_answer: if first_children['type'] ==", "question_type = first_children['type'] answer= '' if question in json_answer: if first_children['type'] == 'note':", "if not self.stage else False def sub_stage_count(self): if not self.stage: return Stage.objects.filter(stage=self).count() return", "first_children in r_object['children']: question_type = first_children['type'] question = first_children['name'] answer = '' if", "m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if", "= models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form", "= models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True)", "first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} 
data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time',", "\"real\" save() method. @property def fsxfid(self): if self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\", "= mo.get('order__max', 0) return order + 1 def __unicode__(self): return getattr(self, \"name\", \"\")", "or first_children['type'] == 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question]", "1: raise Exception( u\"Multiple formhub nodes within main instance node\") elif len(formhub_nodes) ==", "self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) + \"#/\" + str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status]", "= models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs", "the id \" u\"attribute, can't tell which is the main one\") instance_node =", "models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified =", "check_version from onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import", "models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True,", "False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except 
FieldSightParsedInstance.DoesNotExist: created = True fspi", "= Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() # if", "get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site,", "return None def save(self, *args, **kwargs): if self.xls and not self.xml: survey =", "site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def", "Form Group\") verbose_name_plural = _(\"FieldSight Form Groups\") ordering = (\"-date_modified\",) def __unicode__(self): return", "0 def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False def form_name(self):", "FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in fs_form_list] @property def site_name(self): if self.site is", "import post_save, pre_delete from django.utils.translation import ugettext_lazy as _ from django.dispatch import receiver", "get_absolute_url(self): if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else:", "ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), }) if not self.is_scheduled and not self.is_staged: if", "str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together = ('xform', 'version') def _set_uuid_in_xml(self,", "__unicode__(self): 
return getattr(self, \"name\", \"\") class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created =", "return self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf: return self.project_fxf else: return self.site_fxf def", "else: answer = json_answer[question] if 'label' in first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question,", "= self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) # Call the \"real\"", "super(XformHistory, self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if", "m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return", "clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf':", "get_absolute_url(self): if self.site_fxf: fxf_id = self.site_fxf_id else: fxf_id = self.project_fxf_id return \"/forms/forms/\" +", "getattr(self, \"name\", \"\") class Stage(models.Model): name = models.CharField(max_length=256) description = models.TextField(blank=True, null=True) group", "else: fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class", "not None: self.site.update_current_progress() elif self.site is not None: self.site.update_status() if self.form_status is None:", "# if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms # site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf,", "@property def site_name(self): if self.site is not None: 
return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self):", "import create_survey_element_from_xml from xml.dom import Node from onadata.apps.fieldsight.models import Site, Project, Organization from", "description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms", "kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: # return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return", "def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance, update_data=None):", "self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is None: return", "if self.site is not None: return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site is", "filename): return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together =", "os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise Exception(u\"xml", "file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \", encoding='utf-8')", "calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml", "except FieldSightParsedInstance.DoesNotExist: 
created = True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi, created", "kwargs={'pk': self.pk}) def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status =", "not self.stage else \"SubStage\" def is_main_stage(self): return True if not self.stage else False", "def get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self): return", "doc = clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise Exception(u\"xml contains", "# continue # site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) #", "xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey survey = property(get_survey) class", "project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == self.pk: raise ValidationError({ 'xf':", "django.contrib.auth.models import User from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import ArrayField from django.core.exceptions", "return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: # return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0,", "1: raise Exception(u\"Multiple instance nodes without the id \" u\"attribute, can't tell which", "InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True) date = 
models.DateTimeField(auto_now=True) old_status", "the first child whose id attribute matches our id_string survey_nodes = [node for", "= models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True,", "'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\") message", "parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children) else: question", "= p1.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1", "Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id): return", "# for pms in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if", "not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if", "*args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict =", "= p1.search(xml) if m1: return m1.group(1) return None def check_version(xml, n): for i", "'fieldsight_forms_group' verbose_name = _(\"FieldSight Form 
Group\") verbose_name_plural = _(\"FieldSight Form Groups\") ordering =", "**kwargs): self.version = self.get_version if self.project_fxf is not None and self.project_fxf.is_staged and self.site", "(2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0,", "Meta: ordering = ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return", "verbose_name_plural = _(\"FieldSight Form Stages\") ordering = (\"order\",) def save(self, *args, **kwargs): if", "return self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id = self.site_fxf_id else: fxf_id = self.project_fxf_id", "stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0,", "tags = ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage' verbose_name", "async=False) return fspi, created class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager):", "# get the first child whose id attribute matches our id_string survey_nodes =", "encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re =", "formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for node in formhub_node.childNodes if", "site.type: # if site.type.id not in pss.tags: # continue # site_sub_stage = Stage(name=pss.name,", "nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if m1: return m1.group(1) return None def save(self,", "if 
FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms # site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf,", "Stage.objects.filter(stage=stage).exists(): return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order", "is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')),", "= project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status,", "if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')),", "in project_sub_stages: # if pss.tags and site.type: # if site.type.id not in pss.tags:", "GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def", "self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: # return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms',", "fsxfid(self): if self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf:", "return 0 def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False def", "models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data' # unique_together =", "nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: 
return m1.group(1) return None def check_version(xml,", "models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'') @property def get_version(self):", "if instance.project is not None: pass elif instance.is_staged: pass else: fxf = instance", "whose id attribute matches our id_string survey_nodes = [node for node in instance_node.childNodes", "is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status", "FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified", "FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project =", "node in model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and not", "= p.search(xml) if m: return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1", "self.stage.name def schedule_name(self): if self.schedule: return self.schedule.name def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists():", "self.site is not None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self): if self.fsform:", "project_info(self): if self.fsform: self.fsform.pk return None @property def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self):", "models.IntegerField() def __unicode__(self): return getattr(self, \"day\", \"\") class Schedule(models.Model): name = models.CharField(\"Schedule Name\",", "_set_uuid_in_xml(self, file_name=None): 
\"\"\" Add bind to automatically set UUID node in XML. \"\"\"", "model_nodes[0] instance_nodes = [node for node in model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and", "self.stage else \"SubStage\" def is_main_stage(self): return True if not self.stage else False def", "# date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, #", "= models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs", "formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: # append the calculate bind node calculate_node =", "the main one\") instance_node = instance_nodes[0] # get the first child whose id", "self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def", "first_children['type'] question = first_children['name'] group_answer = json_answer[r_question] answer = '' if r_question+\"/\"+question in", "'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object):", "form_type_id(self): if self.is_scheduled and self.schedule: return self.schedule.id if self.is_staged and self.stage: return self.stage.id", "if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate", "+'attachments/'+json_answer[question] else: answer = json_answer[question] if 'label' in first_children: question = first_children['label'] 
row={\"type\":question_type,", "Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization", "# if not project_sub_stages: # continue # site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description,", "None def __unicode__(self): return getattr(self, \"name\", \"\") class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\")", "Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site, project, stage): if site: if not Stage.objects.filter(site=site).exists():", "Name\", max_length=256, blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project = models.ForeignKey(Project,", "Meta: db_table = 'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\",", "Stage.objects.filter(stage=self).count() return 0 def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False", "super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance =", "= (\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\", \"\") class Stage(models.Model): name = models.CharField(max_length=256)", "if self.is_staged: return \"staged\" if self.is_survey: return \"survey\" if not self.is_scheduled and not", "xml = self.xml p = re.compile('version=\"(.*)\">') m = p.search(xml) if m: return m.group(1)", "check_version(xml, n): for i in range(n, 0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\"", 
"\"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",)", "'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end')", "logs = GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk':", "def flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site, project, stage): if", "\"id_string\": self.id_string } ) def getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self):", "stage is not None: if not Stage.objects.filter(stage=stage).exists(): return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order'))", "return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml) if m1:", "def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count()", "class Meta: db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight", "] class FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True) date_created =", "selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL)", "if not 
Stage.objects.filter(site=site).exists(): return 1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists():", "def fsxf(self): if self.project_fxf: return self.project_fxf else: return self.site_fxf def get_absolute_url(self): if self.site_fxf:", "site=site, # xf=general_form.xf, fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for", "== 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer", "blank=True, null=True) text = models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf", "if created: # project = site.project # project_main_stages = project.stages.filter(stage__isnull=True) # for pms", "_(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end') def form_exists(self): return", "file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time", "has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF)", "'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer =", "if pss.tags and site.type: # if site.type.id not in pss.tags: # continue #", "models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, filename): return os.path.join(", "model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \", 
encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re", "return mongo_dict @staticmethod def get_or_create(instance, update_data=None): if update_data is None: update_data = {}", "default=u'') @property def get_version(self): import re n = XML_VERSION_MAX_ITER xml = self.xml p", "date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id", "m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if", "FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True,", "= FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi, created class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager,", "= models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted", "import User from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import ArrayField from django.core.exceptions import", "Stage.objects.filter(site=site).exists(): return 1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists(): return 1", "copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance') # created = kwargs.get('created') # if created:", "if g_question+\"/\"+question in json_answer: if question_type == 'note': answer= '' elif question_type ==", "return m1.group(1) return None def 
save(self, *args, **kwargs): if self.xls and not self.xml:", "nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if m: return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\"", "}) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 'xf':", "GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",) class", "first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video': answer =", "self.xml = doc.toprettyxml(indent=\" \", encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re =", "child whose id attribute matches our id_string survey_nodes = [node for node in", "get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain to give complete url for", "related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status =", "return u'{}'.format(self.project.name) @property def project_info(self): if self.fsform: self.fsform.pk return None @property def has_versions(self):", "== 'note': answer= '' elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or", "version = models.CharField(max_length=255, default=u'') @property def get_version(self): import re n = XML_VERSION_MAX_ITER xml", "related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects =", "mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else: mo", "= 
Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages: # continue # site_main_stage =", "date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project", "for first_children in parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group':", "!= -1: self.has_start_time = True else: self.has_start_time = False def get_survey(self): if not", "def create_messages(sender, instance, created, **kwargs): if instance.project is not None and created and", "= instance_nodes[0] # get the first child whose id attribute matches our id_string", "not self.is_scheduled and not self.is_staged: return \"general\" def form_type_id(self): if self.is_scheduled and self.schedule:", "= self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring =", "return \"scheduled\" if self.is_staged: return \"staged\" if self.is_survey: return \"survey\" if not self.is_scheduled", "class Meta: db_table = 'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\",", "not hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json) except ValueError:", "get the first child whose id attribute matches our id_string survey_nodes = [node", "Meta: verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf", "calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1) return None def", "@staticmethod def flagged_submission_count(id, site_id): return 
Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site, project, stage):", "return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id':", "site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() # for pss in project_sub_stages: #", "Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from", "if update_data is None: update_data = {} created = False try: fspi =", "null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status", "continue answer = '' if g_question+\"/\"+question in json_answer: if question_type == 'note': answer=", "calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind", "elif stage is not None: if not Stage.objects.filter(stage=stage).exists(): return 1 else: mo =", "raise Exception(u\"Multiple instance nodes without the id \" u\"attribute, can't tell which is", "choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog') class", "first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer = '' if g_question+\"/\"+question in", "update_data=None): if update_data is None: update_data = {} created = False try: fspi", "self.site_fxf: self.form_status = 
self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) # Call", "which is the main one\") instance_node = instance_nodes[0] # get the first child", "FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already Used in Project Level')), })", "nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\"", "None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self): if self.fsform: self.fsform.pk return None", "instance.is_staged: pass else: fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data =", "m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml) if m1: return", "return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if self.is_scheduled: return", "= bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey survey = property(get_survey) class SubmissionOfflineSite(models.Model):", "kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return", "elif self.site is not None: self.site.update_status() if self.form_status is None: if self.site_fxf: self.form_status", "hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', 
re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*')", "1: raise Exception(u\"xml contains multiple model nodes\") model_node = model_nodes[0] instance_nodes = [node", "related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0) tags", "= models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form", "form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status", "and (node.tagName == file_name or node.attributes.get('id'))] if len(survey_nodes) != 1: raise Exception( u\"Multiple", "devs to make them easier. from django.contrib.sites.models import Site as DjangoSite from onadata.libs.utils.model_tools", "return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None", "blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0)", "choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True)", "= doc.toprettyxml(indent=\" \", encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</',", "= models.BooleanField(default=False) organization = models.ForeignKey(Organization, 
null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs", "super(Stage, self).save(*args, **kwargs) def get_display_name(self): return \"Stage\" if not self.stage else \"SubStage\" def", "answer = gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer}", "{1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed", "class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save,", "starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time = True else: self.has_start_time =", "Node.ELEMENT_NODE and node.tagName == \"uuid\"] if len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) ==", "Stages\") ordering = (\"order\",) def save(self, *args, **kwargs): if self.stage: self.group = self.stage.group", "# fsxf = pss.stage_forms # site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True)", "reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: # return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk})", "import XML_VERSION_MAX_ITER #To get domain to give complete url for app devs to", "organization = models.ForeignKey(Organization, null=True, blank=True) project = 
models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog')", "IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created", "None def check_version(xml, n): for i in range(n, 0, -1): p = re.compile(\"\"\"<bind", "blank=True) is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True)", "raise Exception(u\"xml contains multiple model nodes\") model_node = model_nodes[0] instance_nodes = [node for", "order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): #", "is not None: if not Stage.objects.filter(stage=stage).exists(): return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order", "True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi, created class FInstanceManager(models.Manager): def get_queryset(self):", "r_question in json_answer: for gnr_answer in json_answer[r_question]: for first_children in r_object['children']: question_type =", "Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form", "Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(project=project,", "media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question = first_children['label']", 
"m: return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml) if", "and not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form", "1 def __unicode__(self): return getattr(self, \"name\", \"\") class Days(models.Model): day = models.CharField(max_length=9) index", "len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: # append the calculate bind", "title = models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'') @property", "Schedule Data')), }) if not self.is_scheduled and not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf,", "= _(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight Form Stages\") ordering = (\"order\",) def", "def get_absolute_url(self): if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id})", "elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video': answer", "first_children['name'] question_type = first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer = ''", "= models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\",", "first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} 
submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time)", "import ValidationError from django.core.urlresolvers import reverse from django.db import models from django.db.models import", "form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False def form_name(self): if not", "class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31,", "re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1) p1 =", "Node from onadata.apps.fieldsight.models import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils", "\"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \", encoding='utf-8') # hack", "if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), }) if", "_(\"Library\") verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf", "1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists(): return 1 else: mo", "save(self, *args, **kwargs): if self.stage: self.group = self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self):", "Form Stage\") verbose_name_plural = _(\"FieldSight Form Stages\") ordering = (\"order\",) def save(self, *args,", "'' elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video':", "if self.site is not None: return 
self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if", "= 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question", "def get_display_name(self): return \"Stage\" if not self.stage else \"SubStage\" def is_main_stage(self): return True", "= FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms = project.project_forms.filter(is_staged=False,", "# # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form in schedule_forms: #", "models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project,", "self.instance.json['__version__'] def save(self, *args, **kwargs): self.version = self.get_version if self.project_fxf is not None", "p1.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 =", "g_question = prev_groupname+g_object['name'] for first_children in g_object['children']: question = first_children['name'] question_type = first_children['type']", "null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS)", "verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf =", "project_main_stages: # project_sub_stages = 
Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages: # continue", "> 0 else False def form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title", "json = models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml = models.TextField() id_string = models.CharField(editable=False,", "fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in fs_form_list] @property def site_name(self): if", "xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None def __unicode__(self): return getattr(self, \"name\", \"\")", "'answer':answer} data.append(row) else: for first_children in r_object['children']: question_type = first_children['type'] question = first_children['name']", "first_children['type'] question = first_children['name'] answer = '' if 'label' in first_children: question =", "and site.type: # if site.type.id not in pss.tags: # continue # site_sub_stage =", "first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for first_children in parent_object: if first_children['type']", "kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if self.is_scheduled: return \"scheduled\" if", "FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), })", "@staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return 
Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count()", "with the id '%s'\" % self.id_string) survey_node = survey_nodes[0] formhub_nodes = [n for", "schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level", "try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created = True fspi =", "models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule,", "version: return version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml)", "= self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) + \"#/\" + str(self.instance.id) def get_abr_form_status(self): return", "from django.utils.translation import ugettext_lazy as _ from django.dispatch import receiver from jsonfield import", "datetime import os import json import re from django.contrib.auth.models import User from django.contrib.contenttypes.fields", "if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if", "= models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged =", "is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already Used in Project Level')), }) else: if", "self.project_fxf.is_staged and self.site is not None: self.site.update_current_progress() elif 
self.site is not None: self.site.update_status()", "> 1: raise Exception( u\"Multiple formhub nodes within main instance node\") elif len(formhub_nodes)", "models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)", "default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True,", "and n.tagName == \"formhub\"] if len(formhub_nodes) > 1: raise Exception( u\"Multiple formhub nodes", "None and created and not instance.is_staged: send_message_project_form(instance) elif created and instance.site is not", "date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description = models.TextField(default=u'',", "\"_survey\"): try: builder = SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json) except ValueError: xml =", "gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else:", ") def getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def", "= DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def parse_repeat(r_object): r_question = r_object['name'] data.append(r_question) if r_question", "mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else:", "if n.nodeType == Node.ELEMENT_NODE and n.tagName == \"formhub\"] if len(formhub_nodes) > 1: 
raise", "json_answer: if question_type == 'note': answer= '' elif question_type == 'photo' or question_type", "None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: # return reverse('forms:project_html_export',", "getattr(self, \"name\", \"\") class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class", "self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already Used", "fspi.save(update_fs_data=update_data, async=False) return fspi, created class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class", "self.xml p = re.compile('version=\"(.*)\">') m = p.search(xml) if m: return m.group(1) version =", "Days(models.Model): day = models.CharField(max_length=9) index = models.IntegerField() def __unicode__(self): return getattr(self, \"day\", \"\")", "project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form in schedule_forms: # schedule = schedule_form.schedule #", "related_name='site_instances') project = models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf", "\"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) 
return data", "s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class", "not in pss.tags: # continue # site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, # description=pss.description,", "null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk),", "is the main one\") instance_node = instance_nodes[0] # get the first child whose", "related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education", "stage): if site: if not Stage.objects.filter(site=site).exists(): return 1 elif stage is not None:", "def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property def form_status(self): status =", "self.has_start_time = False def get_survey(self): if not hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder()", "= self.instance.json json_question = json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def parse_repeat(r_object):", "= GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural", "= _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\",", "and not self.is_staged: return \"general\" def form_type_id(self): if self.is_scheduled and self.schedule: return self.schedule.id", "from pyxform import create_survey_from_xls, 
SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from xml.dom import Node", "= models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0,", "blank=True, null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)", "False def sub_stage_count(self): if not self.stage: return Stage.objects.filter(stage=self).count() return 0 def form_exists(self): return", "not self.xml: survey = create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() #", "blank=True) def __unicode__(self): if self.instance: return u\"%s ---------------%s\" % (str(self.instance.id) ,self.offline_site_id) return u\"%s\"", "date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization = models.ForeignKey(Organization,", "models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\")", "version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if m:", "\"\") class Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site = models.ForeignKey(Site,", "null=True, blank=True) def __unicode__(self): if self.instance: return u\"%s ---------------%s\" % (str(self.instance.id) ,self.offline_site_id) return", "m1 = p.search(xml) if m1: return m1.group(1) return None def save(self, *args, **kwargs):", "for gnr_answer in json_answer[r_question]: for first_children in 
r_object['children']: question_type = first_children['type'] question =", "models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta:", "ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk:", "instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project, null=True,", "= survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if not self.version:", "u\"Multiple formhub nodes within main instance node\") elif len(formhub_nodes) == 1: formhub_node =", "blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version", "instance_nodes[0] # get the first child whose id attribute matches our id_string survey_nodes", "not Stage.objects.filter(site=site).exists(): return 1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists(): return", "get_version(self): return self.instance.json['__version__'] def save(self, *args, **kwargs): self.version = self.get_version if self.project_fxf is", "return getattr(self, \"name\", \"\") class Days(models.Model): day = models.CharField(max_length=9) index = models.IntegerField() def", "\"SubStage\" def is_main_stage(self): return True if not self.stage else False def sub_stage_count(self): if", "else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if m: return", "site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms # site_form 
= FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status,", "if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for", "Stage\") verbose_name_plural = _(\"FieldSight Form Stages\") ordering = (\"order\",) def save(self, *args, **kwargs):", "from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from onadata.apps.logger.models import", "instance_nodes = [node for node in model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower()", "description = models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0)", "json import re from django.contrib.auth.models import User from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields", "== self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), }) if not self.is_scheduled and", "order + 1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return", "node in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and (node.tagName == file_name or node.attributes.get('id'))]", "json_answer[question] if 'label' in first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted", "= models.TextField(default=u'', null=True) xml = models.TextField() id_string = models.CharField(editable=False, max_length=255) title = models.CharField(editable=False,", "models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs 
= GenericRelation('eventlog.FieldSightLog') class Meta: ordering =", "in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and (node.tagName == file_name or node.attributes.get('id'))] if", "= models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization", "= models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group' verbose_name", "Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end') def form_exists(self): return True", "def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk", "import unicode_literals import datetime import os import json import re from django.contrib.auth.models import", "import json import re from django.contrib.auth.models import User from django.contrib.contenttypes.fields import GenericRelation from", "in first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion", "id \" u\"attribute, can't tell which is the main one\") instance_node = instance_nodes[0]", "question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for first_children in r_object['children']: question_type", "weight=pms.weight) # site_main_stage.save() # for pss in project_sub_stages: # if pss.tags and site.type:", "data = JSONField(default={}) date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, 
related_name=\"deploy_data\", null=True) project =", "calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if m: return m.group(1) p1 = re.compile(\"\"\"<bind", "= text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml", "def instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer = self.instance.json json_question = json.loads(self.instance.xform.json)", "self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is None: return u\"%s\" % str(self.submitted_by) + \"---\"", "else None @property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None def __unicode__(self):", "== 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer", "form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def", "self.xml: survey = create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self)", "= first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for", "= models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table =", "site.project # project_main_stages = project.stages.filter(stage__isnull=True) # for pms in project_main_stages: # project_sub_stages =", "else: for first_children in 
r_object['children']: question_type = first_children['type'] question = first_children['name'] answer =", "str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer", "models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True,", "models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\",", "order = mo.get('order__max', 0) return order + 1 def __unicode__(self): return getattr(self, \"name\",", "= models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True, null=True,", "= models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs", "form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is None: return u\"%s\" % str(self.submitted_by)", "tell which is the main one\") instance_node = instance_nodes[0] # get the first", "site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id,", "p1.search(xml) if m1: return m1.group(1) return None def check_version(xml, n): for i in", "instance.project is not None: pass elif instance.is_staged: pass else: fxf = instance 
send_message(fxf)", "else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property def xf(self):", "self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput", "= models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project,", "or question_type == 'audio' or question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question]", "old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs", "return self.stage.name def schedule_name(self): if self.schedule: return self.schedule.name def clean(self): if self.is_staged: if", "receiver from jsonfield import JSONField from pyxform import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import", "= FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__'] def save(self, *args,", "XML. 
\"\"\" if not file_name: file_name = self.file_name() file_name, file_ext = os.path.splitext(file_name) doc", "project_sub_stages: # if pss.tags and site.type: # if site.type.id not in pss.tags: #", "blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level", "return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if self.is_scheduled: return \"scheduled\" if self.is_staged: return", "1 else: if not Stage.objects.filter(project=project).exists(): return 1 elif stage is not None: if", "\"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \", encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\", "= models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS)", "instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and (node.tagName == file_name or node.attributes.get('id'))] if len(survey_nodes)", "'<label></label>', inlineOutput) self.xml = inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls", "output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput xform = models.ForeignKey(XForm,", "= ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural =", "logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\")", "models.BooleanField(default=True) data = 
JSONField(default={}) date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project", "attribute matches our id_string survey_nodes = [node for node in instance_node.childNodes if node.nodeType", "= p.search(xml) if m: return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1", "verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date =", "'' if question in json_answer: if first_children['type'] == 'note': answer= '' elif first_children['type']", "the id '%s'\" % self.id_string) survey_node = survey_nodes[0] formhub_nodes = [n for n", "FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else", "def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site is not None: return self.site_form_instances.order_by('-pk').values('date')[:1]", "general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form) #", "'{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is None: return u\"%s\" %", "os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together = ('xform', 'version')", "models.IntegerField(default=0) weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta:", "True if not self.stage else False def sub_stage_count(self): if not self.stage: 
return Stage.objects.filter(stage=self).count()", "DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data = JSONField(default={}) date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site,", "for app devs to make them easier. from django.contrib.sites.models import Site as DjangoSite", "complete url for app devs to make them easier. from django.contrib.sites.models import Site", "Schedules\") ordering = ('-date_range_start', 'date_range_end') def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() > 0", "blank=True) ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(),", "formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName == \"uuid\"] if len(uuid_nodes) == 0:", "question = first_children['name'] question_type = first_children['type'] answer= '' if question in json_answer: if", "def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False)", "Form Data')), }) @staticmethod def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform", "project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() # for pss in project_sub_stages: # if pss.tags and", "UUID node in XML. 
\"\"\" if not file_name: file_name = self.file_name() file_name, file_ext", "Changed Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True)", "choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed =", "row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for first_children in parent_object: if first_children['type'] ==", "def save(self, *args, **kwargs): self.version = self.get_version if self.project_fxf is not None and", "FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization,", "Max from django.db.models.signals import post_save, pre_delete from django.utils.translation import ugettext_lazy as _ from", "fsxf(self): if self.project_fxf: return self.project_fxf else: return self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id", "else: answer = json_answer[g_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question,", "models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0)", "in range(n, 0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml)", "question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name']", "1 else: mo = 
Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order +", "is not None: pass elif instance.is_staged: pass else: fxf = instance send_message(fxf) post_save.connect(create_messages,", "stage_name(self): if self.stage: return self.stage.name def schedule_name(self): if self.schedule: return self.schedule.name def clean(self):", "= first_children['name'] question_type = first_children['type'] answer= '' if question in json_answer: if first_children['type']", "multiple model nodes\") model_node = model_nodes[0] instance_nodes = [node for node in model_node.childNodes", "onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings", "if self.form_exists() else None def __unicode__(self): return getattr(self, \"name\", \"\") class DeletedXForm(models.Model): xf", "schedule_forms: # schedule = schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name,", "n.nodeType == Node.ELEMENT_NODE and n.tagName == \"formhub\"] if len(formhub_nodes) > 1: raise Exception(", "= p.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1", "send_message, send_message_project_form, check_version from onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from", "order + 1 def __unicode__(self): return getattr(self, \"name\", \"\") class Days(models.Model): day =", "fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey", "\"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes) != 
1: raise Exception(u\"Multiple instance nodes without", "self.has_start_time = True else: self.has_start_time = False def get_survey(self): if not hasattr(self, \"_survey\"):", "question_type == 'note': answer= '' elif question_type == 'photo' or question_type == 'audio'", "m1.group(1) return None def check_version(xml, n): for i in range(n, 0, -1): p", "= doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid)", "instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf", "*args, **kwargs): if self.xls and not self.xml: survey = create_survey_from_xls(self.xls) self.json = survey.to_json()", "FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None def __unicode__(self): return getattr(self, \"name\", \"\") class DeletedXForm(models.Model):", "str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) + \"---\" + self.site_fxf.xf.title", "range(n, 0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if", "first_children in g_object['children']: question = first_children['name'] question_type = first_children['type'] if question_type == 'group':", "answer= '' if question in json_answer: if first_children['type'] == 'note': answer= '' elif", "if m: return m.group(1) version = check_version(xml) if version: return version else: p", "return \"staged\" if self.is_survey: return \"survey\" if not self.is_scheduled and not self.is_staged: return", "= [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'),", "xml.dom import Node from 
onadata.apps.fieldsight.models import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField", "m1 = p1.search(xml) if m1: return m1.group(1) return None def check_version(xml, n): for", "self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if not self.version: self.version = self.get_version super(XformHistory, self).save(*args,", "calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \", encoding='utf-8') # hack #", "class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class", "answer= '' elif question_type == 'photo' or question_type == 'audio' or question_type ==", "is_deleted=False) # for schedule_form in schedule_forms: # schedule = schedule_form.schedule # selected_days =", "Exception(u\"xml contains multiple model nodes\") model_node = model_nodes[0] instance_nodes = [node for node", "import JSONField from pyxform import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from xml.dom", "within main instance node\") elif len(formhub_nodes) == 1: formhub_node = formhub_nodes[0] else: formhub_node", "sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs): if instance.project is not None: pass elif", "def get_absolute_url(self): if self.site_fxf: fxf_id = self.site_fxf_id else: fxf_id = self.project_fxf_id return \"/forms/forms/\"", "_(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end') def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() >", "u\"%s\" % str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) + \"---\"", "DjangoSite from 
onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),]", "= [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model): name", "'note': answer= '' elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type']", "_ from django.dispatch import receiver from jsonfield import JSONField from pyxform import create_survey_from_xls,", "u\"%s\" % str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def get_responces(self):", "data.append(r_question) if r_question in json_answer: for gnr_answer in json_answer[r_question]: for first_children in r_object['children']:", "node in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName == \"uuid\"] if len(uuid_nodes)", "not None and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs):", "upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together", "uuid_nodes = [node for node in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName", "= GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"), (\"xf\",", "self.project_fxf is not None and self.project_fxf.is_staged and self.site is not None: self.site.update_current_progress() elif", "== Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes) != 1:", "from django.core.urlresolvers import reverse from django.db import models from django.db.models import Max from", "self.get_version if self.project_fxf is not None and self.project_fxf.is_staged and self.site is not None:", "= 
models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization = models.ForeignKey(Organization, null=True, blank=True) project =", "Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf,", "**kwargs): if instance.project is not None and created and not instance.is_staged: send_message_project_form(instance) elif", "[node for node in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName == \"uuid\"]", "'{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf", "p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if m: return m.group(1)", "def project_info(self): if self.fsform: self.fsform.pk return None @property def has_versions(self): return self.xf.fshistory.exists() def", "for first_children in r_object['children']: question_type = first_children['type'] question = first_children['name'] group_answer = json_answer[r_question]", "= json_answer[r_question] answer = '' if r_question+\"/\"+question in gnr_answer: if first_children['type'] == 'note':", "is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule", "shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site,", "EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = 
models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site)", "if not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self):", "is_deployed=True) # site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form", "{} created = False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created", "sender=Site) # def copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance') # created = kwargs.get('created')", "**kwargs) def get_display_name(self): return \"Stage\" if not self.stage else \"SubStage\" def is_main_stage(self): return", "= instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta: proxy", "models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True,", "= models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml = models.TextField()", "self._survey = \\ builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml)", "and self.project_fxf.is_staged and self.site is not None: self.site.update_current_progress() elif self.site is not None:", "SurveyElementBuilder() self._survey = \\ 
builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey =", "related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf =", "models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31, blank=True, null=True) text =", "ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')), }) @staticmethod def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id')", "% self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \", encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ #", "if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object):", "!= 1: raise Exception( u\"Multiple survey nodes with the id '%s'\" % self.id_string)", "\", encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re", "site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data = JSONField(default={})", "models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2,", "survey_node = survey_nodes[0] formhub_nodes = [n for n in survey_node.childNodes if n.nodeType ==", "is not None and self.project_fxf.is_staged and 
self.site is not None: self.site.update_current_progress() elif self.site", "def form_type(self): if self.is_scheduled: return \"scheduled\" if self.is_staged: return \"staged\" if self.is_survey: return", "self.group = self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self): return \"Stage\" if not self.stage", "= models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\")", "blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form", "r_object['children']: question_type = first_children['type'] question = first_children['name'] group_answer = json_answer[r_question] answer = ''", "SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1,", "http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml =", "logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\") ordering =", "else None @property def form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1", "return getattr(self, \"day\", \"\") class Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True)", "sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta: proxy = True def save(self,", "fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created = True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return", "% self.id_string) survey_node 
= survey_nodes[0] formhub_nodes = [n for n in survey_node.childNodes if", "self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already Used in Project", "is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True) project =", "FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not", "class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data = JSONField(default={}) date = models.DateTimeField(auto_now=True) site =", "from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.db import models from", "project_sub_stages: # continue # site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight)", "'note': answer= '' elif question_type == 'photo' or question_type == 'audio' or question_type", "len(model_nodes) != 1: raise Exception(u\"xml contains multiple model nodes\") model_node = model_nodes[0] instance_nodes", "InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model):", "in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question", "'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer = 
json_answer[question] if 'label' in", "% str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[]", "site_or_project_display(self): if self.site is not None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self):", "self.stage.id return None def stage_name(self): if self.stage: return self.stage.name def schedule_name(self): if self.schedule:", "# Call the \"real\" save() method. @property def fsxfid(self): if self.project_fxf: return self.project_fxf.id", "for first_children in r_object['children']: question_type = first_children['type'] question = first_children['name'] answer = ''", "+ str(fxf_id) + \"#/\" + str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if", "'pk':self.project_id}) else: # return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self):", "1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order +", "self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs): if instance.project is not", "Exception( u\"Multiple formhub nodes within main instance node\") elif len(formhub_nodes) == 1: formhub_node", "return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property def form_status(self): status = 0 if", "== 'group': parse_group(g_question+\"/\",first_children) continue answer = '' if g_question+\"/\"+question in json_answer: if question_type", "if node.nodeType == Node.ELEMENT_NODE and (node.tagName == file_name or node.attributes.get('id'))] if len(survey_nodes) !=", "blank=True) title = models.CharField(max_length=31, blank=True, null=True) text = 
models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage,", "FieldSightParsedInstance.DoesNotExist: created = True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi, created class", "mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance, update_data=None): if update_data", "submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model):", "self.site is not None: self.site.update_status() if self.form_status is None: if self.site_fxf: self.form_status =", "from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain to give complete url for app", "bind to automatically set UUID node in XML. 
\"\"\" if not file_name: file_name", "models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage =", "null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self',", "self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), }) if not self.is_scheduled and not", "is_main_stage(self): return True if not self.stage else False def sub_stage_count(self): if not self.stage:", "models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0,", "def get_order(cls, site, project, stage): if site: if not Stage.objects.filter(site=site).exists(): return 1 elif", "if r_question in json_answer: for gnr_answer in json_answer[r_question]: for first_children in r_object['children']: question_type", "doc.toprettyxml(indent=\" \", encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL)", "in g_object['children']: question = first_children['name'] question_type = first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children)", "== 'photo' or question_type == 'audio' or question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+", "= re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if m1: return m1.group(1) return", "class Meta: db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form Group\") verbose_name_plural = _(\"FieldSight", "return 
True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if", "proxy = True def save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args,", "= models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class", "s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True,", "= models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml = models.TextField() id_string = models.CharField(editable=False, max_length=255)", "model nodes\") model_node = model_nodes[0] instance_nodes = [node for node in model_node.childNodes if", "== 0: # append the calculate bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\",", "# site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() #", "site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) #", "for node in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName == \"uuid\"] if", "fsxf = pss.stage_forms # site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) #", "models.TextField() id_string = models.CharField(editable=False, max_length=255) title = 
models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'')", "super(FInstance, self).save(*args, **kwargs) # Call the \"real\" save() method. @property def fsxfid(self): if", "ordering = (\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\", \"\") class Stage(models.Model): name =", "**kwargs): if self.xls and not self.xml: survey = create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml", "form_type(self): if self.is_scheduled: return \"scheduled\" if self.is_staged: return \"staged\" if self.is_survey: return \"survey\"", "pss in project_sub_stages: # if pss.tags and site.type: # if site.type.id not in", "-1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if m: return", "site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site, project,", "is not None: self.site.update_current_progress() elif self.site is not None: self.site.update_status() if self.form_status is", "models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global", "instance, using, **kwargs): if instance.project is not None: pass elif instance.is_staged: pass else:", "submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance,", "in schedule_forms: # schedule = schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) # s =", "# continue # 
site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight)", "= models.CharField(editable=False, max_length=255) title = models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'') version =", "form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is", "self.site is not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project:", "finstance = models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True) old_status =", "form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count():", "models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class", "if len(instance_nodes) != 1: raise Exception(u\"Multiple instance nodes without the id \" u\"attribute,", "django.db.models import Max from django.db.models.signals import post_save, pre_delete from django.utils.translation import ugettext_lazy as", "= _(\"FieldSight Form Group\") verbose_name_plural = _(\"FieldSight Form Groups\") ordering = (\"-date_modified\",) def", "form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if", "kwargs={'is_project':1, 'pk':self.project_id}) else: # return 
reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def", "import Node from onadata.apps.fieldsight.models import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from", "ugettext_lazy as _ from django.dispatch import receiver from jsonfield import JSONField from pyxform", "get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk", "= models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1])", "answer = '' if r_question+\"/\"+question in gnr_answer: if first_children['type'] == 'note': answer= ''", "= models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted =", "if r_question+\"/\"+question in gnr_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] ==", "FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property def form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists():", "= True def save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs)", "stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class", "survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for node in formhub_node.childNodes if node.nodeType ==", "fsform in fs_form_list] @property def 
site_name(self): if self.site is not None: return u'{}'.format(self.site.name)\\", "return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string } ) def getname(self): return", "prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\")", "in parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children) else:", "# for schedule_form in schedule_forms: # schedule = schedule_form.schedule # selected_days = tuple(schedule.selected_days.all())", "not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists()", "question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if", "1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1", "FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True)", "== self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists():", "= GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__'] def save(self, *args, **kwargs): self.version =", "'video': answer = 
'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if 'label' in", "def __unicode__(self): return getattr(self, \"day\", \"\") class Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256,", "models from django.db.models import Max from django.db.models.signals import post_save, pre_delete from django.utils.translation import", "_(\"XForm\") verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\", kwargs={", "site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready", "= self.file_name() file_name, file_ext = os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if", "(1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3,", "class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta: proxy = True def save(self, *args,", "os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together = ('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\"", "node in XML. 
\"\"\" if not file_name: file_name = self.file_name() file_name, file_ext =", "instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True)", "= models.CharField(max_length=256) description = models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order =", "return version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if", "\"\") class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf", "node.hasAttribute(\"id\")] if len(instance_nodes) != 1: raise Exception(u\"Multiple instance nodes without the id \"", "else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1", "if site: if not Stage.objects.filter(site=site).exists(): return 1 elif stage is not None: if", "models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances',", "schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form in schedule_forms: # schedule =", "re n = XML_VERSION_MAX_ITER xml = self.xml p = re.compile('version=\"(.*)\">') m = p.search(xml)", "model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")] if", "not None: self.site.update_status() if self.form_status is None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else:", "from jsonfield import JSONField from pyxform 
import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml", "null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name", "\"uuid\"] if len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: # append the", "if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already Used in Project Level')),", "# created = kwargs.get('created') # if created: # project = site.project # project_main_stages", "= models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id =", "url for app devs to make them easier. from django.contrib.sites.models import Site as", "== Node.ELEMENT_NODE and node.tagName == \"uuid\"] if len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes)", "null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project =", "return order + 1 def __unicode__(self): return getattr(self, \"name\", \"\") class Days(models.Model): day", "order = mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order'))", "= GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",)", "= re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if m: return m.group(1) p1", "related_name=\"schedules\", null=True, blank=True) date_range_start = 
models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days',", "is not None: self.site.update_status() if self.form_status is None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status", "= _(\"Library\") verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False)", "FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True)", "if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes)", "else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return", "= models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf =", "re.compile('version=\"(.*)\">') m = p.search(xml) if m: return m.group(1) version = check_version(xml) if version:", "contains multiple model nodes\") model_node = model_nodes[0] instance_nodes = [node for node in", "return status @property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count()", "class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class 
FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager,", "from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table", "r_question+\"/\"+question in gnr_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo'", "project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages: # continue # site_main_stage", "models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date", "return fspi, created class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def", "\"scheduled\" if self.is_staged: return \"staged\" if self.is_survey: return \"survey\" if not self.is_scheduled and", "answer= '' elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] ==", "def __unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance,", "self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance,", "= Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() # for pss", "self.file_name() file_name, file_ext = os.path.splitext(file_name) doc = 
clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes)", "= models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF,", "return None def stage_name(self): if self.stage: return self.stage.name def schedule_name(self): if self.schedule: return", "in pss.tags: # continue # site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage,", "\"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children) else: question = first_children['name'] question_type =", "default=u'') objects = FInstanceManager() deleted_objects = FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self):", "@property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def", "return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self):", "get_order(cls, site, project, stage): if site: if not Stage.objects.filter(site=site).exists(): return 1 elif stage", "import reverse from django.db import models from django.db.models import Max from django.db.models.signals import", "\"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering = (\"-date_created\",) def url(self):", "Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # 
site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists():", "= 1 return status @property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id):", "from onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance", "instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer = self.instance.json json_question = json.loads(self.instance.xform.json) base_url", "description = models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User,", "else \"SubStage\" def is_main_stage(self): return True if not self.stage else False def sub_stage_count(self):", "for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form)", "= models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'')", "self.schedule.name def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise", "(\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering =", "= GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk})", "is None: update_data = {} created = False try: fspi = 
FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data,", "text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml =", "instance_node = instance_nodes[0] # get the first child whose id attribute matches our", "data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True,", "nodes within main instance node\") elif len(formhub_nodes) == 1: formhub_node = formhub_nodes[0] else:", "re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if m1: return m1.group(1) return None", "import IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from onadata.apps.logger.models import XForm, Instance", "XformHistory(models.Model): class Meta: unique_together = ('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add bind", "send_message_project_form(instance) elif created and instance.site is not None and not instance.is_staged: send_message(instance) @receiver(pre_delete,", "instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs): if instance.project is not", "parse_repeat(r_object): r_question = r_object['name'] data.append(r_question) if r_question in json_answer: for gnr_answer in json_answer[r_question]:", "prev_groupname+g_object['name'] for first_children in g_object['children']: question = first_children['name'] question_type = first_children['type'] if question_type", "site: if not Stage.objects.filter(site=site).exists(): return 1 elif stage is not None: if not", "= 
models.OneToOneField(XForm, related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site", "models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site) # def copy_stages_from_project(sender,", "= models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True,", "ValidationError(_('Form Already Used in Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site,", "# site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save()", "== 1: formhub_node = formhub_nodes[0] else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes =", "Form Stages\") ordering = (\"order\",) def save(self, *args, **kwargs): if self.stage: self.group =", "data=[] json_answer = self.instance.json json_question = json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username", "= models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects = FInstanceDeletedManager() logs", "self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo()", "'Daily'), (1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'),", "class 
Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\",", "null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified =", "'pk':self.site_id}) def form_type(self): if self.is_scheduled: return \"scheduled\" if self.is_staged: return \"staged\" if self.is_survey:", "self.version = self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring", "domain to give complete url for app devs to make them easier. from", "Exception(u\"Multiple instance nodes without the id \" u\"attribute, can't tell which is the", "'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model): name = models.CharField(max_length=256,", "= models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def", "== 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: # append the calculate bind node", "pyxform.xform2json import create_survey_element_from_xml from xml.dom import Node from onadata.apps.fieldsight.models import Site, Project, Organization", "answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if 'label' in first_children:", "models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage = models.OneToOneField(Stage,", "self.uuid) model_node.appendChild(calculate_node) self.xml = 
doc.toprettyxml(indent=\" \", encoding='utf-8') # hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/", "str(fxf_id) + \"#/\" + str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf", "if self.is_staged and self.stage: return self.stage.id return None def stage_name(self): if self.stage: return", "models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs =", "null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self): if self.instance:", "str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is None: return '{0}", "= models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'')", "ValidationError(_('Duplicate General Form Data')), }) @staticmethod def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk", "0 else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property def", "self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: # return", "\"formhub\"] if len(formhub_nodes) > 1: raise Exception( u\"Multiple formhub nodes within main instance", "models.TextField(default=u'', null=True) xml = models.TextField() id_string = models.CharField(editable=False, max_length=255) title = 
models.CharField(editable=False, max_length=255)", "= models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage =", "self.site_fxf: fxf_id = self.site_fxf_id else: fxf_id = self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) +", "= models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage", "_(\"FieldSight Form Group\") verbose_name_plural = _(\"FieldSight Form Groups\") ordering = (\"-date_modified\",) def __unicode__(self):", "elif created and instance.site is not None and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF)", "u'{}'.format(self.project.name) @property def project_info(self): if self.fsform: self.fsform.pk return None @property def has_versions(self): return", "in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not project_sub_stages: #", "question_type == 'audio' or question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else:", "m.group(1) version = check_version(xml) if version: return version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\"", "if m: return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml)", "if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) #", "= models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, 
filename): return", "stage_forms__is_deployed=True) # if not project_sub_stages: # continue # site_main_stage = Stage(name=pms.name, order=pms.order, site=site,", "is None: return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def", "FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False,", "== Node.ELEMENT_NODE and (node.tagName == file_name or node.attributes.get('id'))] if len(survey_nodes) != 1: raise", "in json_answer[r_question]: for first_children in r_object['children']: question_type = first_children['type'] question = first_children['name'] group_answer", "Stage Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise", "null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self',", "null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0) tags =", "import os import json import re from django.contrib.auth.models import User from django.contrib.contenttypes.fields import", "django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.db import models from django.db.models", "not None: return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site is not None: return", "get_responces(self): data=[] json_answer = self.instance.json json_question = json.loads(self.instance.xform.json) base_url = 
DjangoSite.objects.get_current().domain media_folder =", "onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain to give complete url for app devs", "node.nodeType == Node.ELEMENT_NODE and (node.tagName == file_name or node.attributes.get('id'))] if len(survey_nodes) != 1:", "= models.CharField(max_length=31, blank=True, null=True) text = models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True,", "django.dispatch import receiver from jsonfield import JSONField from pyxform import create_survey_from_xls, SurveyElementBuilder from", "import set_uuid SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0,", "Images',) # @receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance') #", "Stage.objects.filter(project=project).exists(): return 1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists(): return 1", "= re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date =", "XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import", "# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml", "text = models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF,", "XML_VERSION_MAX_ITER #To get domain to give complete url for app devs to make", "models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project, 
related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False)", "is None: return u\"%s\" % str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return u\"%s\" %", "new_status = models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog') class Meta:", "return m1.group(1) return None def check_version(xml, n): for i in range(n, 0, -1):", "== self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')), }) @staticmethod def get_xform_id_list(site_id):", "return self.project_fxf else: return self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id = self.site_fxf_id else:", "# site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form in", "models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True)", "related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, filename): return os.path.join( 'versions',", "= [node for node in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName ==", "ValidationError({ 'xf': ValidationError(_('Duplicate Stage Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk", "self.xml.find(starttime_substring) != -1: self.has_start_time = True else: self.has_start_time = False def get_survey(self): if", "and self.site is not None: self.site.update_current_progress() elif self.site is not None: self.site.update_status() if", "= mo.get('order__max', 0) return order + 1 else: if not 
Stage.objects.filter(project=project).exists(): return 1", "def site_name(self): if self.site is not None: return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if", "if first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children) else: question =", "created: # project = site.project # project_main_stages = project.stages.filter(stage__isnull=True) # for pms in", "self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer = self.instance.json json_question =", "None and self.project_fxf.is_staged and self.site is not None: self.site.update_current_progress() elif self.site is not", "[fsform.xf.pk for fsform in fs_form_list] @property def site_name(self): if self.site is not None:", "m1: return m1.group(1) return None def save(self, *args, **kwargs): if self.xls and not", "can't tell which is the main one\") instance_node = instance_nodes[0] # get the", "= models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True,", "models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog')", "first_children['type'] == 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else:", "return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer = self.instance.json json_question = json.loads(self.instance.xform.json) base_url =", "reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: 
# return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id})", "_(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True,", "selected_days = tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days)", "not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General", "models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project =", "if self.is_survey: return \"survey\" if not self.is_scheduled and not self.is_staged: return \"general\" def", "mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order =", "models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self):", "= models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data' # unique_together", "django.db.models.signals import post_save, pre_delete from django.utils.translation import ugettext_lazy as _ from django.dispatch import", "if FieldSightXF.objects.filter(stage=self).count() > 0 else False def form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\"", "objects = FInstanceManager() deleted_objects = 
FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return", "= doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise Exception(u\"xml contains multiple model nodes\") model_node", "from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To", "and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8'))", "null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True)", "return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order +", "related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True)", "mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance, update_data=None): if update_data is None: update_data =", "m: return m.group(1) version = check_version(xml) if version: return version else: p =", "import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain", "nodes\") model_node = model_nodes[0] instance_nodes = [node for node in model_node.childNodes if node.nodeType", "description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() # for pss in project_sub_stages: # if", "def save(self, *args, **kwargs): if self.xls and not self.xml: survey = 
create_survey_from_xls(self.xls) self.json", "null=True) json = models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml = models.TextField() id_string =", "blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name =", "db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight Form Stages\")", "Exception( u\"Multiple survey nodes with the id '%s'\" % self.id_string) survey_node = survey_nodes[0]", "= Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status,", "date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\") stage", "self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if self.is_scheduled: return \"scheduled\" if self.is_staged:", "set UUID node in XML. 
\"\"\" if not file_name: file_name = self.file_name() file_name,", "= _(\"FieldSight Form Stages\") ordering = (\"order\",) def save(self, *args, **kwargs): if self.stage:", "site_id=site_id).count() @classmethod def get_order(cls, site, project, stage): if site: if not Stage.objects.filter(site=site).exists(): return", "== 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+", "question = first_children['name'] answer = '' if 'label' in first_children: question = first_children['label']", "mo.get('order__max', 0) return order + 1 def __unicode__(self): return getattr(self, \"name\", \"\") class", "node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes) != 1: raise Exception(u\"Multiple instance", "if not file_name: file_name = self.file_name() file_name, file_ext = os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml)", "site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status =", "# selected_days = tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) #", "gnr_answer in json_answer[r_question]: for first_children in r_object['children']: question_type = first_children['type'] question = first_children['name']", "elif question_type == 'photo' or question_type == 'audio' or question_type == 'video': answer", "get_display_name(self): return \"Stage\" if not self.stage else \"SubStage\" def is_main_stage(self): return True if", "ordering = (\"order\",) def save(self, *args, **kwargs): if self.stage: self.group = self.stage.group super(Stage,", "= tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, 
site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) #", "self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs): if instance.project is not None", "class FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description = models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True)", "row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for first_children in r_object['children']: question_type = first_children['type'] question", "Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True,", "def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class", "ordering = (\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string", "self.site is not None: self.site.update_current_progress() elif self.site is not None: self.site.update_status() if self.form_status", "created and instance.site is not None and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def", "first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo' or first_children['type'] == 'audio'", "\" u\"attribute, can't tell which is the main one\") instance_node = instance_nodes[0] #", "and created and not instance.is_staged: send_message_project_form(instance) elif created and instance.site is not None", "(1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),] 
FORM_STATUS", "index = models.IntegerField() def __unicode__(self): return getattr(self, \"day\", \"\") class Schedule(models.Model): name =", "related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id", "node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes) !=", "survey = property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance", "= first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer = '' if g_question+\"/\"+question", "get_survey(self): if not hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json)", "self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender,", "def getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self):", "models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project,", "def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site =", "return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance, 
related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True,", "clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise Exception(u\"xml contains multiple model", "p.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 =", "m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return", "class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True)", "null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True)", "continue # site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save()", "if self.site is not None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self): if", "{}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs): if instance.project is", "(\"order\",) def save(self, *args, **kwargs): if self.stage: self.group = self.stage.group super(Stage, self).save(*args, **kwargs)", "'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question =", "def __unicode__(self): return getattr(self, \"name\", \"\") class Stage(models.Model): name = models.CharField(max_length=256) description =", "if m1: return m1.group(1) return None def check_version(xml, n): for i in range(n,", "= 
models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project", "@property def get_version(self): import re n = XML_VERSION_MAX_ITER xml = self.xml p =", "id attribute matches our id_string survey_nodes = [node for node in instance_node.childNodes if", "Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version", "= os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise", "= models.CharField(max_length=255, default=u'') @property def get_version(self): import re n = XML_VERSION_MAX_ITER xml =", "Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 def __unicode__(self): return", "media_folder +'attachments/'+json_answer[question] else: answer = json_answer[question] if 'label' in first_children: question = first_children['label']", "# s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() #", "None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs)", "from onadata.apps.fieldsight.models import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import", "+ 1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order", "(1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model): name = models.CharField(max_length=256, 
unique=True)", "g_object): g_question = prev_groupname+g_object['name'] for first_children in g_object['children']: question = first_children['name'] question_type =", "is None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status super(FInstance, self).save(*args,", "null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site", "calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \", encoding='utf-8') #", "related_name=\"schedules\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end", "json_answer: for gnr_answer in json_answer[r_question]: for first_children in r_object['children']: question_type = first_children['type'] question", "date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user =", "in Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if not", "class Stage(models.Model): name = models.CharField(max_length=256) description = models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True,", "import XForm, Instance from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses", "'answer':answer} data.append(row) def parse_individual_questions(parent_object): for 
first_children in parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children)", "return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site, project, stage): if site: if not", "json_answer[r_question] answer = '' if r_question+\"/\"+question in gnr_answer: if first_children['type'] == 'note': answer=", "self.json = survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if not", "django.contrib.postgres.fields import ArrayField from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.db", "ValidationError from django.core.urlresolvers import reverse from django.db import models from django.db.models import Max", "related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled", "is not None: return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site is not None:", "}) @staticmethod def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in fs_form_list]", "and not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs): if instance.project", "verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight Form Stages\") ordering = (\"order\",)", "= 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if 'label' in first_children: question", "project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() # if 
FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms # site_form", "== file_name or node.attributes.get('id'))] if len(survey_nodes) != 1: raise Exception( u\"Multiple survey nodes", "id '%s'\" % self.id_string) survey_node = survey_nodes[0] formhub_nodes = [n for n in", "if question_type == 'note': answer= '' elif question_type == 'photo' or question_type ==", "class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True,", "getlatestsubmittiondate(self): if self.site is not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self):", "project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group'", "from django.db.models import Max from django.db.models.signals import post_save, pre_delete from django.utils.translation import ugettext_lazy", "models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed", "\"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return", "if self.form_status is None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status", "\"\"\") m1 = p1.search(xml) if m1: return m1.group(1) p1 = 
re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\"", "if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate", "= JSONField(default={}) date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project,", "= ('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add bind to automatically set UUID", "= clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise Exception(u\"xml contains multiple", "FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__'] def save(self, *args, **kwargs):", "id_string survey_nodes = [node for node in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and", "= models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog') class Meta: ordering", "'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ] class", "# FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed =", "re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>',", "return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if self.site_fxf is None: return u\"%s\"", "Stage(models.Model): name = models.CharField(max_length=256) 
description = models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True)", "def sub_stage_count(self): if not self.stage: return Stage.objects.filter(stage=self).count() return 0 def form_exists(self): return True", "models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0,", "= first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for first_children in r_object['children']: question_type =", "@property def project_info(self): if self.fsform: self.fsform.pk return None @property def has_versions(self): return self.xf.fshistory.exists()", "def get_or_create(instance, update_data=None): if update_data is None: update_data = {} created = False", "models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date'] def get_absolute_url(self): return", "mongo_dict @staticmethod def get_or_create(instance, update_data=None): if update_data is None: update_data = {} created", "models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group' verbose_name =", "not None: pass elif instance.is_staged: pass else: fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF)", "if self.fsform: self.fsform.pk return None @property def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return", "get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is None: return '{0} form {1}'.format(self.project_fxf.form_type(),", "'' if r_question+\"/\"+question in gnr_answer: if first_children['type'] == 'note': answer= '' elif 
first_children['type']", "models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31, blank=True, null=True) text = models.TextField(blank=True, null=True) stage", "site=self.site, project=self.project)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')), }) @staticmethod", "Meta: db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight Form", "!= 1: raise Exception(u\"xml contains multiple model nodes\") model_node = model_nodes[0] instance_nodes =", "return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self): if not FieldSightXF.objects.filter(stage=self).count(): return None return FieldSightXF.objects.filter(stage=self)[0]", "date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True)", "def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod", "'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): finstance =", "site = kwargs.get('instance') # created = kwargs.get('created') # if created: # project =", "True def save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def", "related_name=\"submission_comments\") logs = 
GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail',", "null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\")", "models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name=\"schedule_forms\")", "node\") elif len(formhub_nodes) == 1: formhub_node = formhub_nodes[0] else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"),", "using, **kwargs): if instance.project is not None: pass elif instance.is_staged: pass else: fxf", "'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer =", "}) if not self.is_scheduled and not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists():", "to make them easier. 
from django.contrib.sites.models import Site as DjangoSite from onadata.libs.utils.model_tools import", "('-date_range_start', 'date_range_end') def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False def", "models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf", "fs_form_list] @property def site_name(self): if self.site is not None: return u'{}'.format(self.site.name)\\ @property def", "elif len(formhub_nodes) == 1: formhub_node = formhub_nodes[0] else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild)", "Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False,", "= models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight =", "import Site as DjangoSite from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0, 'Global'), (1,", "ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True)", "not node.hasAttribute(\"id\")] if len(instance_nodes) != 1: raise Exception(u\"Multiple instance nodes without the id", "blank=True,) shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs", "= models.BooleanField(default=True) data = JSONField(default={}) date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, 
related_name=\"deploy_data\", null=True)", "temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF,", "xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project = models.ForeignKey(Project,", "schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data = JSONField(default={}) date = models.DateTimeField(auto_now=True)", "p.search(xml) if m: return m.group(1) version = check_version(xml) if version: return version else:", "\"/forms/forms/\" + str(fxf_id) + \"#/\" + str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self):", "= models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False)", "get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in fs_form_list] @property def site_name(self):", "} ) def getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk)", "def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save,", "p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1)", "return order + 1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0)", "'group': parse_group(\"\",first_children) 
else: question = first_children['name'] question_type = first_children['type'] answer= '' if question", "def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time = True else:", "builder = SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8'))", "for fsform in fs_form_list] @property def site_name(self): if self.site is not None: return", "sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs): if instance.project is not None and created", "reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string } ) def getname(self): return '{0}", "\"\"\") m1 = p.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\"", "site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls, site, project, stage): if site: if", "# def copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance') # created = kwargs.get('created') #", "ValidationError(_('Duplicate Schedule Data')), }) if not self.is_scheduled and not self.is_staged: if self.site: if", "self._survey survey = property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\")", "Node.ELEMENT_NODE and (node.tagName == file_name or node.attributes.get('id'))] if len(survey_nodes) != 1: raise Exception(", "return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None def __unicode__(self): return getattr(self, \"name\", \"\") class", "fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) 
return fspi, created class FInstanceManager(models.Manager): def get_queryset(self): return", "self).save(*args, **kwargs) # Call the \"real\" save() method. @property def fsxfid(self): if self.project_fxf:", "Form Groups\") ordering = (\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\", \"\") class Stage(models.Model):", "in formhub_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName == \"uuid\"] if len(uuid_nodes) ==", "return m.group(1) version = check_version(xml) if version: return version else: p = re.compile(\"\"\"<bind", "null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User,", "= FInstanceManager() deleted_objects = FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__']", "models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True,", "True if FieldSightXF.objects.filter(stage=self).count() > 0 else False def form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return", "if not Stage.objects.filter(project=project).exists(): return 1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists():", "mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order =", "_update_fs_data = None class Meta: proxy = True def save(self, *args, **kwargs): self._update_fs_data", "= _(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end') def form_exists(self):", "json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain media_folder = 
self.instance.xform.user.username def parse_repeat(r_object): r_question = r_object['name'] data.append(r_question)", "onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser import", "+ 1 else: if not Stage.objects.filter(project=project).exists(): return 1 elif stage is not None:", "\"\") class Days(models.Model): day = models.CharField(max_length=9) index = models.IntegerField() def __unicode__(self): return getattr(self,", "django.core.urlresolvers import reverse from django.db import models from django.db.models import Max from django.db.models.signals", "models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects = FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def", "0) return order + 1 def __unicode__(self): return getattr(self, \"name\", \"\") class Days(models.Model):", "if not project_sub_stages: # continue # site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description, #", "def upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta:", "parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for first_children in g_object['children']: question = first_children['name'] question_type", "= XML_VERSION_MAX_ITER xml = self.xml p = re.compile('version=\"(.*)\">') m = p.search(xml) if m:", "kwargs.get('instance') # created = kwargs.get('created') # if created: # project = site.project #", "models.OneToOneField(Stage, blank=True, null=True, related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform", "# return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return 
reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if self.is_scheduled:", "g_question+\"/\"+question in json_answer: if question_type == 'note': answer= '' elif question_type == 'photo'", "kwargs={ \"site\": self.site.username, \"id_string\": self.id_string } ) def getname(self): return '{0} form {1}'.format(self.form_type(),", "def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title)", "if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already Used in", "for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == \"formhub\"] if", "except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey survey =", "return data class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True) date", "answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if 'label' in first_children:", "import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from xml.dom import Node from onadata.apps.fieldsight.models", "'{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site is", "self.site.update_status() if self.form_status is None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else: self.form_status =", "m = p.search(xml) if m: 
return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\")", "Meta: proxy = True def save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance,", "if len(formhub_nodes) > 1: raise Exception( u\"Multiple formhub nodes within main instance node\")", "= models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization = models.ForeignKey(Organization, null=True,", "elif instance.is_staged: pass else: fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data", "self.stage: self.group = self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self): return \"Stage\" if not", "= IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)", "GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form Group\") verbose_name_plural =", "self.site_fxf is None: return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,)", "as DjangoSite from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2,", "models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects", "+'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type,", "i 
in range(n, 0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m =", "return getattr(self, \"name\", \"\") class Stage(models.Model): name = models.CharField(max_length=256) description = models.TextField(blank=True, null=True)", "inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date", "= self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) # Call the \"real\" save() method. @property def", "== 'note': answer= '' elif question_type == 'photo' or question_type == 'audio' or", "= models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\") verbose_name_plural", "version = check_version(xml) if version: return version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\"", "self.is_scheduled and not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf':", "p.search(xml) if m: return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 =", "General Form Data')), }) @staticmethod def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for", "if self.xls and not self.xml: survey = create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml =", "educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site) #", "json_question = json.loads(self.instance.xform.json) base_url = 
DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def parse_repeat(r_object): r_question =", "m1 = p1.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\")", "return self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def", "= _(\"XForms\") ordering = (\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username,", "one\") instance_node = instance_nodes[0] # get the first child whose id attribute matches", "else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else:", "models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml = models.TextField() id_string", "not None and self.project_fxf.is_staged and self.site is not None: self.site.update_current_progress() elif self.site is", "parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True)", "# set_uuid(self) # self._set_uuid_in_xml() if not self.version: self.version = self.get_version super(XformHistory, self).save(*args, **kwargs)", "logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__'] def save(self, *args, **kwargs): self.version", "self.is_scheduled and self.schedule: return self.schedule.id if self.is_staged and self.stage: return self.stage.id return None", "= True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi, created class FInstanceManager(models.Manager): def", "Data')), }) @staticmethod def 
get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in", "return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is None: return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,)", "default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data' #", "related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date", "reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if self.is_scheduled: return \"scheduled\" if self.is_staged: return \"staged\"", "file_name=None): \"\"\" Add bind to automatically set UUID node in XML. 
\"\"\" if", "not Stage.objects.filter(stage=stage).exists(): return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return", "order + 1 else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return", "{}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs): if instance.project", "\"---\" + self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def instance_json(self):", "'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description = models.TextField(blank=True,", "name = models.CharField(max_length=256) description = models.TextField(blank=True, null=True) group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order", "version = models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects = FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog')", "doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node)", "def parse_repeat(r_object): r_question = r_object['name'] data.append(r_question) if r_question in json_answer: for gnr_answer in", "date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version = models.CharField(max_length=255,", "m1 = p.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\")", "order + 1 else: if not Stage.objects.filter(project=project).exists(): return 1 elif stage is 
not", "models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True)", "class Meta: ordering = ['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self):", "JSONField from pyxform import create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from xml.dom import", "self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance, update_data=None): if update_data is None: update_data", "as _ from django.dispatch import receiver from jsonfield import JSONField from pyxform import", "project=self.project)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate General Form Data')), }) @staticmethod def", "m = p.search(xml) if m: return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i))", "def getname(self): if self.site_fxf is None: return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0}", "question in json_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo'", "self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return [] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else", "n = XML_VERSION_MAX_ITER xml = self.xml p = re.compile('version=\"(.*)\">') m = p.search(xml) if", "shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs =", "import ArrayField from django.core.exceptions import ValidationError from django.core.urlresolvers 
import reverse from django.db import", "if self.site_fxf is None: return u\"%s\" % str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return", "calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if m1: return m1.group(1) return None def", "if not hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json) except", "0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status @property def form_count(self): return self.stage_forms.site_form_instances.all().count()", "= check_version(xml) if version: return version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\")", "dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is None: return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return", "= models.CharField(max_length=9) index = models.IntegerField() def __unicode__(self): return getattr(self, \"day\", \"\") class Schedule(models.Model):", "image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global =", "rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod", "n.tagName == \"formhub\"] if len(formhub_nodes) > 1: raise Exception( u\"Multiple formhub nodes within", "+ \"#/\" + str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is", "JSONField(default={}) date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, 
related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\",", "first_children in r_object['children']: question_type = first_children['type'] question = first_children['name'] group_answer = json_answer[r_question] answer", "FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False) return fspi, created class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False)", "method. @property def fsxfid(self): if self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\ @property def", "re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m = p.search(xml) if m: return m.group(1) p1 =", "if 'label' in first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by',", "json.dumps(self.instance.json) def get_responces(self): data=[] json_answer = self.instance.json json_question = json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain", "self._survey = create_survey_element_from_xml(xml) return self._survey survey = property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20)", "0: # append the calculate bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\"", "row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row) submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children'])", "is_pdf = 
models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31, blank=True, null=True)", "= p.search(xml) if m1: return m1.group(1) return None def save(self, *args, **kwargs): if", "formhub_nodes[0] else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for node in", "or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question]", "self.instance.json json_question = json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def parse_repeat(r_object): r_question", "models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs =", "models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',) class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm)", "= 'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name", "class Meta: db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\")", "u\"attribute, can't tell which is the main one\") instance_node = instance_nodes[0] # get", "= models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project, null=True, related_name='project_instances')", "\"name\", \"\") class DeletedXForm(models.Model): xf = models.OneToOneField(XForm, 
related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model):", "verbose_name_plural = _(\"Library\") ordering = (\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf =", "logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form Group\")", "created = kwargs.get('created') # if created: # project = site.project # project_main_stages =", "= pss.stage_forms # site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save()", "return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs):", "create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if", "os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time = True", "else: fxf_id = self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) + \"#/\" + str(self.instance.id) def", "Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(site=site,", "survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == \"formhub\"] if len(formhub_nodes) > 1:", "from django.db import models from django.db.models import Max from django.db.models.signals import post_save, pre_delete", "# site = kwargs.get('instance') # created = kwargs.get('created') # if created: # project", "'xf': ValidationError(_('Form Already Used in Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, 
is_scheduled=False, is_staged=False,", "save() method. @property def fsxfid(self): if self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\ @property", "class FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances') project =", "def getlatestsubmittiondate(self): if self.site is not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def", "calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if m: return m.group(1) p = re.compile(\"\"\"<bind", "blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: verbose_name = _(\"Library\") verbose_name_plural = _(\"Library\") ordering", "the \"real\" save() method. @property def fsxfid(self): if self.project_fxf: return self.project_fxf.id else: return", "xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True,", "url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string } ) def getname(self):", "site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == self.pk: raise ValidationError({", "re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml)", "return self.stage.id return None def stage_name(self): if self.stage: return self.stage.name def schedule_name(self): if", "*args, **kwargs): self.version = self.get_version if self.project_fxf is not None and self.project_fxf.is_staged and", "is not None: return 
u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self): if self.fsform: self.fsform.pk", "created, **kwargs): if instance.project is not None and created and not instance.is_staged: send_message_project_form(instance)", "FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created = True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data, async=False)", "= models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days =", "max_length=255) title = models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'')", "date_range_start = models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level =", "u'{}- {}- {}'.format(self.xf, self.site, self.is_staged) @receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs): if", "blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\")", "GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data' # unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\",", "def is_main_stage(self): return True if not self.stage else False def sub_stage_count(self): if not", "data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): 
finstance = models.ForeignKey(FInstance, related_name=\"comments\") message =", "== Node.ELEMENT_NODE and n.tagName == \"formhub\"] if len(formhub_nodes) > 1: raise Exception( u\"Multiple", "for pss in project_sub_stages: # if pss.tags and site.type: # if site.type.id not", "return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self): if self.fsform: self.fsform.pk return None @property", "shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True)", "**kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance,", "= [(0, 'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'),", "= create_survey_element_from_xml(xml) return self._survey survey = property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site", "def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1:", "from django.dispatch import receiver from jsonfield import JSONField from pyxform import create_survey_from_xls, SurveyElementBuilder", "fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form in schedule_forms:", "builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey survey", "continue # site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, 
weight=pss.weight) #", "choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted = models.BooleanField(default=False) version =", "not None: if not Stage.objects.filter(stage=stage).exists(): return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order =", "self.stage: return self.stage.name def schedule_name(self): if self.schedule: return self.schedule.name def clean(self): if self.is_staged:", "[(0, 'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2,", "return '{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site", "xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False)", "stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 def __unicode__(self): return getattr(self,", "and node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")] if len(instance_nodes) != 1: raise Exception(u\"Multiple", "\"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural = _(\"XForms\") ordering", "re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if m: return m.group(1) p =", "self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"'", "m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if m1: return", "= 
models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\",", "site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms =", "try: builder = SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json) except ValueError: xml = bytes(bytearray(self.xml,", "get_or_create(instance, update_data=None): if update_data is None: update_data = {} created = False try:", "(\"-shared_date\",) class EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title =", "_(\"FieldSight Form Groups\") ordering = (\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\", \"\") class", "else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for node in formhub_node.childNodes", "first_children['type'] == 'audio' or first_children['type'] == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else:", "easier. 
from django.contrib.sites.models import Site as DjangoSite from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL =", "Meta: db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering", "xls = models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description = models.TextField(default=u'', null=True) xml =", "models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self): if self.instance: return u\"%s ---------------%s\" %", "= False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created = True", "json_answer[g_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def", "models.BooleanField(default=False) version = models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects = FInstanceDeletedManager() logs =", "FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site", "[n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == \"formhub\"]", "[node for node in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and (node.tagName == file_name", "if self.form_exists() else None @property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None", "self.xls and not self.xml: survey = create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml = survey.to_xml()", "self.stage: return self.stage.id return None def stage_name(self): if 
self.stage: return self.stage.name def schedule_name(self):", "formhub_nodes = [n for n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName", "def get_survey(self): if not hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder() self._survey = \\", "site_name(self): if self.site is not None: return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site", "project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False,", "'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS =", "if not Stage.objects.filter(stage=stage).exists(): return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max', 0)", "and node.tagName == \"uuid\"] if len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0:", "created class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def get_queryset(self): return", "% file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml = doc.toprettyxml(indent=\" \",", "_(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight Form Stages\") ordering = (\"order\",) def save(self,", "@property def get_version(self): return self.instance.json['__version__'] def save(self, *args, **kwargs): self.version = self.get_version if", "return order + 1 else: if not Stage.objects.filter(project=project).exists(): return 1 elif stage is", "max_value=30,default=0) stage = models.ForeignKey('self', 
blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created =", "n in survey_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == \"formhub\"] if len(formhub_nodes)", "p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = p.search(xml) if m1: return m1.group(1)", "self.is_scheduled and not self.is_staged: return \"general\" def form_type_id(self): if self.is_scheduled and self.schedule: return", "json_answer = self.instance.json json_question = json.loads(self.instance.xform.json) base_url = DjangoSite.objects.get_current().domain media_folder = self.instance.xform.user.username def", "deleted_objects = FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__'] def save(self,", "not instance.is_staged: send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs): if instance.project is", "first_children['name'] answer = '' if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question,", "SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1,", "= FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in fs_form_list] @property def site_name(self): if self.site", "= 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form Group\") verbose_name_plural = _(\"FieldSight Form Groups\") ordering", "form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status @property def", "if not self.stage else \"SubStage\" def is_main_stage(self): return True if not self.stage else", "answer = json_answer[g_question+\"/\"+question] if 'label' in first_children: question = 
first_children['label'] row={'type':question_type, 'question':question, 'answer':answer}", "= models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created =", "self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site is not None: return", "self.xml = inlineOutput xform = models.ForeignKey(XForm, related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to,", "= schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, #", "self.is_staged: return \"general\" def form_type_id(self): if self.is_scheduled and self.schedule: return self.schedule.id if self.is_staged", ")*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>',", "= False def get_survey(self): if not hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder() self._survey", "on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date =", "default=u'') version = models.CharField(max_length=255, default=u'') @property def get_version(self): import re n = XML_VERSION_MAX_ITER", "self.version = self.get_version if self.project_fxf is not None and self.project_fxf.is_staged and self.site is", "= _(\"FieldSight Form Groups\") ordering = (\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\", \"\")", "SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from xml.dom import Node from 
onadata.apps.fieldsight.models import Site,", "'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together = ('xform', 'version') def _set_uuid_in_xml(self, file_name=None):", "def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None def __unicode__(self): return getattr(self, \"name\",", "survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if not self.version: self.version", "update_data is None: update_data = {} created = False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk)", "None: pass elif instance.is_staged: pass else: fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class", "else: # return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':0, 'pk':self.site_id}) def form_type(self): if", "weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table", "0: formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: # append the calculate bind node calculate_node", "= models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'') @property def", "= models.IntegerField(default=2, choices=SHARED_LEVEL) schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog')", "first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for first_children in", "site=site, date_range_start=schedule.date_range_start, # 
date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site,", "if version: return version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m =", "append the calculate bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name)", "# site_main_stage.save() # for pss in project_sub_stages: # if pss.tags and site.type: #", "models.CharField(max_length=31, blank=True, null=True) text = models.TextField(blank=True, null=True) stage = models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True)", "related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material =", "{1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site is not None:", "#To get domain to give complete url for app devs to make them", "0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if m:", "len(formhub_nodes) > 1: raise Exception( u\"Multiple formhub nodes within main instance node\") elif", "= re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1) p1", "= models.ForeignKey(User, related_name=\"submission_comments\") logs = GenericRelation('eventlog.FieldSightLog') class Meta: ordering = ['-date'] def get_absolute_url(self):", "'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class Meta: unique_together = ('xform', 'version') def", "# project_stage_id=pms.id, 
weight=pms.weight) # site_main_stage.save() # for pss in project_sub_stages: # if pss.tags", "return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(), self.site_fxf.xf.title,) def __unicode__(self): if", "Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 else: if not", "nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\"", "class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site, related_name=\"site_forms\", null=True, blank=True) project", "self.project_fxf.id else: return self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf: return self.project_fxf else: return", "form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return get_instances_for_field_sight_form(self.pk) def getlatestsubmittiondate(self): if self.site is not", "getname(self): if self.site_fxf is None: return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,) return '{0} form", "= models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User,", "self.form_exists() else None def __unicode__(self): return getattr(self, \"name\", \"\") class DeletedXForm(models.Model): xf =", "self.project_fxf else: return self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id = self.site_fxf_id else: fxf_id", "= FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist: created = True fspi = FieldSightParsedInstance(instance=instance) fspi.save(update_fs_data=update_data,", "== 'group': 
parse_group(\"\",first_children) else: question = first_children['name'] question_type = first_children['type'] answer= '' if", "= gnr_answer[r_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row)", "None def stage_name(self): if self.stage: return self.stage.name def schedule_name(self): if self.schedule: return self.schedule.name", "0 else False def form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def", "GenericRelation('eventlog.FieldSightLog') @property def get_version(self): return self.instance.json['__version__'] def save(self, *args, **kwargs): self.version = self.get_version", "getattr(self, \"day\", \"\") class Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site", "related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) site =", "not self.version: self.version = self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def", "unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\") verbose_name_plural", "super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance, update_data=None): if update_data is None:", "Used in Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if", "if self.xml.find(starttime_substring) != -1: self.has_start_time = True else: self.has_start_time = False def 
get_survey(self):", "EducationMaterial(models.Model): is_pdf = models.BooleanField(default=False) pdf = models.FileField(upload_to=\"education-material-pdf\", null=True, blank=True) title = models.CharField(max_length=31, blank=True,", "= models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, related_name=\"form_group\")", "related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified", "hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder() self._survey = \\ builder.create_survey_element_from_json(self.json) except ValueError: xml", "import datetime import os import json import re from django.contrib.auth.models import User from", "not Stage.objects.filter(project=project).exists(): return 1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists(): return", "FieldSightXF.objects.filter(schedule=self).count() > 0 else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None", "and not self.xml: survey = create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean()", "null=True) def upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk), 'xls', os.path.split(filename)[1]) class XformHistory(models.Model): class", "# xf=general_form.xf, fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form", "question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer = '' if g_question+\"/\"+question in json_answer: if", "= '' if r_question+\"/\"+question in gnr_answer: if first_children['type'] == 'note': answer= '' elif", 
"(node.tagName == file_name or node.attributes.get('id'))] if len(survey_nodes) != 1: raise Exception( u\"Multiple survey", "self.site.update_current_progress() elif self.site is not None: self.site.update_status() if self.form_status is None: if self.site_fxf:", "date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule' verbose_name =", "data.append(row) else: for first_children in r_object['children']: question_type = first_children['type'] question = first_children['name'] answer", "for node in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and (node.tagName == file_name or", "def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False def form_name(self): if", "self.site is not None: return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site is not", "\"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\", \"'%s'\" % self.uuid) model_node.appendChild(calculate_node) self.xml =", "@property def fsxf(self): if self.project_fxf: return self.project_fxf else: return self.site_fxf def get_absolute_url(self): if", "len(survey_nodes) != 1: raise Exception( u\"Multiple survey nodes with the id '%s'\" %", "p.search(xml) if m: return m.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 =", "@classmethod def get_order(cls, site, project, stage): if site: if not Stage.objects.filter(site=site).exists(): return 1", "None: if not Stage.objects.filter(stage=stage).exists(): return 1 else: mo = Stage.objects.filter(stage=stage).aggregate(Max('order')) order = mo.get('order__max',", "def get_version(self): import re n = XML_VERSION_MAX_ITER xml = self.xml p = re.compile('version=\"(.*)\">')", "null=True, blank=True) project = models.ForeignKey(Project, 
related_name=\"schedules\", null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end =", "= self.get_version if self.project_fxf is not None and self.project_fxf.is_staged and self.site is not", "survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if not self.version: self.version = self.get_version super(XformHistory,", "if len(survey_nodes) != 1: raise Exception( u\"Multiple survey nodes with the id '%s'\"", "= models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True,", "if site.type.id not in pss.tags: # continue # site_sub_stage = Stage(name=pss.name, order=pss.order, site=site,", "def send_delete_message(sender, instance, using, **kwargs): if instance.project is not None: pass elif instance.is_staged:", "== 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer = json_answer[question] if 'label'", "m: return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m1 = p.search(xml) if", "null=True, blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image = models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',)", "if node.nodeType == Node.ELEMENT_NODE and node.tagName == \"uuid\"] if len(uuid_nodes) == 0: formhub_node.appendChild(doc.createElement(\"uuid\"))", "Meta: db_table = 'fieldsight_forms_group' verbose_name = _(\"FieldSight Form Group\") verbose_name_plural = _(\"FieldSight Form", "if FieldSightXF.objects.filter(schedule=self).count() > 0 else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else", "ParsedInstance from 
onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain to", "XML_VERSION_MAX_ITER xml = self.xml p = re.compile('version=\"(.*)\">') m = p.search(xml) if m: return", "'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for first_children", "gnr_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo' or first_children['type']", "them easier. from django.contrib.sites.models import Site as DjangoSite from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL", "not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: # return", "or question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question]", "= ('-date_range_start', 'date_range_end') def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False", "pss.tags and site.type: # if site.type.id not in pss.tags: # continue # site_sub_stage", "null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table", "models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days,", "raise ValidationError({ 'xf': ValidationError(_('Form Already Used in Project Level')), }) else: if FieldSightXF.objects.filter(xf=self.xf,", "node.tagName == \"uuid\"] if len(uuid_nodes) == 0: 
formhub_node.appendChild(doc.createElement(\"uuid\")) if len(formhub_nodes) == 0: #", "def form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status @property", "= models.OneToOneField(Stage, related_name=\"em\", null=True, blank=True) fsxf = models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model):", "= p.search(xml) if m: return m.group(1) version = check_version(xml) if version: return version", "models.BooleanField(default=False) from_project = models.BooleanField(default=True) default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta:", "n): for i in range(n, 0, -1): p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i))", "related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted = models.BooleanField(default=False) is_survey = models.BooleanField(default=False) from_project = models.BooleanField(default=True)", "= first_children['type'] question = first_children['name'] group_answer = json_answer[r_question] answer = '' if r_question+\"/\"+question", "return order + 1 else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0)", "= models.IntegerField() def __unicode__(self): return getattr(self, \"day\", \"\") class Schedule(models.Model): name = models.CharField(\"Schedule", "'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'),", "+ str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is None: return", "answer = json_answer[question] if 'label' in first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer}", "in 
general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form) # #", "True else: self.has_start_time = False def get_survey(self): if not hasattr(self, \"_survey\"): try: builder", "# and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></',", "# schedule = schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, site=site,", "not self.is_staged: return \"general\" def form_type_id(self): if self.is_scheduled and self.schedule: return self.schedule.id if", "send_message(instance) @receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs): if instance.project is not None:", "= True else: self.has_start_time = False def get_survey(self): if not hasattr(self, \"_survey\"): try:", "if not self.version: self.version = self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1]", "in XML. 
\"\"\" if not file_name: file_name = self.file_name() file_name, file_ext = os.path.splitext(file_name)", "and self.schedule: return self.schedule.id if self.is_staged and self.stage: return self.stage.id return None def", "name = models.CharField(\"Schedule Name\", max_length=256, blank=True, null=True) site = models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True)", "def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\")", "Site as DjangoSite from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'),", "Groups\") ordering = (\"-date_modified\",) def __unicode__(self): return getattr(self, \"name\", \"\") class Stage(models.Model): name", "not self.is_scheduled and not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({", "else: return self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf: return self.project_fxf else: return self.site_fxf", "= models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS) user = models.ForeignKey(User, related_name=\"submission_comments\") logs =", "{}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict", "for schedule_form in schedule_forms: # schedule = schedule_form.schedule # selected_days = tuple(schedule.selected_days.all()) #", "IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from onadata.apps.logger.models import XForm, Instance from", "def fsxfid(self): if 
self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\ @property def fsxf(self): if", "form_changed = models.BooleanField(default=True) data = JSONField(default={}) date = models.DateTimeField(auto_now=True) site = models.ForeignKey(Site, related_name=\"deploy_data\",", "class Meta: proxy = True def save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {})", "onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get", "default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data", "related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status", "post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta: proxy = True def", "models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS)", "related_name=\"fshistory\") date = models.DateTimeField(auto_now=True) xls = models.FileField(upload_to=upload_to, null=True) json = models.TextField(default=u'') description =", "u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def project_info(self): if self.fsform: self.fsform.pk return None @property def", "# s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, 
site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model):", "first_children['name'] group_answer = json_answer[r_question] answer = '' if r_question+\"/\"+question in gnr_answer: if first_children['type']", "self.fsform.pk return None @property def has_versions(self): return self.xf.fshistory.exists() def __unicode__(self): return u'{}- {}-", "__unicode__(self): return getattr(self, \"name\", \"\") class Days(models.Model): day = models.CharField(max_length=9) index = models.IntegerField()", "= 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering = ('-date_range_start',", "verbose_name = _(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering = ('-date_range_start', 'date_range_end') def", "if self.is_scheduled: return \"scheduled\" if self.is_staged: return \"staged\" if self.is_survey: return \"survey\" if", "if question in json_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] ==", "form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True) submitted_by = models.ForeignKey(User, related_name=\"supervisor\") is_deleted", "'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for first_children in g_object['children']: question", "models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL) project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True,", "models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule'", "formhub_node = formhub_nodes[0] else: formhub_node = survey_node.insertBefore( 
doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for", "'xf': ValidationError(_('Duplicate General Form Data')), }) @staticmethod def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return", "return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def get_order(cls,", "= kwargs.get('created') # if created: # project = site.project # project_main_stages = project.stages.filter(stage__isnull=True)", "is_deployed=True, site=site, # xf=general_form.xf, fsform=general_form) # # schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) #", "self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Stage", "if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml)", "class FieldSightFormLibrary(models.Model): xf = models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization =", "def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id,", "True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists()", "return [fsform.xf.pk for fsform in fs_form_list] @property def 
site_name(self): if self.site is not", "'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if 'label' in first_children: question =", "1 return status @property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return", "class XformHistory(models.Model): class Meta: unique_together = ('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add", "= {} created = False try: fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk) fspi.save(update_fs_data=update_data, async=False) except FieldSightParsedInstance.DoesNotExist:", "mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1 def", "is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True,", "= r_object['name'] data.append(r_question) if r_question in json_answer: for gnr_answer in json_answer[r_question]: for first_children", "models.CharField(max_length=9) index = models.IntegerField() def __unicode__(self): return getattr(self, \"day\", \"\") class Schedule(models.Model): name", "onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain to give complete", "= mo.get('order__max', 0) return order + 1 else: mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order')) order", "not file_name: file_name = self.file_name() file_name, file_ext = os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes", "else None def __unicode__(self): return getattr(self, \"name\", \"\") class 
DeletedXForm(models.Model): xf = models.OneToOneField(XForm,", "def form_type_id(self): if self.is_scheduled and self.schedule: return self.schedule.id if self.is_staged and self.stage: return", "answer = '' if g_question+\"/\"+question in json_answer: if question_type == 'note': answer= ''", "matches our id_string survey_nodes = [node for node in instance_node.childNodes if node.nodeType ==", "site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count() @classmethod def", "null=True, blank=True) project = models.ForeignKey(Project, related_name=\"project_forms\", null=True, blank=True) is_staged = models.BooleanField(default=False) is_scheduled =", "else False def sub_stage_count(self): if not self.stage: return Stage.objects.filter(stage=self).count() return 0 def form_exists(self):", "status @property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod", "FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk ==", "property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance,", "status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return status @property def form_count(self):", "kwargs={'is_project':0, 'pk':self.site_id}) def 
form_type(self): if self.is_scheduled: return \"scheduled\" if self.is_staged: return \"staged\" if", "@staticmethod def get_xform_id_list(site_id): fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id') return [fsform.xf.pk for fsform in fs_form_list] @property", "if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({ 'xf': ValidationError(_('Duplicate Schedule Data')), }) if", "models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs): # site =", "choices=FORM_STATUS) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_data' # unique_together = ((\"xf\",", "post_save, pre_delete from django.utils.translation import ugettext_lazy as _ from django.dispatch import receiver from", "if instance.project is not None and created and not instance.is_staged: send_message_project_form(instance) elif created", "is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk == self.pk: raise", "for pms in project_main_stages: # project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True) # if not", "data.append(row) def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for first_children in g_object['children']: question =", "save(self, *args, **kwargs): self._update_fs_data = kwargs.pop('update_fs_data', {}) super(FieldSightParsedInstance, self).save(*args, **kwargs) def to_dict_for_mongo(self): mongo_dict", "else False def form_name(self): if not FieldSightXF.objects.filter(stage=self).count(): return \"\" return FieldSightXF.objects.filter(stage=self)[0].xf.title def form(self):", "= json_answer[question] if 
'label' in first_children: question = first_children['label'] row={\"type\":question_type, \"question\":question, \"answer\":answer} data.append(row)", "getattr(self, \"name\", \"\") class Days(models.Model): day = models.CharField(max_length=9) index = models.IntegerField() def __unicode__(self):", "question = first_children['name'] group_answer = json_answer[r_question] answer = '' if r_question+\"/\"+question in gnr_answer:", "super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances')", "os import json import re from django.contrib.auth.models import User from django.contrib.contenttypes.fields import GenericRelation", "project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, filename): return os.path.join( 'versions', str(instance.pk), 'xls',", "**kwargs) # Call the \"real\" save() method. 
@property def fsxfid(self): if self.project_fxf: return", "self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance') site = models.ForeignKey(Site, null=True, related_name='site_instances') project", "from onadata.libs.utils.model_tools import set_uuid SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL", "None @property def form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status = 1 return", "from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import ArrayField from django.core.exceptions import ValidationError from", "def stage_name(self): if self.stage: return self.stage.name def schedule_name(self): if self.schedule: return self.schedule.name def", "m1: return m1.group(1) return None def check_version(xml, n): for i in range(n, 0,", "is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule = models.OneToOneField(Schedule, blank=True,", "fspi, created class FInstanceManager(models.Manager): def get_queryset(self): return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def get_queryset(self):", "not project_sub_stages: # continue # site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id,", "general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False,", "['-date'] def get_absolute_url(self): return reverse('forms:alter-status-detail', kwargs={'pk': self.pk}) def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(),", "is_deployed=True, 
is_deleted=False) # for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site,", "@receiver(post_save, sender=FieldSightXF) def create_messages(sender, instance, created, **kwargs): if instance.project is not None and", "'%s'\" % self.id_string) survey_node = survey_nodes[0] formhub_nodes = [n for n in survey_node.childNodes", "@receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance') # created =", "models.ForeignKey(Site, related_name=\"schedules\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"schedules\", null=True, blank=True) date_range_start = models.DateField(default=datetime.date.today)", "or node.attributes.get('id'))] if len(survey_nodes) != 1: raise Exception( u\"Multiple survey nodes with the", "if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: #", "first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) else: for first_children in r_object['children']:", "self.pk}) def getname(self): return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged,", "return [] @property def xf(self): return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None @property def", "send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta: proxy = True", "r_question = r_object['name'] data.append(r_question) if r_question in json_answer: for gnr_answer in json_answer[r_question]: for", "null=True) 
xml = models.TextField() id_string = models.CharField(editable=False, max_length=255) title = models.CharField(editable=False, max_length=255) uuid", "= site.project # project_main_stages = project.stages.filter(stage__isnull=True) # for pms in project_main_stages: # project_sub_stages", "\"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string } ) def getname(self): return '{0} form", "re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1) return None", "create_survey_from_xls, SurveyElementBuilder from pyxform.xform2json import create_survey_element_from_xml from xml.dom import Node from onadata.apps.fieldsight.models import", "ordering = ('-date_range_start', 'date_range_end') def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else", "@property def fsxfid(self): if self.project_fxf: return self.project_fxf.id else: return self.site_fxf.id\\ @property def fsxf(self):", "# general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form in general_forms: #", "json_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo' or first_children['type']", "__unicode__(self): if self.instance: return u\"%s ---------------%s\" % (str(self.instance.id) ,self.offline_site_id) return u\"%s\" % str(self.offline_site_id)", "schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL) date_created = models.DateTimeField(auto_now_add=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table", "'Rejected'), (2, 'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description", "return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None @property def xf(self): return 
FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists()", "related_name=\"comments\") message = models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status", "date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form,", "without the id \" u\"attribute, can't tell which is the main one\") instance_node", "= re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput =", "ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight", "def copy_stages_from_project(sender, **kwargs): # site = kwargs.get('instance') # created = kwargs.get('created') # if", "__unicode__(self): return getattr(self, \"day\", \"\") class Schedule(models.Model): name = models.CharField(\"Schedule Name\", max_length=256, blank=True,", "\"---\" + self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json) def get_responces(self): data=[] json_answer = self.instance.json", "= survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml() if not self.version: self.version = self.get_version", "+'attachments/'+json_answer[g_question+\"/\"+question] else: answer = json_answer[g_question+\"/\"+question] if 'label' in first_children: question = first_children['label'] row={'type':question_type,", "data class InstanceStatusChanged(models.Model): finstance = models.ForeignKey(FInstance, related_name=\"comments\") message = models.TextField(null=True, blank=True) date =", 
"unicode_literals import datetime import os import json import re from django.contrib.auth.models import User", "form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False def form(self): return FieldSightXF.objects.filter(schedule=self)[0]", "return 1 elif stage is not None: if not Stage.objects.filter(stage=stage).exists(): return 1 else:", "= output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput xform =", "'Global'), (1, 'Organization'), (2, 'Project'),] SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),]", "blank=True, null=True, related_name=\"offline_submission\") fieldsight_form = models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self): if", "site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() # for", "self.finstance.site_fxf.xf.title) class InstanceImages(models.Model): instance_status = models.ForeignKey(InstanceStatusChanged, related_name=\"images\") image = models.ImageField(upload_to=\"submission-feedback-images\", verbose_name='Status Changed Images',)", "models.ForeignKey(XForm) is_global = models.BooleanField(default=False) shared_date = models.DateTimeField(auto_now=True) organization = models.ForeignKey(Organization, null=True, blank=True) project", "= property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance =", "fxf = instance send_message(fxf) post_save.connect(create_messages, sender=FieldSightXF) class FieldSightParsedInstance(ParsedInstance): _update_fs_data = None class Meta:", "= re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/_version_\" \"\"\") m1 = 
p.search(xml) if m1: return m1.group(1) p1", "question_type = first_children['type'] question = first_children['name'] answer = '' if 'label' in first_children:", "stage = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) date_created = models.DateTimeField(auto_now_add=True)", "kwargs.get('created') # if created: # project = site.project # project_main_stages = project.stages.filter(stage__isnull=True) #", "data.append(row) def parse_individual_questions(parent_object): for first_children in parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children) elif", "self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1,", "if self.is_scheduled and self.schedule: return self.schedule.id if self.is_staged and self.stage: return self.stage.id return", "project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances') form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS) date = models.DateTimeField(auto_now=True)", "FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model):", "parse_repeat(first_children) elif first_children['type'] == 'group': parse_group(\"\",first_children) else: question = first_children['name'] question_type = first_children['type']", "+ \"---\" + self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def", "models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False) is_deleted =", "self.site_fxf.id\\ @property def fsxf(self): if self.project_fxf: return self.project_fxf else: 
return self.site_fxf def get_absolute_url(self):", "**kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) !=", "p.search(xml) if m1: return m1.group(1) return None def save(self, *args, **kwargs): if self.xls", "give complete url for app devs to make them easier. from django.contrib.sites.models import", "self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1,", "blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") shared_level =", "= 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight Form Stages\") ordering", "day = models.CharField(max_length=9) index = models.IntegerField() def __unicode__(self): return getattr(self, \"day\", \"\") class", "self.site.username, \"id_string\": self.id_string } ) def getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,) def", "re from django.contrib.auth.models import User from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import ArrayField", "from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from onadata.apps.logger.models import XForm, Instance from onadata.apps.logger.xform_instance_parser", "'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for first_children in parent_object: if first_children['type'] == \"repeat\":", "get_version(self): import re n = XML_VERSION_MAX_ITER xml = 
self.xml p = re.compile('version=\"(.*)\">') m", "# hack # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\\ # and-silly-whitespace/ text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n(", "self.is_scheduled: return \"scheduled\" if self.is_staged: return \"staged\" if self.is_survey: return \"survey\" if not", "if self.stage: return self.stage.name def schedule_name(self): if self.schedule: return self.schedule.name def clean(self): if", "from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml from onadata.apps.viewer.models import ParsedInstance from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form from", "'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question] else: answer = json_answer[question] if 'label' in first_children: question =", "= kwargs.get('instance') # created = kwargs.get('created') # if created: # project = site.project", "parse_individual_questions(parent_object): for first_children in parent_object: if first_children['type'] == \"repeat\": parse_repeat(first_children) elif first_children['type'] ==", "in model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() == \"instance\" and not node.hasAttribute(\"id\")]", "'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural = _(\"FieldSight Form Stages\") ordering =", "None: return u\"%s\" % str(self.submitted_by) + \"---\" + self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by)", "0) return order + 1 else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max',", "calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind", "if self.site_fxf is None: return '{0} form {1}'.format(self.project_fxf.form_type(), 
self.project_fxf.xf.title,) return '{0} form {1}'.format(self.site_fxf.form_type(),", "nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if m: return m.group(1) p = re.compile(\"\"\"<bind calculate=\"(.*)\"", "\"#/\" + str(self.instance.id) def get_abr_form_status(self): return dict(FORM_STATUS)[self.form_status] def getname(self): if self.site_fxf is None:", "xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s, is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data =", "self.stage else False def sub_stage_count(self): if not self.stage: return Stage.objects.filter(stage=self).count() return 0 def", "[(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model): name =", "# s.selected_days.add(*selected_days) # s.save() # FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form, # schedule=s, is_deployed=True)", "self.version: self.version = self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self): return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self):", "blank=True) project = models.ForeignKey(Project, null=True, blank=True) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table =", "= models.BooleanField(default=False) is_scheduled = models.BooleanField(default=False) date_created = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now=True) schedule =", "and not node.hasAttribute(\"id\")] if len(instance_nodes) != 1: raise Exception(u\"Multiple instance nodes without the", "text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL) output_re = re.compile('\\n.*(<output.*>)\\n( )*') prettyXml = text_re.sub('>\\g<1></', self.xml.decode('utf-8')) inlineOutput", "return get_instances_for_field_sight_form(self.pk) def 
getlatestsubmittiondate(self): if self.site is not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return", "by', 'answer':json_answer['_submitted_by']} submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']} data.append(submitted_by) data.append(submittion_time) parse_individual_questions(json_question['children']) return data class InstanceStatusChanged(models.Model): finstance", "__unicode__(self): return getattr(self, \"name\", \"\") class Stage(models.Model): name = models.CharField(max_length=256) description = models.TextField(blank=True,", "inlineOutput = output_re.sub('\\g<1>', prettyXml) inlineOutput = re.compile('<label>\\s*\\n*\\s*\\n*\\s*</label>').sub( '<label></label>', inlineOutput) self.xml = inlineOutput xform", "check_version(xml) if version: return version else: p = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version_\" \"\"\") m", "ready = models.BooleanField(default=False) project_stage_id = models.IntegerField(default=0) weight = models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[])", "@receiver(pre_delete, sender=FieldSightXF) def send_delete_message(sender, instance, using, **kwargs): if instance.project is not None: pass", "is_global = models.BooleanField(default=False) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True, blank=True)", "unique=True) description = models.TextField(blank=True, null=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) creator =", "self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self): return \"Stage\" if not self.stage else \"SubStage\"", "\"\") class Stage(models.Model): name = models.CharField(max_length=256) description = models.TextField(blank=True, null=True) group = 
models.ForeignKey(FormGroup,related_name=\"stage\",", "return u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site is not None: return u'{}'.format(self.site.name) return", "Data')), }) if not self.is_scheduled and not self.is_staged: if self.site: if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False,", "related_name=\"deleted_xform\") date_created = models.DateTimeField(auto_now=True) class FieldSightXF(models.Model): xf = models.ForeignKey(XForm, related_name=\"field_sight_form\") site = models.ForeignKey(Site,", "site = models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project, null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF,", "main one\") instance_node = instance_nodes[0] # get the first child whose id attribute", "model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) != 1: raise Exception(u\"xml contains multiple model nodes\")", "in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_individual_questions(parent_object): for first_children", "return os.path.split(self.xls.name)[-1] def _mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time =", "self.schedule: return self.schedule.id if self.is_staged and self.stage: return self.stage.id return None def stage_name(self):", "not None and created and not instance.is_staged: send_message_project_form(instance) elif created and instance.site is", "= self.stage.group super(Stage, self).save(*args, **kwargs) def get_display_name(self): return \"Stage\" if not self.stage else", "= first_children['type'] question = first_children['name'] answer = '' if 'label' in first_children: question", "elif first_children['type'] == 'group': parse_group(\"\",first_children) else: question = 
first_children['name'] question_type = first_children['type'] answer=", "site = models.ForeignKey(Site, related_name=\"deploy_data\", null=True) project = models.ForeignKey(Project, related_name=\"deploy_data\", null=True) def upload_to(instance, filename):", "null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False) project_stage_id =", "models.TextField(null=True, blank=True) date = models.DateTimeField(auto_now=True) old_status = models.IntegerField(default=0, choices=FORM_STATUS) new_status = models.IntegerField(default=0, choices=FORM_STATUS)", "# if created: # project = site.project # project_main_stages = project.stages.filter(stage__isnull=True) # for", "Organization from onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from onadata.apps.logger.models", "'audio' or question_type == 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+\"/\"+question] else: answer =", "'question':question, 'answer':answer} data.append(row) else: for first_children in r_object['children']: question_type = first_children['type'] question =", "instance nodes without the id \" u\"attribute, can't tell which is the main", "default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True) # site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True,", "is_deleted=False) # for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, #", "None @property def xf(self): return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None 
def __unicode__(self): return", "def __unicode__(self): return getattr(self, \"name\", \"\") class Days(models.Model): day = models.CharField(max_length=9) index =", "self.form_exists() else None @property def form_status(self): status = 0 if self.stage_forms.site_form_instances.filter(form_status=3).exists(): status =", "bind node calculate_node = doc.createElement(\"bind\") calculate_node.setAttribute( \"nodeset\", \"/%s/formhub/uuid\" % file_name) calculate_node.setAttribute(\"type\", \"string\") calculate_node.setAttribute(\"calculate\",", "'' if g_question+\"/\"+question in json_answer: if question_type == 'note': answer= '' elif question_type", "if m1: return m1.group(1) p1 = re.compile(\"\"\"<bind calculate=\"(.*)\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml)", "= models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'') @property def get_version(self): import re n", "'' if 'label' in first_children: question = first_children['label'] row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def", "[node for node in model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() == \"instance\"", "# for pss in project_sub_stages: # if pss.tags and site.type: # if site.type.id", "save(self, *args, **kwargs): self.version = self.get_version if self.project_fxf is not None and self.project_fxf.is_staged", "return self.instance.json['__version__'] def save(self, *args, **kwargs): self.version = self.get_version if self.project_fxf is not", "m1.group(1) return None def save(self, *args, **kwargs): if self.xls and not self.xml: survey", "models.CharField(editable=False, max_length=255) title = models.CharField(editable=False, max_length=255) uuid = models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255,", "return super(FInstanceManager, self).get_queryset().filter(is_deleted=False) class FInstanceDeletedManager(models.Manager): def 
get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance", "_(\"FieldSight Form Stages\") ordering = (\"order\",) def save(self, *args, **kwargs): if self.stage: self.group", "is not None and created and not instance.is_staged: send_message_project_form(instance) elif created and instance.site", "# schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False) # for schedule_form in schedule_forms: # schedule", "create_survey_element_from_xml from xml.dom import Node from onadata.apps.fieldsight.models import Site, Project, Organization from onadata.apps.fsforms.fieldsight_models", "import re n = XML_VERSION_MAX_ITER xml = self.xml p = re.compile('version=\"(.*)\">') m =", "= models.ForeignKey(FieldSightXF, related_name=\"offline_submissiob\" , null=True, blank=True) def __unicode__(self): if self.instance: return u\"%s ---------------%s\"", "models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\")", "uuid = models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'') @property def get_version(self): import re", "site.type.id not in pss.tags: # continue # site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, #", "'group': parse_group(g_question+\"/\",first_children) continue answer = '' if g_question+\"/\"+question in json_answer: if question_type ==", "Data')), }) if self.is_scheduled: if FieldSightXF.objects.filter(schedule=self.schedule).exists(): if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk: raise ValidationError({", "= models.CharField(max_length=255, default=u'') objects = FInstanceManager() deleted_objects = FInstanceDeletedManager() logs = GenericRelation('eventlog.FieldSightLog') @property", 
"group = models.ForeignKey(FormGroup,related_name=\"stage\", null=True, blank=True) order = IntegerRangeField(min_value=0, max_value=30,default=0) stage = models.ForeignKey('self', blank=True,", "else: mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order')) order = mo.get('order__max', 0) return order + 1", "is_deployed=True, is_deleted=False) # for schedule_form in schedule_forms: # schedule = schedule_form.schedule # selected_days", "if self.project_fxf is not None and self.project_fxf.is_staged and self.site is not None: self.site.update_current_progress()", "Stage(name=pms.name, order=pms.order, site=site, description=pms.description, # project_stage_id=pms.id, weight=pms.weight) # site_main_stage.save() # for pss in", "form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True, null=True, related_name=\"parent\") is_deployed = models.BooleanField(default=False)", "class Meta: unique_together = ('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add bind to", "is not None: return self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: #", "reverse from django.db import models from django.db.models import Max from django.db.models.signals import post_save,", "(2, 'Flagged'), (3, 'Approved'), ] class FormGroup(models.Model): name = models.CharField(max_length=256, unique=True) description =", "site_sub_stage = Stage(name=pss.name, order=pss.order, site=site, # description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight) # site_sub_stage.save() #", "= formhub_nodes[0] else: formhub_node = survey_node.insertBefore( doc.createElement(\"formhub\"), survey_node.firstChild) uuid_nodes = [node for node", "= models.ForeignKey(Site, null=True, related_name='site_instances') project = models.ForeignKey(Project, 
null=True, related_name='project_instances') site_fxf = models.ForeignKey(FieldSightXF, null=True,", "models.CharField(max_length=32, default=u'') version = models.CharField(max_length=255, default=u'') @property def get_version(self): import re n =", "_mark_start_time_boolean(self): starttime_substring = 'jr:preloadParams=\"start\"' if self.xml.find(starttime_substring) != -1: self.has_start_time = True else: self.has_start_time", "related_name=\"form_group\") is_global = models.BooleanField(default=False) organization = models.ForeignKey(Organization, null=True, blank=True) project = models.ForeignKey(Project, null=True,", "weight=pss.weight) # site_sub_stage.save() # if FieldSightXF.objects.filter(stage=pss).exists(): # fsxf = pss.stage_forms # site_form =", "('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add bind to automatically set UUID node", "self.id_string } ) def getname(self): return '{0} form {1}'.format(self.form_type(), self.xf.title,) def getresponces(self): return", "False def get_survey(self): if not hasattr(self, \"_survey\"): try: builder = SurveyElementBuilder() self._survey =", "site_form.save() # general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False) # for general_form in general_forms:", "self.project_fxf.default_submission_status super(FInstance, self).save(*args, **kwargs) # Call the \"real\" save() method. 
@property def fsxfid(self):", "onadata.apps.fsforms.fieldsight_models import IntegerRangeField from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version from onadata.apps.logger.models import XForm,", "question_type = first_children['type'] question = first_children['name'] group_answer = json_answer[r_question] answer = '' if", "return None return FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage:", "GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural = _(\"Form", "def _set_uuid_in_xml(self, file_name=None): \"\"\" Add bind to automatically set UUID node in XML.", "= re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/_version__00{0}\" \"\"\".format(i)) m = p.search(xml) if m: return m.group(1) p", "if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project).exists(): if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, site=self.site, project=self.project)[0].pk", "+ 1 def __unicode__(self): return getattr(self, \"name\", \"\") class Days(models.Model): day = models.CharField(max_length=9)", "Meta: unique_together = ('xform', 'version') def _set_uuid_in_xml(self, file_name=None): \"\"\" Add bind to automatically", "Call the \"real\" save() method. 
@property def fsxfid(self): if self.project_fxf: return self.project_fxf.id else:", "u'{}'.format(self.site.name)\\ @property def site_or_project_display(self): if self.site is not None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name)", "survey_nodes = [node for node in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and (node.tagName", "'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for first_children in g_object['children']:", "import get_instances_for_field_sight_form from onadata.settings.local_settings import XML_VERSION_MAX_ITER #To get domain to give complete url", "return self.schedule.name def clean(self): if self.is_staged: if FieldSightXF.objects.filter(stage=self.stage).exists(): if not FieldSightXF.objects.filter(stage=self.stage).pk == self.pk:", "(2, 'Monthly'),] FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ]", "= [node for node in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE and (node.tagName ==", "@staticmethod def get_or_create(instance, update_data=None): if update_data is None: update_data = {} created =", "self.site_fxf_id else: fxf_id = self.project_fxf_id return \"/forms/forms/\" + str(fxf_id) + \"#/\" + str(self.instance.id)", "User from django.contrib.contenttypes.fields import GenericRelation from django.contrib.postgres.fields import ArrayField from django.core.exceptions import ValidationError", "GenericRelation('eventlog.FieldSightLog') class Meta: db_table = 'fieldsight_forms_stage' verbose_name = _(\"FieldSight Form Stage\") verbose_name_plural =", "= models.IntegerField(default=0) tags = ArrayField(models.IntegerField(), default=[]) logs = GenericRelation('eventlog.FieldSightLog') class Meta: db_table =", "bytes(bytearray(self.xml, encoding='utf-8')) self._survey = create_survey_element_from_xml(xml) return self._survey survey = 
property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id", "self.stage: return Stage.objects.filter(stage=self).count() return 0 def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count() > 0", "db_table = 'fieldsight_forms_schedule' verbose_name = _(\"Form Schedule\") verbose_name_plural = _(\"Form Schedules\") ordering =", "= (\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string }", "creator = models.ForeignKey(User, related_name=\"form_group\") is_global = models.BooleanField(default=False) organization = models.ForeignKey(Organization, null=True, blank=True) project", "models.ForeignKey(Site, related_name=\"stages\", null=True, blank=True) project = models.ForeignKey(Project, related_name=\"stages\", null=True, blank=True) ready = models.BooleanField(default=False)", "file_name, file_ext = os.path.splitext(file_name) doc = clean_and_parse_xml(self.xml) model_nodes = doc.getElementsByTagName(\"model\") if len(model_nodes) !=", "= models.DateField(default=datetime.date.today) date_range_end = models.DateField(default=datetime.date.today) selected_days = models.ManyToManyField(Days, related_name='days', blank=True,) shared_level = models.IntegerField(default=2,", "@staticmethod def rejected_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count() @staticmethod def flagged_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2,", "= [node for node in model_node.childNodes if node.nodeType == Node.ELEMENT_NODE and node.tagName.lower() ==", "to_dict_for_mongo(self): mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo() mongo_dict.update(self._update_fs_data) return mongo_dict @staticmethod def get_or_create(instance, update_data=None): if", "# return 
reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: # return reverse('forms:formpack_html_export',", "class FInstanceDeletedManager(models.Manager): def get_queryset(self): return super(FInstanceDeletedManager, self).get_queryset().filter(is_deleted=True) class FInstance(models.Model): instance = models.OneToOneField(Instance, related_name='fieldsight_instance')", "in gnr_answer: if first_children['type'] == 'note': answer= '' elif first_children['type'] == 'photo' or", "return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk}) return reverse('forms:setup-forms', kwargs={'is_project':1, 'pk':self.project_id}) else: # return reverse('forms:formpack_html_export', kwargs={'fsxf_id':", "= models.OneToOneField(FieldSightXF, related_name=\"em\", null=True, blank=True) class EducationalImages(models.Model): educational_material = models.ForeignKey(EducationMaterial, related_name=\"em_images\") image =", "self.form_status is None: if self.site_fxf: self.form_status = self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status super(FInstance,", "tuple(schedule.selected_days.all()) # s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start, # date_range_end=schedule.date_range_end) # s.selected_days.add(*selected_days) # s.save()", "def form_exists(self): return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False def form(self): return", "return None def check_version(xml, n): for i in range(n, 0, -1): p =", "= create_survey_from_xls(self.xls) self.json = survey.to_json() self.xml = survey.to_xml() self._mark_start_time_boolean() # set_uuid(self) # self._set_uuid_in_xml()", "question_type = first_children['type'] if question_type == 'group': parse_group(g_question+\"/\",first_children) continue answer = '' if", "# self._set_uuid_in_xml() if not 
self.version: self.version = self.get_version super(XformHistory, self).save(*args, **kwargs) def file_name(self):", "FieldSightXF.objects.filter(stage=self)[0] def active_substages(self): return self.parent.filter(stage_forms__isnull=False) def get_sub_stage_list(self): if not self.stage: return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id') return", "our id_string survey_nodes = [node for node in instance_node.childNodes if node.nodeType == Node.ELEMENT_NODE", "self.project_fxf.xf.title return u\"%s\" % str(self.submitted_by) + \"---\" + self.site_fxf.xf.title def instance_json(self): return json.dumps(self.instance.json)", "if not self.stage: return Stage.objects.filter(stage=self).count() return 0 def form_exists(self): return True if FieldSightXF.objects.filter(stage=self).count()", "!= 1: raise Exception(u\"Multiple instance nodes without the id \" u\"attribute, can't tell", "import Max from django.db.models.signals import post_save, pre_delete from django.utils.translation import ugettext_lazy as _", "== 'video': answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+\"/\"+question] else: answer = gnr_answer[r_question+\"/\"+question] if 'label'", "= models.ImageField(upload_to=\"education-material-images\", verbose_name='Education Images',) # @receiver(post_save, sender=Site) # def copy_stages_from_project(sender, **kwargs): # site", "def site_or_project_display(self): if self.site is not None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property def", "\"\"\") m1 = p1.search(xml) if m1: return m1.group(1) return None def check_version(xml, n):", "\"survey\" if not self.is_scheduled and not self.is_staged: return \"general\" def form_type_id(self): if self.is_scheduled", "if self.project_fxf: return self.project_fxf else: return self.site_fxf def get_absolute_url(self): if self.site_fxf: fxf_id =", "create_survey_element_from_xml(xml) 
return self._survey survey = property(get_survey) class SubmissionOfflineSite(models.Model): offline_site_id = models.CharField(max_length=20) temporary_site =", "is_scheduled=False, is_staged=False,project=self.site.project).exists(): raise ValidationError({ 'xf': ValidationError(_('Form Already Used in Project Level')), }) else:", "status = 1 return status @property def form_count(self): return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id,", "# unique_together = ((\"xf\", \"site\"), (\"xf\", \"is_staged\", \"stage\"),(\"xf\", \"is_scheduled\", \"schedule\")) verbose_name = _(\"XForm\")", "site, project, stage): if site: if not Stage.objects.filter(site=site).exists(): return 1 elif stage is", "def save(self, *args, **kwargs): if self.stage: self.group = self.stage.group super(Stage, self).save(*args, **kwargs) def", "@property def site_or_project_display(self): if self.site is not None: return u'{}'.format(self.site.name) return u'{}'.format(self.project.name) @property", "return self.stage_forms.site_form_instances.all().count() @staticmethod def site_submission_count(id, site_id): return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count() @staticmethod def rejected_submission_count(id, site_id): return", "first_children['type'] == 'group': parse_group(\"\",first_children) else: question = first_children['name'] question_type = first_children['type'] answer= ''", "related_name=\"stage_forms\") shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL) form_status = models.IntegerField(default=0, choices=FORM_STATUS) fsform = models.ForeignKey('self', blank=True,", "return \"survey\" if not self.is_scheduled and not self.is_staged: return \"general\" def form_type_id(self): if", "nodes without the id \" u\"attribute, can't tell which is the main one\")", "self.site_fxf.default_submission_status else: self.form_status = self.project_fxf.default_submission_status 
super(FInstance, self).save(*args, **kwargs) # Call the \"real\" save()", "# for general_form in general_forms: # FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site, # xf=general_form.xf,", "is_deployed=True) class DeployEvent(models.Model): form_changed = models.BooleanField(default=True) data = JSONField(default={}) date = models.DateTimeField(auto_now=True) site", "row={'type':question_type, 'question':question, 'answer':answer} data.append(row) def parse_group(prev_groupname, g_object): g_question = prev_groupname+g_object['name'] for first_children in", "p1 = re.compile(\"\"\"<bind calculate=\"\\'(.*)\\'\" nodeset=\"/(.*)/__version__\" \"\"\") m1 = p1.search(xml) if m1: return m1.group(1)", "offline_site_id = models.CharField(max_length=20) temporary_site = models.ForeignKey(Site, related_name=\"offline_submissions\") instance = models.OneToOneField(FInstance, blank=True, null=True, related_name=\"offline_submission\")", "(\"-date_created\",) def url(self): return reverse( \"download_fild_sight_form\", kwargs={ \"site\": self.site.username, \"id_string\": self.id_string } )", "self.site_form_instances.order_by('-pk').values('date')[:1] else: return self.project_form_instances.order_by('-pk').values('date')[:1] def get_absolute_url(self): if self.project: # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk})", "class Days(models.Model): day = models.CharField(max_length=9) index = models.IntegerField() def __unicode__(self): return getattr(self, \"day\"," ]
[ ".base import PipBaseRecipe class PyGit2Recipe(PipBaseRecipe): def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends", "from .base import PipBaseRecipe class PyGit2Recipe(PipBaseRecipe): def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs)", "import PipBaseRecipe class PyGit2Recipe(PipBaseRecipe): def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends =", "**kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends = ['libgit2'] self.name = 'pygit2' self.version = '0.24.0'", "def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends = ['libgit2'] self.name = 'pygit2'", "PipBaseRecipe class PyGit2Recipe(PipBaseRecipe): def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends = ['libgit2']", "PyGit2Recipe(PipBaseRecipe): def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends = ['libgit2'] self.name =", "<gh_stars>0 from .base import PipBaseRecipe class PyGit2Recipe(PipBaseRecipe): def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args,", "*args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends = ['libgit2'] self.name = 'pygit2' self.version =", "class PyGit2Recipe(PipBaseRecipe): def __init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends = ['libgit2'] self.name", "__init__(self, *args, **kwargs): super(PyGit2Recipe, self).__init__(*args, **kwargs) self.depends = ['libgit2'] self.name = 'pygit2' self.version" ]
[ "cursor.executemany( 'insert into messages values (?, ?, ?)', message_rows ) cursor.executemany('insert into tags", "values (?, ?, ?)', message_rows ) cursor.executemany('insert into tags values (?, ?, ?)',", "self._connection.cursor() cursor.executemany( 'insert into messages values (?, ?, ?)', message_rows ) cursor.executemany('insert into", "def __init__(self, connection): self._connection = connection def write_messages(self, messages): message_rows = [(message.id, message.text,", "message_writers.message_writer import MessageWriter from entities.message import Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection", "connection): self._connection = connection def write_messages(self, messages): message_rows = [(message.id, message.text, message.user_id) for", "= [] for message in messages: tags = tags + message.tags tag_rows =", "message in messages: tags = tags + message.tags tag_rows = [(tag.category, tag.text, tag.message_id)", "tag in tags] cursor = self._connection.cursor() cursor.executemany( 'insert into messages values (?, ?,", "Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection = connection def write_messages(self, messages): message_rows", "messages] tags = [] for message in messages: tags = tags + message.tags", "message in messages] tags = [] for message in messages: tags = tags", "= self._connection.cursor() cursor.executemany( 'insert into messages values (?, ?, ?)', message_rows ) cursor.executemany('insert", "write_messages(self, messages): message_rows = [(message.id, message.text, message.user_id) for message in messages] tags =", "tags] cursor = self._connection.cursor() cursor.executemany( 'insert into messages values (?, ?, ?)', message_rows", "def write_messages(self, messages): message_rows = [(message.id, message.text, message.user_id) for message in messages] tags", "tags = tags + message.tags tag_rows = [(tag.category, tag.text, 
tag.message_id) for tag in", "from message_writers.message_writer import MessageWriter from entities.message import Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection):", "in messages] tags = [] for message in messages: tags = tags +", "for tag in tags] cursor = self._connection.cursor() cursor.executemany( 'insert into messages values (?,", "tag_rows = [(tag.category, tag.text, tag.message_id) for tag in tags] cursor = self._connection.cursor() cursor.executemany(", "import Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection = connection def write_messages(self, messages):", "in tags] cursor = self._connection.cursor() cursor.executemany( 'insert into messages values (?, ?, ?)',", "entities.message import Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection = connection def write_messages(self,", "self._connection = connection def write_messages(self, messages): message_rows = [(message.id, message.text, message.user_id) for message", "= connection def write_messages(self, messages): message_rows = [(message.id, message.text, message.user_id) for message in", "cursor = self._connection.cursor() cursor.executemany( 'insert into messages values (?, ?, ?)', message_rows )", "+ message.tags tag_rows = [(tag.category, tag.text, tag.message_id) for tag in tags] cursor =", "for message in messages] tags = [] for message in messages: tags =", "MessageWriter from entities.message import Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection = connection", "messages: tags = tags + message.tags tag_rows = [(tag.category, tag.text, tag.message_id) for tag", "[] for message in messages: tags = tags + message.tags tag_rows = [(tag.category,", "messages values (?, ?, ?)', message_rows ) cursor.executemany('insert into tags values (?, ?,", "into messages values (?, ?, ?)', message_rows ) 
cursor.executemany('insert into tags values (?,", "= tags + message.tags tag_rows = [(tag.category, tag.text, tag.message_id) for tag in tags]", "__init__(self, connection): self._connection = connection def write_messages(self, messages): message_rows = [(message.id, message.text, message.user_id)", "<reponame>Kaltsoon/telegram-analytics from message_writers.message_writer import MessageWriter from entities.message import Message class DatabaseMessageWriter(MessageWriter): def __init__(self,", "in messages: tags = tags + message.tags tag_rows = [(tag.category, tag.text, tag.message_id) for", "message.tags tag_rows = [(tag.category, tag.text, tag.message_id) for tag in tags] cursor = self._connection.cursor()", "= [(message.id, message.text, message.user_id) for message in messages] tags = [] for message", "tags = [] for message in messages: tags = tags + message.tags tag_rows", "tag.message_id) for tag in tags] cursor = self._connection.cursor() cursor.executemany( 'insert into messages values", "(?, ?, ?)', message_rows ) cursor.executemany('insert into tags values (?, ?, ?)', tag_rows)", "[(tag.category, tag.text, tag.message_id) for tag in tags] cursor = self._connection.cursor() cursor.executemany( 'insert into", "tags + message.tags tag_rows = [(tag.category, tag.text, tag.message_id) for tag in tags] cursor", "import MessageWriter from entities.message import Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection =", "from entities.message import Message class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection = connection def", "tag.text, tag.message_id) for tag in tags] cursor = self._connection.cursor() cursor.executemany( 'insert into messages", "connection def write_messages(self, messages): message_rows = [(message.id, message.text, message.user_id) for message in messages]", "DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection = 
connection def write_messages(self, messages): message_rows = [(message.id,", "messages): message_rows = [(message.id, message.text, message.user_id) for message in messages] tags = []", "for message in messages: tags = tags + message.tags tag_rows = [(tag.category, tag.text,", "'insert into messages values (?, ?, ?)', message_rows ) cursor.executemany('insert into tags values", "= [(tag.category, tag.text, tag.message_id) for tag in tags] cursor = self._connection.cursor() cursor.executemany( 'insert", "?, ?)', message_rows ) cursor.executemany('insert into tags values (?, ?, ?)', tag_rows) self._connection.commit()", "[(message.id, message.text, message.user_id) for message in messages] tags = [] for message in", "message.text, message.user_id) for message in messages] tags = [] for message in messages:", "message_rows = [(message.id, message.text, message.user_id) for message in messages] tags = [] for", "message.user_id) for message in messages] tags = [] for message in messages: tags", "class DatabaseMessageWriter(MessageWriter): def __init__(self, connection): self._connection = connection def write_messages(self, messages): message_rows =" ]
[ "print(res) # Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 '", "article[\"section_texts\"]): title = CC.convert(title) if title in SECTION_BLACKLIST: continue for paragraph in [x", "\"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) # Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={}", "if len(x) > 10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists():", "model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word", "= \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE = 7500 PAD = 1 UNK =", "TMPPATH) as f: for _, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\")", "python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip import json import subprocess from pathlib", "import OpenCC from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH =", "SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\"", "TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE", "VOC_SIZE = 7500 PAD = 1 UNK = 0 def json_to_txt(): with gzip.open(DATAPATH)", "' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, 
algorithm=\"unigram\" )", "TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE = 7500 PAD", "f: with open(TMPPATH, \"w\") as fw: for _, line in tqdm(enumerate(f.readlines())): article =", "print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens = [] with", "in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def", "MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE = 7500 PAD = 1 UNK", "200 or filter_texts(paragraph): continue for sentence in [x for x in paragraph.split(\"。\") if", "as fw: for _, line in tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\" in", "article = json.loads(line) if \"年表\" in article[\"title\"] or \"列表\" in article[\"title\"]: continue for", "def json_to_txt(): with gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\") as fw: for _,", "< 200 or filter_texts(paragraph): continue for sentence in [x for x in paragraph.split(\"。\")", "TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True,", "else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp", "clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX", "OpenCC from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\"", 
"= json.loads(line) if \"年表\" in article[\"title\"] or \"列表\" in article[\"title\"]: continue for title,", "\"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) # Train", "filter_texts(paragraph): continue for sentence in [x for x in paragraph.split(\"。\") if len(x) >", "\"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE = 7500", "np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word", "in article[\"title\"]: continue for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if", "in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if title in SECTION_BLACKLIST: continue for paragraph", "len(x) > 10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt()", "continue for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if title in", "json import subprocess from pathlib import Path import sentencepiece as spm import joblib", "from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD", ") def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\")", "= \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE = 7500 PAD =", "--unigram \"\"\" import gzip 
import json import subprocess from pathlib import Path import", "clean_text(paragraph) if len(paragraph) < 200 or filter_texts(paragraph): continue for sentence in [x for", "from tqdm import tqdm from opencc import OpenCC from wiki_tokenize_json import clean_text, filter_texts,", "algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word segmentation...\") res = subprocess.run([", "stdout=subprocess.PIPE) print(res) # Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000", "algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word)", "\"\"\" import gzip import json import subprocess from pathlib import Path import sentencepiece", "joblib import numpy as np import click from tqdm import tqdm from opencc", "json.loads(line) if \"年表\" in article[\"title\"] or \"列表\" in article[\"title\"]: continue for title, section", "title in SECTION_BLACKLIST: continue for paragraph in [x for x in section.split(\"\\n\") if", "else False algorithm = \"bpe\" if bpe else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word,", "10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if seg_word:", "'--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH,", "import tqdm from opencc import OpenCC from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH", "else TMPPATH) as f: for _, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens),", "with open(TMPPATH, \"w\") as fw: for _, line in 
tqdm(enumerate(f.readlines())): article = json.loads(line)", "_, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\",", "1 UNK = 0 def json_to_txt(): with gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\")", "title = CC.convert(title) if title in SECTION_BLACKLIST: continue for paragraph in [x for", "\"\"\"SentencePiece Tokenization for Wiki Dataset Example: * python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import", "DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC", "7500 PAD = 1 UNK = 0 def json_to_txt(): with gzip.open(DATAPATH) as f:", "in [x for x in section.split(\"\\n\") if len(x) > 50]: paragraph = clean_text(paragraph)", "= True if word else False algorithm = \"bpe\" if bpe else \"unigram\"", "subprocess from pathlib import Path import sentencepiece as spm import joblib import numpy", "sentencepiece as spm import joblib import numpy as np import click from tqdm", "open(TMPPATH, \"w\") as fw: for _, line in tqdm(enumerate(f.readlines())): article = json.loads(line) if", "scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip import json import subprocess from pathlib import", "tqdm import tqdm from opencc import OpenCC from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST", "x in paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"):", "False algorithm = \"bpe\" if bpe else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm)", "MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"): 
print(\"Tokenizing...\") sp = spm.SentencePieceProcessor()", "if seg_word else TMPPATH) as f: for _, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence))", "for x in section.split(\"\\n\") if len(x) > 50]: paragraph = clean_text(paragraph) if len(paragraph)", "seg_word: print(\"Performing word segmentation...\") res = subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH,", "if len(x) > 50]: paragraph = clean_text(paragraph) if len(paragraph) < 200 or filter_texts(paragraph):", "default=True) def main(word, bpe): seg_word = True if word else False algorithm =", "= subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res)", "0 def json_to_txt(): with gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\") as fw: for", "def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word segmentation...\") res", "= \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC =", "\"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) # Train Model", "'--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) )", "TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp =", "if len(paragraph) < 200 or filter_texts(paragraph): continue for sentence in [x for x", "sentence in [x for x in 
paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence +", "\".model\") tokens = [] with open(TMPPATH_WORD if seg_word else TMPPATH) as f: for", "f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word = True if", "@click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word = True if word", "TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) # Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={}", "'--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\"", "for paragraph in [x for x in section.split(\"\\n\") if len(x) > 50]: paragraph", "not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word segmentation...\") res = subprocess.run([ \"thulac\", \"-model_dir\",", "joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word = True", "= CC.convert(title) if title in SECTION_BLACKLIST: continue for paragraph in [x for x", "Wiki Dataset Example: * python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip import json", "or filter_texts(paragraph): continue for sentence in [x for x in paragraph.split(\"。\") if len(x)", "= \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE =", "\"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word segmentation...\")", "import numpy as np 
import click from tqdm import tqdm from opencc import", "if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"):", "in [x for x in paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence + \"。\\n\")", "filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX =", "PAD = 1 UNK = 0 def json_to_txt(): with gzip.open(DATAPATH) as f: with", "Tokenization for Wiki Dataset Example: * python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip", "\"列表\" in article[\"title\"]: continue for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title)", "segmentation...\") res = subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ],", ") joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word =", "as f: with open(TMPPATH, \"w\") as fw: for _, line in tqdm(enumerate(f.readlines())): article", "zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if title in SECTION_BLACKLIST: continue for paragraph in", "UNK = 0 def json_to_txt(): with gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\") as", "open(TMPPATH_WORD if seg_word else TMPPATH) as f: for _, sentence in tqdm(enumerate(f.readlines())): tokens.append(", "word segmentation...\") res = subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD", "bpe else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if __name__ == \"__main__\": #", 
"spm import joblib import numpy as np import click from tqdm import tqdm", "' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE,", "= 1 UNK = 0 def json_to_txt(): with gzip.open(DATAPATH) as f: with open(TMPPATH,", "as np import click from tqdm import tqdm from opencc import OpenCC from", "OpenCC('t2s') VOC_SIZE = 7500 PAD = 1 UNK = 0 def json_to_txt(): with", "\"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if __name__ == \"__main__\": # pylint: disable=no-value-for-parameter", "], stdout=subprocess.PIPE) print(res) # Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} '", "# fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if __name__ == \"__main__\": # pylint: disable=no-value-for-parameter main()", "f: for _, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\",", "--word --unigram \"\"\" import gzip import json import subprocess from pathlib import Path", "True if word else False algorithm = \"bpe\" if bpe else \"unigram\" #", "if \"年表\" in article[\"title\"] or \"列表\" in article[\"title\"]: continue for title, section in", "\"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE = 7500 PAD = 1 UNK = 0", "# Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995", "+ \".model\") tokens = [] with open(TMPPATH_WORD if seg_word else TMPPATH) as f:", "def main(word, bpe): seg_word = True if word else False algorithm = \"bpe\"", "print(\"Performing word segmentation...\") res = subprocess.run([ \"thulac\", 
\"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\",", "main(word, bpe): seg_word = True if word else False algorithm = \"bpe\" if", "@click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word = True if word else", "click from tqdm import tqdm from opencc import OpenCC from wiki_tokenize_json import clean_text,", "= 7500 PAD = 1 UNK = 0 def json_to_txt(): with gzip.open(DATAPATH) as", "= \"bpe\" if bpe else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if __name__", "numpy as np import click from tqdm import tqdm from opencc import OpenCC", "\"w\") as fw: for _, line in tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\"", "if seg_word: print(\"Performing word segmentation...\") res = subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\",", "Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word segmentation...\") res = subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\",", ") ) def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) +", "seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format(", "seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\")", "fw: for _, line in tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\" in article[\"title\"]", "else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if __name__ == \"__main__\": # pylint:", "CC.convert(title) 
if title in SECTION_BLACKLIST: continue for paragraph in [x for x in", "np import click from tqdm import tqdm from opencc import OpenCC from wiki_tokenize_json", "as spm import joblib import numpy as np import click from tqdm import", "len(paragraph) < 200 or filter_texts(paragraph): continue for sentence in [x for x in", "\"年表\" in article[\"title\"] or \"列表\" in article[\"title\"]: continue for title, section in zip(article[\"section_titles\"],", "--vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word),", "> 10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if", "= OpenCC('t2s') VOC_SIZE = 7500 PAD = 1 UNK = 0 def json_to_txt():", "for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if title in SECTION_BLACKLIST:", "= 0 def json_to_txt(): with gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\") as fw:", "from opencc import OpenCC from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\"", "subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) #", "in paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if", "tokens = [] with open(TMPPATH_WORD if seg_word else TMPPATH) as f: for _,", "if title in SECTION_BLACKLIST: continue for paragraph in [x for x in section.split(\"\\n\")", "* python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip import json import subprocess from", "import joblib import numpy as np import click from tqdm import tqdm from", "50]: paragraph = clean_text(paragraph) if 
len(paragraph) < 200 or filter_texts(paragraph): continue for sentence", "--model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm,", "algorithm = \"bpe\" if bpe else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if", "tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe):", "Path import sentencepiece as spm import joblib import numpy as np import click", "section in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if title in SECTION_BLACKLIST: continue for", "continue for sentence in [x for x in paragraph.split(\"。\") if len(x) > 10]:", "fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word segmentation...\") res =", "_, line in tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\" in article[\"title\"] or \"列表\"", "opencc import OpenCC from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH", "Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format(", "paragraph in [x for x in section.split(\"\\n\") if len(x) > 50]: paragraph =", "if word else False algorithm = \"bpe\" if bpe else \"unigram\" # fit_model(seg_word,", "as f: for _, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command()", "CC = OpenCC('t2s') 
VOC_SIZE = 7500 PAD = 1 UNK = 0 def", "for x in paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True,", "bpe): seg_word = True if word else False algorithm = \"bpe\" if bpe", "spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens = [] with open(TMPPATH_WORD if seg_word", "x in section.split(\"\\n\") if len(x) > 50]: paragraph = clean_text(paragraph) if len(paragraph) <", "> 50]: paragraph = clean_text(paragraph) if len(paragraph) < 200 or filter_texts(paragraph): continue for", "@click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word = True if word else False algorithm", "with gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\") as fw: for _, line in", "import subprocess from pathlib import Path import sentencepiece as spm import joblib import", "word else False algorithm = \"bpe\" if bpe else \"unigram\" # fit_model(seg_word, algorithm)", "tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word,", "article[\"title\"]: continue for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if title", "import Path import sentencepiece as spm import joblib import numpy as np import", "+ \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word", "if bpe else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if __name__ == \"__main__\":", "title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title = CC.convert(title) if title in SECTION_BLACKLIST: continue", "import json import subprocess from pathlib import Path import sentencepiece as spm 
import", "import sentencepiece as spm import joblib import numpy as np import click from", "TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) # Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={}", "wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD =", "line in tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\" in article[\"title\"] or \"列表\" in", "in SECTION_BLACKLIST: continue for paragraph in [x for x in section.split(\"\\n\") if len(x)", "import click from tqdm import tqdm from opencc import OpenCC from wiki_tokenize_json import", "paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not", "fw.write(sentence + \"。\\n\") def fit_model(seg_word=True, algorithm=\"bpe\"): if not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing", "[x for x in section.split(\"\\n\") if len(x) > 50]: paragraph = clean_text(paragraph) if", "= [] with open(TMPPATH_WORD if seg_word else TMPPATH) as f: for _, sentence", "is_flag=True) @click.option(\"--bpe/--unigram\", default=True) def main(word, bpe): seg_word = True if word else False", "or \"列表\" in article[\"title\"]: continue for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title =", "= spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens = [] with open(TMPPATH_WORD if", "algorithm=algorithm, seg_word=seg_word) + \".model\") tokens = [] with open(TMPPATH_WORD if seg_word else TMPPATH)", "tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\" in article[\"title\"] or \"列表\" in article[\"title\"]: continue", "sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), 
f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True) @click.option(\"--bpe/--unigram\", default=True)", "[x for x in paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence + \"。\\n\") def", "\"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s')", "with open(TMPPATH_WORD if seg_word else TMPPATH) as f: for _, sentence in tqdm(enumerate(f.readlines())):", "VOC_SIZE, algorithm=\"unigram\" ) ) def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm,", "for _, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) ) joblib.dump(np.array(tokens), f\"data/tokens_{algorithm}_{seg_word}.pkl\") @click.command() @click.option(\"--word\", is_flag=True)", "SECTION_BLACKLIST: continue for paragraph in [x for x in section.split(\"\\n\") if len(x) >", "sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens = [] with open(TMPPATH_WORD", "Dataset Example: * python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip import json import", "import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH = \"/mnt/Intel/zhwiki.json.gz\" TMPPATH = \"/mnt/Intel/tmp_texts.txt\" TMPPATH_WORD = \"/mnt/Intel/tmp_words.txt\"", "tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens =", "tqdm from opencc import OpenCC from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST DATAPATH =", "import gzip import json import subprocess from pathlib import Path import sentencepiece as", "section.split(\"\\n\") if len(x) > 50]: paragraph = 
clean_text(paragraph) if len(paragraph) < 200 or", "in tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\" in article[\"title\"] or \"列表\" in article[\"title\"]:", "seg_word = True if word else False algorithm = \"bpe\" if bpe else", "if not Path(TMPPATH).exists(): json_to_txt() if seg_word: print(\"Performing word segmentation...\") res = subprocess.run([ \"thulac\",", "sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens = [] with open(TMPPATH_WORD if seg_word else", "json_to_txt(): with gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\") as fw: for _, line", "[] with open(TMPPATH_WORD if seg_word else TMPPATH) as f: for _, sentence in", "\"/mnt/Intel/tmp_words.txt\" MODEL_PREFIX = \"data/{algorithm}_{seg_word}_model\" CC = OpenCC('t2s') VOC_SIZE = 7500 PAD = 1", "\"bpe\" if bpe else \"unigram\" # fit_model(seg_word, algorithm) tokenize(seg_word, algorithm) if __name__ ==", "print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if", "\"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) # Train Model print(\"Training", "json_to_txt() if seg_word: print(\"Performing word segmentation...\") res = subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\",", "def tokenize(seg_word=True, algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens", "paragraph = clean_text(paragraph) if len(paragraph) < 200 or filter_texts(paragraph): continue for sentence in", "Example: * python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip import json import subprocess", "pathlib import Path import 
sentencepiece as spm import joblib import numpy as np", "gzip import json import subprocess from pathlib import Path import sentencepiece as spm", "in section.split(\"\\n\") if len(x) > 50]: paragraph = clean_text(paragraph) if len(paragraph) < 200", "\"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) # Train Model print(\"Training model...\") spm.SentencePieceTrainer.Train(", "seg_word else TMPPATH) as f: for _, sentence in tqdm(enumerate(f.readlines())): tokens.append( np.array(sp.EncodeAsIds(sentence)) )", "for sentence in [x for x in paragraph.split(\"。\") if len(x) > 10]: fw.write(sentence", "= clean_text(paragraph) if len(paragraph) < 200 or filter_texts(paragraph): continue for sentence in [x", "spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD if seg_word else", "algorithm=\"bpe\"): print(\"Tokenizing...\") sp = spm.SentencePieceProcessor() sp.Load(MODEL_PREFIX.format( algorithm=algorithm, seg_word=seg_word) + \".model\") tokens = []", "from pathlib import Path import sentencepiece as spm import joblib import numpy as", "Model print(\"Training model...\") spm.SentencePieceTrainer.Train( '--input={} --model_prefix={} --vocab_size={} ' '--input_sentence_size=20000000 ' '--character_coverage=0.995 --model_type={algorithm}'.format( TMPPATH_WORD", "for _, line in tqdm(enumerate(f.readlines())): article = json.loads(line) if \"年表\" in article[\"title\"] or", "article[\"title\"] or \"列表\" in article[\"title\"]: continue for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]): title", "res = subprocess.run([ \"thulac\", \"-model_dir\", \"/mnt/SSD_Data/openai_nlp/THULAC/models/\", \"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE)", "\"-seg_only\", \"-input\", TMPPATH, \"-output\", TMPPATH_WORD ], stdout=subprocess.PIPE) print(res) 
# Train Model print(\"Training model...\")", "gzip.open(DATAPATH) as f: with open(TMPPATH, \"w\") as fw: for _, line in tqdm(enumerate(f.readlines())):", "for Wiki Dataset Example: * python scripts/wiki_sp_tokenize_json.py --word --unigram \"\"\" import gzip import", "continue for paragraph in [x for x in section.split(\"\\n\") if len(x) > 50]:", "in article[\"title\"] or \"列表\" in article[\"title\"]: continue for title, section in zip(article[\"section_titles\"], article[\"section_texts\"]):", "len(x) > 50]: paragraph = clean_text(paragraph) if len(paragraph) < 200 or filter_texts(paragraph): continue", "seg_word=seg_word) + \".model\") tokens = [] with open(TMPPATH_WORD if seg_word else TMPPATH) as", "--model_type={algorithm}'.format( TMPPATH_WORD if seg_word else TMPPATH, MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word), VOC_SIZE, algorithm=\"unigram\" ) ) def" ]
[ "Django from django.apps import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app config\"\"\" name = \"paranuara.citizens\"", "<filename>paranuara/citizens/apps.py \"\"\"Citizens app\"\"\" # Django from django.apps import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app", "django.apps import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app config\"\"\" name = \"paranuara.citizens\" verbose_name =", "from django.apps import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app config\"\"\" name = \"paranuara.citizens\" verbose_name", "app\"\"\" # Django from django.apps import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app config\"\"\" name", "import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app config\"\"\" name = \"paranuara.citizens\" verbose_name = 'Citizens'", "# Django from django.apps import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app config\"\"\" name =", "\"\"\"Citizens app\"\"\" # Django from django.apps import AppConfig class CitizensAppConfig(AppConfig): \"\"\"Citizens app config\"\"\"" ]
[ "(32, 2, 32) ret = add_inner(a, b) return ret a = jnp.ones((2, 32,", "jax.numpy as jnp def debug_pmap(): @jax.pmap def func(x, w): return x @ w", "32) return x @ y # ret.shape = (32, 2, 32) ret =", "jax import lax import jax.numpy as jnp def debug_pmap(): @jax.pmap def func(x, w):", "None), out_axes=0) def add(a, b): # a.shape = (32, 64) # b.shape =", "import lax import jax.numpy as jnp def debug_pmap(): @jax.pmap def func(x, w): return", "b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return x / lax.psum(x, 'i')", "normalize(x): return x / lax.psum(x, 'i') print(normalize(jnp.arange(2))) if __name__ == \"__main__\": #debug_pmap() #test_nested_pmap()", "in_axes=(0, None), out_axes=0) def add(a, b): # a.shape = (32, 64) # b.shape", "#jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c) def test_allreduce_sum():", "out_axes=1) def add_inner(x, y): # x.shape = (32, 64) # y.shape = (64,", "= (64, 32) return x @ y # ret.shape = (32, 2, 32)", "jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b)", "= func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None),", "b = jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c =", "32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x, y): # x.shape = (32,", "#print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return", "= (32, 64) # y.shape = (64, 32) return x @ y #", "import jax from jax import lax import jax.numpy as jnp def debug_pmap(): @jax.pmap", "test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return x / lax.psum(x, 'i') print(normalize(jnp.arange(2))) if 
__name__", "@ y # ret.shape = (32, 2, 32) ret = add_inner(a, b) return", "64)) b = jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c", "= (64, 2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x, y): #", "lax import jax.numpy as jnp def debug_pmap(): @jax.pmap def func(x, w): return x", "4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def add(a, b):", "# ret.shape = (32, 2, 32) ret = add_inner(a, b) return ret a", "# y.shape = (64, 32) return x @ y # ret.shape = (32,", "2, 32) ret = add_inner(a, b) return ret a = jnp.ones((2, 32, 64))", "@partial(jax.pmap, axis_name='i') def normalize(x): return x / lax.psum(x, 'i') print(normalize(jnp.arange(2))) if __name__ ==", "#print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x):", "import jax.numpy as jnp def debug_pmap(): @jax.pmap def func(x, w): return x @", "b.shape = (64, 2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x, y):", "def normalize(x): return x / lax.psum(x, 'i') print(normalize(jnp.arange(2))) if __name__ == \"__main__\": #debug_pmap()", "@partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x, y): # x.shape = (32, 64)", "ret.shape = (32, 2, 32) ret = add_inner(a, b) return ret a =", "jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def add(a,", "return x / lax.psum(x, 'i') print(normalize(jnp.arange(2))) if __name__ == \"__main__\": #debug_pmap() #test_nested_pmap() test_allreduce_sum()", "def func(x, w): return x @ w y = func(jnp.ones((2, 4)), jnp.ones((2, 4)))", "ret = add_inner(a, b) return ret a = jnp.ones((2, 32, 64)) b =", "def debug_pmap(): @jax.pmap def func(x, w): return x @ w y = 
func(jnp.ones((2,", "axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x, y): # x.shape = (32, 64) #", "= jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap,", "axis_name='i') def normalize(x): return x / lax.psum(x, 'i') print(normalize(jnp.arange(2))) if __name__ == \"__main__\":", "return ret a = jnp.ones((2, 32, 64)) b = jnp.ones((64, 2, 32)) #jaxpr", "print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def add(a, b): #", "def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def add(a, b): # a.shape =", "add(a, b): # a.shape = (32, 64) # b.shape = (64, 2, 32)", "2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c)", "jnp.ones((2, 32, 64)) b = jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr)", "= add_inner(a, b) return ret a = jnp.ones((2, 32, 64)) b = jnp.ones((64,", "test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def add(a, b): # a.shape = (32,", "<reponame>yf225/alpa from functools import partial import jax from jax import lax import jax.numpy", "b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def", "out_axes=0) def add(a, b): # a.shape = (32, 64) # b.shape = (64,", "1), out_axes=1) def add_inner(x, y): # x.shape = (32, 64) # y.shape =", "32, 64)) b = jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape)", "print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return x / lax.psum(x, 'i') print(normalize(jnp.arange(2)))", "as jnp def debug_pmap(): @jax.pmap def func(x, w): return x @ w y", "64) # b.shape = (64, 2, 32) @partial(jax.pmap, axis_name='a1', 
in_axes=(None, 1), out_axes=1) def", "2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x, y): # x.shape =", "b) return ret a = jnp.ones((2, 32, 64)) b = jnp.ones((64, 2, 32))", "def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return x / lax.psum(x, 'i') print(normalize(jnp.arange(2))) if", "c = add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return x", "from functools import partial import jax from jax import lax import jax.numpy as", "def add(a, b): # a.shape = (32, 64) # b.shape = (64, 2,", "y.shape = (64, 32) return x @ y # ret.shape = (32, 2,", "@jax.pmap def func(x, w): return x @ w y = func(jnp.ones((2, 4)), jnp.ones((2,", "(32, 64) # b.shape = (64, 2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1)", "# b.shape = (64, 2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x,", "a = jnp.ones((2, 32, 64)) b = jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a,", "= (32, 64) # b.shape = (64, 2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1),", "b): # a.shape = (32, 64) # b.shape = (64, 2, 32) @partial(jax.pmap,", "# a.shape = (32, 64) # b.shape = (64, 2, 32) @partial(jax.pmap, axis_name='a1',", "y): # x.shape = (32, 64) # y.shape = (64, 32) return x", "add_inner(x, y): # x.shape = (32, 64) # y.shape = (64, 32) return", "def add_inner(x, y): # x.shape = (32, 64) # y.shape = (64, 32)", "4)), jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def", "type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def add(a, b): # a.shape", "x.shape = (32, 64) # y.shape = (64, 32) return x @ y", "jax from jax import lax import jax.numpy as jnp def debug_pmap(): @jax.pmap def", "x @ w y = func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap():", 
"add_inner(a, b) return ret a = jnp.ones((2, 32, 64)) b = jnp.ones((64, 2,", "y # ret.shape = (32, 2, 32) ret = add_inner(a, b) return ret", "64) # y.shape = (64, 32) return x @ y # ret.shape =", "(32, 64) # y.shape = (64, 32) return x @ y # ret.shape", "from jax import lax import jax.numpy as jnp def debug_pmap(): @jax.pmap def func(x,", "debug_pmap(): @jax.pmap def func(x, w): return x @ w y = func(jnp.ones((2, 4)),", "y = func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0,", "= jnp.ones((2, 32, 64)) b = jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b)", "func(x, w): return x @ w y = func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y,", "ret a = jnp.ones((2, 32, 64)) b = jnp.ones((64, 2, 32)) #jaxpr =", "func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0)", "return x @ w y = func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y)) def", "w): return x @ w y = func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y))", "= (32, 2, 32) ret = add_inner(a, b) return ret a = jnp.ones((2,", "x @ y # ret.shape = (32, 2, 32) ret = add_inner(a, b)", "partial import jax from jax import lax import jax.numpy as jnp def debug_pmap():", "import partial import jax from jax import lax import jax.numpy as jnp def", "add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return x / lax.psum(x,", "32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c) def", "a.shape = (32, 64) # b.shape = (64, 2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None,", "(64, 32) return x @ y # ret.shape = (32, 2, 32) ret", "axis_name='a0', in_axes=(0, None), out_axes=0) def add(a, b): # a.shape = (32, 64) #", "(64, 2, 32) @partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1) def add_inner(x, y): # 
x.shape", "@ w y = func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap,", "@partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0) def add(a, b): # a.shape = (32, 64)", "jnp def debug_pmap(): @jax.pmap def func(x, w): return x @ w y =", "= jnp.ones((64, 2, 32)) #jaxpr = jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a,", "in_axes=(None, 1), out_axes=1) def add_inner(x, y): # x.shape = (32, 64) # y.shape", "= add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i') def normalize(x): return x /", "w y = func(jnp.ones((2, 4)), jnp.ones((2, 4))) print(y, type(y)) def test_nested_pmap(): @partial(jax.pmap, axis_name='a0',", "# x.shape = (32, 64) # y.shape = (64, 32) return x @", "32) ret = add_inner(a, b) return ret a = jnp.ones((2, 32, 64)) b", "functools import partial import jax from jax import lax import jax.numpy as jnp", "jax.make_jaxpr(add)(a, b) #print(jaxpr) #print(jaxpr.jaxpr.outvars[0].aval.shape) c = add(a, b) print(c) def test_allreduce_sum(): @partial(jax.pmap, axis_name='i')", "return x @ y # ret.shape = (32, 2, 32) ret = add_inner(a," ]
[ "import os import json import pandas import argparse parser = argparse.ArgumentParser() parser.add_argument('--data', required=True)", "for value in data[name]] elif name[0] == 'I': inputs[name] = [[float(value)] for value", "= [[float(value)] for value in data[name]] post = json.dumps({'inputs':inputs}) command = f\"curl -d", "if name[0] == 'C': inputs[name] = [[int(value)] for value in data[name]] elif name[0]", "in data.columns: if name[0] == 'C': inputs[name] = [[int(value)] for value in data[name]]", "elif name[0] == 'I': inputs[name] = [[float(value)] for value in data[name]] post =", "required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args = parser.parse_args() data = pandas.read_csv(args.data, nrows=args.rows) inputs", "value in data[name]] post = json.dumps({'inputs':inputs}) command = f\"curl -d '{post}' {args.host}/v1/models/{args.model}:predict\" print(command)", "= [[int(value)] for value in data[name]] elif name[0] == 'I': inputs[name] = [[float(value)]", "in data[name]] post = json.dumps({'inputs':inputs}) command = f\"curl -d '{post}' {args.host}/v1/models/{args.model}:predict\" print(command) os.system(command)", "pandas.read_csv(args.data, nrows=args.rows) inputs = dict() for name in data.columns: if name[0] == 'C':", "inputs = dict() for name in data.columns: if name[0] == 'C': inputs[name] =", "for value in data[name]] post = json.dumps({'inputs':inputs}) command = f\"curl -d '{post}' {args.host}/v1/models/{args.model}:predict\"", "required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args = parser.parse_args() data =", "args = parser.parse_args() data = pandas.read_csv(args.data, nrows=args.rows) inputs = dict() for name in", "inputs[name] = [[int(value)] for value in data[name]] elif name[0] == 'I': inputs[name] =", "= parser.parse_args() data = pandas.read_csv(args.data, 
nrows=args.rows) inputs = dict() for name in data.columns:", "dict() for name in data.columns: if name[0] == 'C': inputs[name] = [[int(value)] for", "json import pandas import argparse parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True)", "<filename>laboratory/strangedemo/criteo_predict.py import os import json import pandas import argparse parser = argparse.ArgumentParser() parser.add_argument('--data',", "required=True) args = parser.parse_args() data = pandas.read_csv(args.data, nrows=args.rows) inputs = dict() for name", "required=True) parser.add_argument('--host', required=True) args = parser.parse_args() data = pandas.read_csv(args.data, nrows=args.rows) inputs = dict()", "parser.parse_args() data = pandas.read_csv(args.data, nrows=args.rows) inputs = dict() for name in data.columns: if", "for name in data.columns: if name[0] == 'C': inputs[name] = [[int(value)] for value", "argparse parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True)", "== 'I': inputs[name] = [[float(value)] for value in data[name]] post = json.dumps({'inputs':inputs}) command", "= dict() for name in data.columns: if name[0] == 'C': inputs[name] = [[int(value)]", "nrows=args.rows) inputs = dict() for name in data.columns: if name[0] == 'C': inputs[name]", "name[0] == 'C': inputs[name] = [[int(value)] for value in data[name]] elif name[0] ==", "parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args = parser.parse_args() data = pandas.read_csv(args.data, nrows=args.rows) inputs =", "data = pandas.read_csv(args.data, nrows=args.rows) inputs = dict() for name in data.columns: if name[0]", "parser.add_argument('--host', required=True) args = parser.parse_args() data = pandas.read_csv(args.data, 
nrows=args.rows) inputs = dict() for", "data[name]] elif name[0] == 'I': inputs[name] = [[float(value)] for value in data[name]] post", "'C': inputs[name] = [[int(value)] for value in data[name]] elif name[0] == 'I': inputs[name]", "value in data[name]] elif name[0] == 'I': inputs[name] = [[float(value)] for value in", "os import json import pandas import argparse parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows',", "type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args = parser.parse_args() data = pandas.read_csv(args.data, nrows=args.rows)", "[[float(value)] for value in data[name]] post = json.dumps({'inputs':inputs}) command = f\"curl -d '{post}'", "argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args = parser.parse_args()", "name[0] == 'I': inputs[name] = [[float(value)] for value in data[name]] post = json.dumps({'inputs':inputs})", "name in data.columns: if name[0] == 'C': inputs[name] = [[int(value)] for value in", "== 'C': inputs[name] = [[int(value)] for value in data[name]] elif name[0] == 'I':", "'I': inputs[name] = [[float(value)] for value in data[name]] post = json.dumps({'inputs':inputs}) command =", "parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args", "import argparse parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host',", "import pandas import argparse parser = argparse.ArgumentParser() parser.add_argument('--data', 
required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model',", "parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args = parser.parse_args() data", "= pandas.read_csv(args.data, nrows=args.rows) inputs = dict() for name in data.columns: if name[0] ==", "inputs[name] = [[float(value)] for value in data[name]] post = json.dumps({'inputs':inputs}) command = f\"curl", "import json import pandas import argparse parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int,", "in data[name]] elif name[0] == 'I': inputs[name] = [[float(value)] for value in data[name]]", "data.columns: if name[0] == 'C': inputs[name] = [[int(value)] for value in data[name]] elif", "pandas import argparse parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True)", "parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args = parser.parse_args() data = pandas.read_csv(args.data,", "[[int(value)] for value in data[name]] elif name[0] == 'I': inputs[name] = [[float(value)] for", "= argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--rows', type=int, required=True) parser.add_argument('--model', required=True) parser.add_argument('--host', required=True) args =" ]
[ "(optional, ndarray (3, ) ): Mean axis kappa (optional, float): positive concentration parameter", "* np.random.random(size) x = np.cos(uniformcirle) y = np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:,", "Watson distribution to data Arguments ---------- data : ndarray (n, 3) Vector data", "None and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z", "= 2 * np.pi * np.random.random(size) x = np.cos(uniformcirle) y = np.sin(uniformcirle) samples", "Its PDF is defined as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa", "@njit(cache = True) def _pdf_wo_constant(mu, kappa, x): n_samples, _ = x.shape unnormalized_pdf =", "`imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. Directional Statistics, 1999. Chen. Generate", "``x`` given a parameterized Watson distribution Arguments ---------- x : ndarray (size, 3)", "np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res return f", "Vector data the distribution is fitted to ''' T = 1/data.shape[0] * orientation_matrix(data)", "manually.\") def fit(self, data): ''' Fits the Watson distribution to data Arguments ----------", "& = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's", "kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True) if root_res.converged == True: self.mu =", "mu_fitted self.kappa = kappa_fit else: raise ValueError(\"Concentration parameter could not be estimated.\") @njit(cache", "distribution is only implemented for positive concentration 
parameter :math:`\\kappa`. Args: mu (optional, ndarray", "---------- pdfvals : ndarray (size,) PDF values as ndarray of shape (size, )", "* ((x[i, :] * mu).sum())**2) return unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa, constant,", "Watson distribution is an isotropic distribution for axial data. Its PDF is defined", "= np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:, 0] = temp * x samples[:,", "= M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent", "y samples[:, 2] = z for i in range(size): vec=samples[i, :] samples[i, :]", "uniformcirle = 2 * np.pi * np.random.random(size) x = np.cos(uniformcirle) y = np.sin(uniformcirle)", "Statistics, 1999. Chen. Generate Random Samples from von Mises-Fisher and Watson Distributions. 2012", "rvs(self, size = 1): ''' Generate samples from the Watson distribution Arguments ----------", "from the Watson distribution Arguments ---------- size : int, optional, default 1 Number", "for axial data. Its PDF is defined as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa)", "Mardia, Jupp. Directional Statistics, 1999. Chen. 
Generate Random Samples from von Mises-Fisher and", "optional, default 1 Number of samples Returns ---------- samples : ndarray (size, 3)", "- intermed_res return f kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True) if root_res.converged", "number_samples +=1 return res_array @njit(cache = True) def _sample(kappa, constant, rot_matrix, size): ones", "f: res_array[number_samples] = x_rand number_samples +=1 return res_array @njit(cache = True) def _sample(kappa,", "---------- data : ndarray (n, 3) Vector data the distribution is fitted to", "of shape (size, ) ''' if self.mu is not None and self.kappa is", "(3, ) ): Mean axis kappa (optional, float): positive concentration parameter The Watson", "constant return pdf else: raise ValueError(\"Watson distribution not parameterized. Fit it to data", "positive concentration parameter The Watson distribution is an isotropic distribution for axial data.", "3) samples as ndarray of shape (size, 3) ''' if self.mu is not", "samples else: raise ValueError(\"Watson distribution not parameterized. Fit it to data or set", "from von Mises-Fisher and Watson Distributions. 2012 \"\"\" def __init__(self, mu = None,", "z = np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant,", "= evectors[:, 2] intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa)", "res_array[number_samples] = x_rand number_samples +=1 return res_array @njit(cache = True) def _sample(kappa, constant,", "def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator", "isotropic distribution for axial data. Its PDF is defined as .. 
math:: p_{Watson}(\\pm\\mathbf{x}|", "= np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res return f kappa_fit, root_res = brentq(obj,", "mu_fitted = evectors[:, 2] intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa =", "constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z, self.mu) samples", "x : ndarray (size, 3) Vectors to evaluate the PDF at Returns ----------", "to data Arguments ---------- data : ndarray (n, 3) Vector data the distribution", "self.mu = mu self.kappa = kappa def rvs(self, size = 1): ''' Generate", "= kappa def rvs(self, size = 1): ''' Generate samples from the Watson", "Returns ---------- samples : ndarray (size, 3) samples as ndarray of shape (size,", "function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . 
References: Mardia, Jupp.", "np.empty((size, 3)) samples[:, 0] = temp * x samples[:, 1] = temp *", "not be estimated.\") @njit(cache = True) def _pdf_wo_constant(mu, kappa, x): n_samples, _ =", "np.random.uniform(0.0, maxy) #calculate density at position x f = constant * np.exp(kappa *", "* np.exp(kappa) number_samples = 0 while number_samples < size: #draw uniform samples x_rand", "3) ''' if self.mu is not None and self.kappa is not None: sqrt_kappa", "2] = z for i in range(size): vec=samples[i, :] samples[i, :] = rot_matrix.dot(vec)", ")) z = rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones - np.square(z)) uniformcirle =", "not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x)", "* y samples[:, 2] = z for i in range(size): vec=samples[i, :] samples[i,", "= np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0, maxy) #calculate density at position x f", ": ndarray (size, 3) samples as ndarray of shape (size, 3) ''' if", "`Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ .", "\"\"\" def __init__(self, mu = None, kappa = None): self.mu = mu self.kappa", "0 while number_samples < size: #draw uniform samples x_rand = np.random.uniform(-1.0, 1.0) y_rand", "self.kappa = kappa def rvs(self, size = 1): ''' Generate samples from the", "full_output=True) if root_res.converged == True: self.mu = mu_fitted self.kappa = kappa_fit else: raise", "given kappa maxy = constant * np.exp(kappa) number_samples = 0 while number_samples <", "''' T = 1/data.shape[0] * orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted = evectors[:,", "set of vectors ``x`` given a parameterized Watson distribution Arguments ---------- x 
:", "shape (size, ) ''' if self.mu is not None and self.kappa is not", "is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa,", "f = nominator/denominator - intermed_res return f kappa_fit, root_res = brentq(obj, 1e-4, 500.,", "= x_rand number_samples +=1 return res_array @njit(cache = True) def _sample(kappa, constant, rot_matrix,", "def _pdf_wo_constant(mu, kappa, x): n_samples, _ = x.shape unnormalized_pdf = np.zeros((n_samples, )) for", "distribution Arguments ---------- x : ndarray (size, 3) Vectors to evaluate the PDF", "concentration parameter The Watson distribution is an isotropic distribution for axial data. Its", "(size, 3) Vectors to evaluate the PDF at Returns ---------- pdfvals : ndarray", "samples as ndarray of shape (size, 3) ''' if self.mu is not None", "orientation_matrix from numba import njit from scipy.optimize import brentq class Watson: r\"\"\" Watson", "size): res_array = np.zeros((size, )) #maximal density for given kappa maxy = constant", "_sample(kappa, constant, rot_matrix, size): ones = np.ones((size, )) z = rejection_sampling_numba(kappa, constant, size)", "---------- x : ndarray (size, 3) Vectors to evaluate the PDF at Returns", "is fitted to ''' T = 1/data.shape[0] * orientation_matrix(data) evals, evectors = np.linalg.eigh(T)", "np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant, rot_matrix, size)", "Generate Random Samples from von Mises-Fisher and Watson Distributions. 2012 \"\"\" def __init__(self,", "Watson distribution Arguments ---------- size : int, optional, default 1 Number of samples", "constant, rot_matrix, size): ones = np.ones((size, )) z = rejection_sampling_numba(kappa, constant, size) temp", "axial data. Its PDF is defined as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) &", "PDF is defined as .. 
math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2)", "is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0.,", "return pdf else: raise ValueError(\"Watson distribution not parameterized. Fit it to data or", "PDF values as ndarray of shape (size, ) ''' if self.mu is not", "= np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf", "pdf(self, x): ''' Calculate probability density function of a set of vectors ``x``", "def _sample(kappa, constant, rot_matrix, size): ones = np.ones((size, )) z = rejection_sampling_numba(kappa, constant,", "math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2)", "denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_", "* constant return pdf else: raise ValueError(\"Watson distribution not parameterized. Fit it to", "z = rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones - np.square(z)) uniformcirle = 2", "manually.\") def pdf(self, x): ''' Calculate probability density function of a set of", "1): ''' Generate samples from the Watson distribution Arguments ---------- size : int,", "#draw uniform samples x_rand = np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0, maxy) #calculate density", "only implemented for positive concentration parameter :math:`\\kappa`. 
Args: mu (optional, ndarray (3, )", "self.mu = mu_fitted self.kappa = kappa_fit else: raise ValueError(\"Concentration parameter could not be", "parameters manually.\") def fit(self, data): ''' Fits the Watson distribution to data Arguments", "Samples from von Mises-Fisher and Watson Distributions. 2012 \"\"\" def __init__(self, mu =", "= True) def _sample(kappa, constant, rot_matrix, size): ones = np.ones((size, )) z =", "n_samples, _ = x.shape unnormalized_pdf = np.zeros((n_samples, )) for i in range(n_samples): unnormalized_pdf[i]", "\\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the", "The Watson distribution is only implemented for positive concentration parameter :math:`\\kappa`. Args: mu", "2012 \"\"\" def __init__(self, mu = None, kappa = None): self.mu = mu", "x f = constant * np.exp(kappa * x_rand * x_rand) #accept or reject", "data. Its PDF is defined as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & =", "is defined as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\", "for i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] * mu).sum())**2) return", "parameters manually.\") def pdf(self, x): ''' Calculate probability density function of a set", "np.sqrt(ones - np.square(z)) uniformcirle = 2 * np.pi * np.random.random(size) x = np.cos(uniformcirle)", "Fit it to data or set parameters manually.\") def fit(self, data): ''' Fits", ":math:`\\kappa`. 
Args: mu (optional, ndarray (3, ) ): Mean axis kappa (optional, float):", "= np.random.uniform(0.0, maxy) #calculate density at position x f = constant * np.exp(kappa", "nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator", "maxy = constant * np.exp(kappa) number_samples = 0 while number_samples < size: #draw", "pdfvals : ndarray (size,) PDF values as ndarray of shape (size, ) '''", "temp * x samples[:, 1] = temp * y samples[:, 2] = z", "3) Vector data the distribution is fitted to ''' T = 1/data.shape[0] *", "from numba import njit from scipy.optimize import brentq class Watson: r\"\"\" Watson distribution", "f = constant * np.exp(kappa * x_rand * x_rand) #accept or reject if", "self.mu is not None and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant", "implemented for positive concentration parameter :math:`\\kappa`. Args: mu (optional, ndarray (3, ) ):", "= np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z,", "density for given kappa maxy = constant * np.exp(kappa) number_samples = 0 while", "#accept or reject if y_rand < f: res_array[number_samples] = x_rand number_samples +=1 return", "values as ndarray of shape (size, ) ''' if self.mu is not None", "= np.exp(kappa * ((x[i, :] * mu).sum())**2) return unnormalized_pdf @njit(cache = True) def", "- np.square(z)) uniformcirle = 2 * np.pi * np.random.random(size) x = np.cos(uniformcirle) y", "@njit(cache = True) def rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size, )) #maximal density", "constant, size) temp = np.sqrt(ones - np.square(z)) uniformcirle = 2 * np.pi *", "evaluate the PDF at Returns ---------- pdfvals : ndarray (size,) PDF values as", "samples[:, 2] = z for i in range(size): vec=samples[i, :] samples[i, :] =", "#calculate density at position x f = 
constant * np.exp(kappa * x_rand *", "(size,) PDF values as ndarray of shape (size, ) ''' if self.mu is", "np.square(z)) uniformcirle = 2 * np.pi * np.random.random(size) x = np.cos(uniformcirle) y =", "np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf *", "* x_rand * x_rand) #accept or reject if y_rand < f: res_array[number_samples] =", "data the distribution is fitted to ''' T = 1/data.shape[0] * orientation_matrix(data) evals,", "= mu_fitted self.kappa = kappa_fit else: raise ValueError(\"Concentration parameter could not be estimated.\")", "- np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res return", "vectors ``x`` given a parameterized Watson distribution Arguments ---------- x : ndarray (size,", "kappa_fit else: raise ValueError(\"Concentration parameter could not be estimated.\") @njit(cache = True) def", "def rvs(self, size = 1): ''' Generate samples from the Watson distribution Arguments", "* np.pi * np.random.random(size) x = np.cos(uniformcirle) y = np.sin(uniformcirle) samples = np.empty((size,", "fitted to ''' T = 1/data.shape[0] * orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted", "else: raise ValueError(\"Concentration parameter could not be estimated.\") @njit(cache = True) def _pdf_wo_constant(mu,", "parameter :math:`\\kappa`. Args: mu (optional, ndarray (3, ) ): Mean axis kappa (optional,", "None and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf", "* (T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) *", "and Watson Distributions. 
2012 \"\"\" def __init__(self, mu = None, kappa = None):", "x) pdf = pdf * constant return pdf else: raise ValueError(\"Watson distribution not", "if self.mu is not None and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa)", ": ndarray (size, 3) Vectors to evaluate the PDF at Returns ---------- pdfvals", "ndarray (3, ) ): Mean axis kappa (optional, float): positive concentration parameter The", "Vectors to evaluate the PDF at Returns ---------- pdfvals : ndarray (size,) PDF", "res_array @njit(cache = True) def _sample(kappa, constant, rot_matrix, size): ones = np.ones((size, ))", "size) temp = np.sqrt(ones - np.square(z)) uniformcirle = 2 * np.pi * np.random.random(size)", ") ): Mean axis kappa (optional, float): positive concentration parameter The Watson distribution", "for given kappa maxy = constant * np.exp(kappa) number_samples = 0 while number_samples", "rotation_matrix from ._descriptive_stats import orientation_matrix from numba import njit from scipy.optimize import brentq", "not parameterized. Fit it to data or set parameters manually.\") def fit(self, data):", "brentq class Watson: r\"\"\" Watson distribution .. 
note:: The Watson distribution is only", "0] = temp * x samples[:, 1] = temp * y samples[:, 2]", "p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where", "Returns ---------- pdfvals : ndarray (size,) PDF values as ndarray of shape (size,", "mu = None, kappa = None): self.mu = mu self.kappa = kappa def", "= np.zeros((size, )) #maximal density for given kappa maxy = constant * np.exp(kappa)", "of a set of vectors ``x`` given a parameterized Watson distribution Arguments ----------", "set parameters manually.\") def pdf(self, x): ''' Calculate probability density function of a", "size : int, optional, default 1 Number of samples Returns ---------- samples :", "None, kappa = None): self.mu = mu self.kappa = kappa def rvs(self, size", "The Watson distribution is an isotropic distribution for axial data. Its PDF is", "= (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator -", "T = 1/data.shape[0] * orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted = evectors[:, 2]", "it to data or set parameters manually.\") def fit(self, data): ''' Fits the", "maxy) #calculate density at position x f = constant * np.exp(kappa * x_rand", "constant, size): res_array = np.zeros((size, )) #maximal density for given kappa maxy =", "#maximal density for given kappa maxy = constant * np.exp(kappa) number_samples = 0", "= rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones - np.square(z)) uniformcirle = 2 *", "Distributions. 
2012 \"\"\" def __init__(self, mu = None, kappa = None): self.mu =", "parameterized Watson distribution Arguments ---------- x : ndarray (size, 3) Vectors to evaluate", "as ndarray of shape (size, 3) ''' if self.mu is not None and", "(optional, float): positive concentration parameter The Watson distribution is an isotropic distribution for", "_sample(self.kappa, constant, rot_matrix, size) return samples else: raise ValueError(\"Watson distribution not parameterized. Fit", "note:: The Watson distribution is only implemented for positive concentration parameter :math:`\\kappa`. Args:", "sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa)", "number_samples = 0 while number_samples < size: #draw uniform samples x_rand = np.random.uniform(-1.0,", "samples x_rand = np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0, maxy) #calculate density at position", "function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. Directional Statistics, 1999. Chen. Generate Random Samples", "1999. Chen. Generate Random Samples from von Mises-Fisher and Watson Distributions. 2012 \"\"\"", "temp * y samples[:, 2] = z for i in range(size): vec=samples[i, :]", "---------- samples : ndarray (size, 3) samples as ndarray of shape (size, 3)", "kappa def rvs(self, size = 1): ''' Generate samples from the Watson distribution", "_pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf * constant return pdf else: raise ValueError(\"Watson", "number_samples < size: #draw uniform samples x_rand = np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0,", "self.kappa, x) pdf = pdf * constant return pdf else: raise ValueError(\"Watson distribution", "Directional Statistics, 1999. Chen. 
Generate Random Samples from von Mises-Fisher and Watson Distributions.", "samples[:, 0] = temp * x samples[:, 1] = temp * y samples[:,", "or set parameters manually.\") def pdf(self, x): ''' Calculate probability density function of", "distribution to data Arguments ---------- data : ndarray (n, 3) Vector data the", "np.ones((size, )) z = rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones - np.square(z)) uniformcirle", "z for i in range(size): vec=samples[i, :] samples[i, :] = rot_matrix.dot(vec) return samples", "= None): self.mu = mu self.kappa = kappa def rvs(self, size = 1):", ")) for i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] * mu).sum())**2)", "density at position x f = constant * np.exp(kappa * x_rand * x_rand)", ".. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa", "mu (optional, ndarray (3, ) ): Mean axis kappa (optional, float): positive concentration", "distribution is fitted to ''' T = 1/data.shape[0] * orientation_matrix(data) evals, evectors =", "distribution not parameterized. 
Fit it to data or set parameters manually.\") def pdf(self,", "= x.shape unnormalized_pdf = np.zeros((n_samples, )) for i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa", "samples = np.empty((size, 3)) samples[:, 0] = temp * x samples[:, 1] =", "None): self.mu = mu self.kappa = kappa def rvs(self, size = 1): '''", "samples[:, 1] = temp * y samples[:, 2] = z for i in", ":math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error function", "= 1): ''' Generate samples from the Watson distribution Arguments ---------- size :", "x = np.cos(uniformcirle) y = np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:, 0] =", "res_array = np.zeros((size, )) #maximal density for given kappa maxy = constant *", "Mises-Fisher and Watson Distributions. 2012 \"\"\" def __init__(self, mu = None, kappa =", "< size: #draw uniform samples x_rand = np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0, maxy)", "be estimated.\") @njit(cache = True) def _pdf_wo_constant(mu, kappa, x): n_samples, _ = x.shape", "i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] * mu).sum())**2) return unnormalized_pdf", "ndarray (size, 3) Vectors to evaluate the PDF at Returns ---------- pdfvals :", "the Watson distribution Arguments ---------- size : int, optional, default 1 Number of", "* x samples[:, 1] = temp * y samples[:, 2] = z for", "samples : ndarray (size, 3) samples as ndarray of shape (size, 3) '''", "np from scipy.special import erfi from ._utils import rotation_matrix from ._descriptive_stats import orientation_matrix", "1.0) y_rand = np.random.uniform(0.0, maxy) #calculate density at position x f = constant", "None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf", "Args: mu (optional, ndarray (3, ) ): Mean 
axis kappa (optional, float): positive", "constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf * constant", ")) #maximal density for given kappa maxy = constant * np.exp(kappa) number_samples =", "rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant, rot_matrix, size) return samples else: raise ValueError(\"Watson", "self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu,", "from ._descriptive_stats import orientation_matrix from numba import njit from scipy.optimize import brentq class", "axis kappa (optional, float): positive concentration parameter The Watson distribution is an isotropic", "np.exp(kappa * x_rand * x_rand) #accept or reject if y_rand < f: res_array[number_samples]", "x): ''' Calculate probability density function of a set of vectors ``x`` given", "concentration parameter :math:`\\kappa`. Args: mu (optional, ndarray (3, ) ): Mean axis kappa", "size = 1): ''' Generate samples from the Watson distribution Arguments ---------- size", "kappa maxy = constant * np.exp(kappa) number_samples = 0 while number_samples < size:", "): Mean axis kappa (optional, float): positive concentration parameter The Watson distribution is", "np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res return f kappa_fit, root_res = brentq(obj, 1e-4,", "samples = _sample(self.kappa, constant, rot_matrix, size) return samples else: raise ValueError(\"Watson distribution not", "erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res return f kappa_fit, root_res", "= constant * np.exp(kappa * x_rand * x_rand) #accept or reject if y_rand", "r\"\"\" Watson distribution .. 
note:: The Watson distribution is only implemented for positive", "if root_res.converged == True: self.mu = mu_fitted self.kappa = kappa_fit else: raise ValueError(\"Concentration", "np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z, self.mu)", "import orientation_matrix from numba import njit from scipy.optimize import brentq class Watson: r\"\"\"", "np.pi * np.random.random(size) x = np.cos(uniformcirle) y = np.sin(uniformcirle) samples = np.empty((size, 3))", "parameter The Watson distribution is an isotropic distribution for axial data. Its PDF", "distribution not parameterized. Fit it to data or set parameters manually.\") def fit(self,", "= np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa):", "= 0 while number_samples < size: #draw uniform samples x_rand = np.random.uniform(-1.0, 1.0)", "from scipy.optimize import brentq class Watson: r\"\"\" Watson distribution .. note:: The Watson", "True) def _pdf_wo_constant(mu, kappa, x): n_samples, _ = x.shape unnormalized_pdf = np.zeros((n_samples, ))", "(size, 3) samples as ndarray of shape (size, 3) ''' if self.mu is", ": int, optional, default 1 Number of samples Returns ---------- samples : ndarray", "njit from scipy.optimize import brentq class Watson: r\"\"\" Watson distribution .. 
note:: The", "numba import njit from scipy.optimize import brentq class Watson: r\"\"\" Watson distribution ..", "if y_rand < f: res_array[number_samples] = x_rand number_samples +=1 return res_array @njit(cache =", "= np.zeros((n_samples, )) for i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i, :]", "2 * np.pi * np.random.random(size) x = np.cos(uniformcirle) y = np.sin(uniformcirle) samples =", "''' if self.mu is not None and self.kappa is not None: sqrt_kappa =", "pdf = pdf * constant return pdf else: raise ValueError(\"Watson distribution not parameterized.", "ndarray (size,) PDF values as ndarray of shape (size, ) ''' if self.mu", "(\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary", "constant, rot_matrix, size) return samples else: raise ValueError(\"Watson distribution not parameterized. Fit it", "Watson Distributions. 2012 \"\"\" def __init__(self, mu = None, kappa = None): self.mu", "* x_rand) #accept or reject if y_rand < f: res_array[number_samples] = x_rand number_samples", "kappa (optional, float): positive concentration parameter The Watson distribution is an isotropic distribution", "= nominator/denominator - intermed_res return f kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True)", ": ndarray (size,) PDF values as ndarray of shape (size, ) ''' if", "Mean axis kappa (optional, float): positive concentration parameter The Watson distribution is an", "data or set parameters manually.\") def fit(self, data): ''' Fits the Watson distribution", "= pdf * constant return pdf else: raise ValueError(\"Watson distribution not parameterized. 
Fit", "True: self.mu = mu_fitted self.kappa = kappa_fit else: raise ValueError(\"Concentration parameter could not", "rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones - np.square(z)) uniformcirle = 2 * np.pi", "as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & =", "= _pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf * constant return pdf else: raise", "ndarray of shape (size, 3) ''' if self.mu is not None and self.kappa", "density function of a set of vectors ``x`` given a parameterized Watson distribution", "= constant * np.exp(kappa) number_samples = 0 while number_samples < size: #draw uniform", "size) return samples else: raise ValueError(\"Watson distribution not parameterized. Fit it to data", "denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res return f kappa_fit, root_res =", "return f kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True) if root_res.converged == True:", "samples from the Watson distribution Arguments ---------- size : int, optional, default 1", "fit(self, data): ''' Fits the Watson distribution to data Arguments ---------- data :", "== True: self.mu = mu_fitted self.kappa = kappa_fit else: raise ValueError(\"Concentration parameter could", "x_rand = np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0, maxy) #calculate density at position x", "data Arguments ---------- data : ndarray (n, 3) Vector data the distribution is", "np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi)", "3) Vectors to evaluate the PDF at Returns ---------- pdfvals : ndarray (size,)", "could not be estimated.\") @njit(cache = True) def _pdf_wo_constant(mu, kappa, x): n_samples, _", "scipy.optimize import brentq class Watson: r\"\"\" Watson distribution .. 
note:: The Watson distribution", "import numpy as np from scipy.special import erfi from ._utils import rotation_matrix from", "distribution .. note:: The Watson distribution is only implemented for positive concentration parameter", "rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size, )) #maximal density for given kappa maxy", "the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. Directional Statistics, 1999. Chen.", "sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.]) rot_matrix =", "np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z, self.mu) samples = _sample(self.kappa,", "1e-4, 500., full_output=True) if root_res.converged == True: self.mu = mu_fitted self.kappa = kappa_fit", "._utils import rotation_matrix from ._descriptive_stats import orientation_matrix from numba import njit from scipy.optimize", "it to data or set parameters manually.\") def pdf(self, x): ''' Calculate probability", "to data or set parameters manually.\") def fit(self, data): ''' Fits the Watson", "range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] * mu).sum())**2) return unnormalized_pdf @njit(cache =", "hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia,", ": ndarray (n, 3) Vector data the distribution is fitted to ''' T", "rot_matrix, size) return samples else: raise ValueError(\"Watson distribution not parameterized. 
Fit it to", "intermed_res return f kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True) if root_res.converged ==", "orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res = np.sum(mu_fitted *", "kappa = None): self.mu = mu self.kappa = kappa def rvs(self, size =", "as np from scipy.special import erfi from ._utils import rotation_matrix from ._descriptive_stats import", "= mu self.kappa = kappa def rvs(self, size = 1): ''' Generate samples", "np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:, 0] = temp * x samples[:, 1]", "0., 1.]) rot_matrix = rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant, rot_matrix, size) return", "= brentq(obj, 1e-4, 500., full_output=True) if root_res.converged == True: self.mu = mu_fitted self.kappa", "= True) def _pdf_wo_constant(mu, kappa, x): n_samples, _ = x.shape unnormalized_pdf = np.zeros((n_samples,", "at Returns ---------- pdfvals : ndarray (size,) PDF values as ndarray of shape", "x_rand) #accept or reject if y_rand < f: res_array[number_samples] = x_rand number_samples +=1", "np.exp(kappa) number_samples = 0 while number_samples < size: #draw uniform samples x_rand =", "np.zeros((n_samples, )) for i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] *", "np.random.random(size) x = np.cos(uniformcirle) y = np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:, 0]", "given a parameterized Watson distribution Arguments ---------- x : ndarray (size, 3) Vectors", "an isotropic distribution for axial data. Its PDF is defined as .. 
math::", "import rotation_matrix from ._descriptive_stats import orientation_matrix from numba import njit from scipy.optimize import", "a parameterized Watson distribution Arguments ---------- x : ndarray (size, 3) Vectors to", "constant * np.exp(kappa) number_samples = 0 while number_samples < size: #draw uniform samples", "mu self.kappa = kappa def rvs(self, size = 1): ''' Generate samples from", "< f: res_array[number_samples] = x_rand number_samples +=1 return res_array @njit(cache = True) def", "(size, 3) ''' if self.mu is not None and self.kappa is not None:", "unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] * mu).sum())**2) return unnormalized_pdf @njit(cache = True)", "temp = np.sqrt(ones - np.square(z)) uniformcirle = 2 * np.pi * np.random.random(size) x", ".. note:: The Watson distribution is only implemented for positive concentration parameter :math:`\\kappa`.", "\\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M`", "* orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res = np.sum(mu_fitted", ":] * mu).sum())**2) return unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa, constant, size): res_array", "Watson: r\"\"\" Watson distribution .. note:: The Watson distribution is only implemented for", "_ = x.shape unnormalized_pdf = np.zeros((n_samples, )) for i in range(n_samples): unnormalized_pdf[i] =", "2] intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator =", "is only implemented for positive concentration parameter :math:`\\kappa`. Args: mu (optional, ndarray (3,", ". References: Mardia, Jupp. Directional Statistics, 1999. Chen. 
Generate Random Samples from von", "the distribution is fitted to ''' T = 1/data.shape[0] * orientation_matrix(data) evals, evectors", "np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf * constant return pdf", "not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.])", "defined as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu}, \\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ &", "raise ValueError(\"Watson distribution not parameterized. Fit it to data or set parameters manually.\")", "raise ValueError(\"Concentration parameter could not be estimated.\") @njit(cache = True) def _pdf_wo_constant(mu, kappa,", "(n, 3) Vector data the distribution is fitted to ''' T = 1/data.shape[0]", "= True) def rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size, )) #maximal density for", "import brentq class Watson: r\"\"\" Watson distribution .. 
note:: The Watson distribution is", "& = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and", "position x f = constant * np.exp(kappa * x_rand * x_rand) #accept or", "Fit it to data or set parameters manually.\") def pdf(self, x): ''' Calculate", "uniform samples x_rand = np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0, maxy) #calculate density at", "ndarray of shape (size, ) ''' if self.mu is not None and self.kappa", "* erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res return f kappa_fit,", "nominator/denominator - intermed_res return f kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True) if", "reject if y_rand < f: res_array[number_samples] = x_rand number_samples +=1 return res_array @njit(cache", "Watson distribution Arguments ---------- x : ndarray (size, 3) Vectors to evaluate the", "Random Samples from von Mises-Fisher and Watson Distributions. 2012 \"\"\" def __init__(self, mu", "constant * np.exp(kappa * x_rand * x_rand) #accept or reject if y_rand <", "y_rand < f: res_array[number_samples] = x_rand number_samples +=1 return res_array @njit(cache = True)", ":math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. 
Directional Statistics, 1999.", "of vectors ``x`` given a parameterized Watson distribution Arguments ---------- x : ndarray", "of samples Returns ---------- samples : ndarray (size, 3) samples as ndarray of", "\\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_", "def fit(self, data): ''' Fits the Watson distribution to data Arguments ---------- data", "f kappa_fit, root_res = brentq(obj, 1e-4, 500., full_output=True) if root_res.converged == True: self.mu", "* mu).sum())**2) return unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa, constant, size): res_array =", "or reject if y_rand < f: res_array[number_samples] = x_rand number_samples +=1 return res_array", "M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent hypergeometric", "= \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}`", "---------- size : int, optional, default 1 Number of samples Returns ---------- samples", "True) def _sample(kappa, constant, rot_matrix, size): ones = np.ones((size, )) z = rejection_sampling_numba(kappa,", "size): ones = np.ones((size, )) z = rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones", "return unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size, 
))", "''' Generate samples from the Watson distribution Arguments ---------- size : int, optional,", "PDF at Returns ---------- pdfvals : ndarray (size,) PDF values as ndarray of", "root_res = brentq(obj, 1e-4, 500., full_output=True) if root_res.converged == True: self.mu = mu_fitted", "probability density function of a set of vectors ``x`` given a parameterized Watson", "3)) samples[:, 0] = temp * x samples[:, 1] = temp * y", "distribution is an isotropic distribution for axial data. Its PDF is defined as", "sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf =", "is an isotropic distribution for axial data. Its PDF is defined as ..", "= _sample(self.kappa, constant, rot_matrix, size) return samples else: raise ValueError(\"Watson distribution not parameterized.", "500., full_output=True) if root_res.converged == True: self.mu = mu_fitted self.kappa = kappa_fit else:", "(T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5)", "pdf else: raise ValueError(\"Watson distribution not parameterized. 
Fit it to data or set", "not None and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa)", "= np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa -", "rot_matrix = rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant, rot_matrix, size) return samples else:", "evals, evectors = np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res = np.sum(mu_fitted * (T@mu_fitted))", "def rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size, )) #maximal density for given kappa", "self.mu) samples = _sample(self.kappa, constant, rot_matrix, size) return samples else: raise ValueError(\"Watson distribution", "((x[i, :] * mu).sum())**2) return unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa, constant, size):", "set parameters manually.\") def fit(self, data): ''' Fits the Watson distribution to data", "intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa", "parameter could not be estimated.\") @njit(cache = True) def _pdf_wo_constant(mu, kappa, x): n_samples,", "x): n_samples, _ = x.shape unnormalized_pdf = np.zeros((n_samples, )) for i in range(n_samples):", "= kappa_fit else: raise ValueError(\"Concentration parameter could not be estimated.\") @njit(cache = True)", "obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator =", "the Watson distribution to data Arguments ---------- data : ndarray (n, 3) Vector", "data or set parameters manually.\") def pdf(self, x): ''' Calculate probability density function", "import njit from scipy.optimize import brentq class Watson: r\"\"\" Watson distribution .. 
note::", "(\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes `Kummer's confluent hypergeometric function", "= np.ones((size, )) z = rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones - np.square(z))", "positive concentration parameter :math:`\\kappa`. Args: mu (optional, ndarray (3, ) ): Mean axis", "float): positive concentration parameter The Watson distribution is an isotropic distribution for axial", "= None, kappa = None): self.mu = mu self.kappa = kappa def rvs(self,", "References: Mardia, Jupp. Directional Statistics, 1999. Chen. Generate Random Samples from von Mises-Fisher", ") ''' if self.mu is not None and self.kappa is not None: sqrt_kappa", "from ._utils import rotation_matrix from ._descriptive_stats import orientation_matrix from numba import njit from", "root_res.converged == True: self.mu = mu_fitted self.kappa = kappa_fit else: raise ValueError(\"Concentration parameter", "ndarray (n, 3) Vector data the distribution is fitted to ''' T =", "in range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i, :] * mu).sum())**2) return unnormalized_pdf @njit(cache", "while number_samples < size: #draw uniform samples x_rand = np.random.uniform(-1.0, 1.0) y_rand =", "self.kappa = kappa_fit else: raise ValueError(\"Concentration parameter could not be estimated.\") @njit(cache =", "von Mises-Fisher and Watson Distributions. 2012 \"\"\" def __init__(self, mu = None, kappa", "Arguments ---------- size : int, optional, default 1 Number of samples Returns ----------", "parameterized. 
Fit it to data or set parameters manually.\") def pdf(self, x): '''", "x samples[:, 1] = temp * y samples[:, 2] = z for i", "Arguments ---------- data : ndarray (n, 3) Vector data the distribution is fitted", "to data or set parameters manually.\") def pdf(self, x): ''' Calculate probability density", "numpy as np from scipy.special import erfi from ._utils import rotation_matrix from ._descriptive_stats", "1 Number of samples Returns ---------- samples : ndarray (size, 3) samples as", "and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z =", "= np.sqrt(ones - np.square(z)) uniformcirle = 2 * np.pi * np.random.random(size) x =", "1.]) rot_matrix = rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant, rot_matrix, size) return samples", "x.shape unnormalized_pdf = np.zeros((n_samples, )) for i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa *", "= np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z, self.mu) samples =", "brentq(obj, 1e-4, 500., full_output=True) if root_res.converged == True: self.mu = mu_fitted self.kappa =", "distribution Arguments ---------- size : int, optional, default 1 Number of samples Returns", "confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . 
References:", "''' Fits the Watson distribution to data Arguments ---------- data : ndarray (n,", "estimated.\") @njit(cache = True) def _pdf_wo_constant(mu, kappa, x): n_samples, _ = x.shape unnormalized_pdf", "= np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf * constant return", "_pdf_wo_constant(mu, kappa, x): n_samples, _ = x.shape unnormalized_pdf = np.zeros((n_samples, )) for i", "y_rand = np.random.uniform(0.0, maxy) #calculate density at position x f = constant *", "np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f =", "for positive concentration parameter :math:`\\kappa`. Args: mu (optional, ndarray (3, ) ): Mean", "mu).sum())**2) return unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size,", "to evaluate the PDF at Returns ---------- pdfvals : ndarray (size,) PDF values", "or set parameters manually.\") def fit(self, data): ''' Fits the Watson distribution to", "= np.sqrt(kappa) nominator = (2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f", "np.exp(kappa * ((x[i, :] * mu).sum())**2) return unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa,", "Arguments ---------- x : ndarray (size, 3) Vectors to evaluate the PDF at", "erfi from ._utils import rotation_matrix from ._descriptive_stats import orientation_matrix from numba import njit", "distribution for axial data. Its PDF is defined as .. math:: p_{Watson}(\\pm\\mathbf{x}| \\boldsymbol{\\mu},", "parameterized. 
Fit it to data or set parameters manually.\") def fit(self, data): '''", "x_rand number_samples +=1 return res_array @njit(cache = True) def _sample(kappa, constant, rot_matrix, size):", "pdf * constant return pdf else: raise ValueError(\"Watson distribution not parameterized. Fit it", "where :math:`M` denotes `Kummer's confluent hypergeometric function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error", "class Watson: r\"\"\" Watson distribution .. note:: The Watson distribution is only implemented", "Generate samples from the Watson distribution Arguments ---------- size : int, optional, default", "= np.array([0., 0., 1.]) rot_matrix = rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant, rot_matrix,", "error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. Directional Statistics, 1999. Chen. Generate Random", "as ndarray of shape (size, ) ''' if self.mu is not None and", "evectors[:, 2] intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa = np.sqrt(kappa) nominator", "= np.empty((size, 3)) samples[:, 0] = temp * x samples[:, 1] = temp", "and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) pdf =", "function of a set of vectors ``x`` given a parameterized Watson distribution Arguments", "\\kappa) & = M\\left(\\frac{1}{2},\\frac{3}{2},\\kappa\\right)\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) \\\\ & = \\frac{\\sqrt{\\pi}\\mathrm{erfi}(\\sqrt{\\kappa})}{2\\sqrt{\\kappa}}\\exp(\\kappa (\\boldsymbol{\\mu}^T\\mathbf{x})^2) where :math:`M` denotes", "ValueError(\"Concentration parameter could not be estimated.\") @njit(cache = True) def _pdf_wo_constant(mu, kappa, x):", "shape (size, 3) ''' if self.mu is not None and self.kappa is not", "np.random.uniform(-1.0, 1.0) y_rand = 
np.random.uniform(0.0, maxy) #calculate density at position x f =", "return res_array @njit(cache = True) def _sample(kappa, constant, rot_matrix, size): ones = np.ones((size,", "at position x f = constant * np.exp(kappa * x_rand * x_rand) #accept", "Number of samples Returns ---------- samples : ndarray (size, 3) samples as ndarray", "of shape (size, 3) ''' if self.mu is not None and self.kappa is", "pdf = _pdf_wo_constant(self.mu, self.kappa, x) pdf = pdf * constant return pdf else:", "return samples else: raise ValueError(\"Watson distribution not parameterized. Fit it to data or", "not parameterized. Fit it to data or set parameters manually.\") def pdf(self, x):", "<https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. Directional Statistics, 1999. Chen. Generate Random Samples from", "1/data.shape[0] * orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res =", "self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0.,", "size: #draw uniform samples x_rand = np.random.uniform(-1.0, 1.0) y_rand = np.random.uniform(0.0, maxy) #calculate", "default 1 Number of samples Returns ---------- samples : ndarray (size, 3) samples", "1] = temp * y samples[:, 2] = z for i in range(size):", "* np.exp(kappa * x_rand * x_rand) #accept or reject if y_rand < f:", "scipy.special import erfi from ._utils import rotation_matrix from ._descriptive_stats import orientation_matrix from numba", "rot_matrix, size): ones = np.ones((size, )) z = rejection_sampling_numba(kappa, constant, size) temp =", "__init__(self, mu = None, kappa = None): self.mu = mu self.kappa = kappa", "Calculate probability density function of a set of vectors ``x`` given a parameterized", "Watson distribution is only implemented for positive concentration parameter :math:`\\kappa`. 
Args: mu (optional,", "._descriptive_stats import orientation_matrix from numba import njit from scipy.optimize import brentq class Watson:", "data : ndarray (n, 3) Vector data the distribution is fitted to '''", "np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def obj(kappa): sqrt_kappa", "Chen. Generate Random Samples from von Mises-Fisher and Watson Distributions. 2012 \"\"\" def", "<https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html#scipy.special.hyp1f1>`_ and :math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. Directional", "= np.cos(uniformcirle) y = np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:, 0] = temp", "= temp * y samples[:, 2] = z for i in range(size): vec=samples[i,", "unnormalized_pdf = np.zeros((n_samples, )) for i in range(n_samples): unnormalized_pdf[i] = np.exp(kappa * ((x[i,", "= temp * x samples[:, 1] = temp * y samples[:, 2] =", "@njit(cache = True) def _sample(kappa, constant, rot_matrix, size): ones = np.ones((size, )) z", "(size, ) ''' if self.mu is not None and self.kappa is not None:", "x_rand * x_rand) #accept or reject if y_rand < f: res_array[number_samples] = x_rand", "ndarray (size, 3) samples as ndarray of shape (size, 3) ''' if self.mu", "the PDF at Returns ---------- pdfvals : ndarray (size,) PDF values as ndarray", "def pdf(self, x): ''' Calculate probability density function of a set of vectors", "Jupp. Directional Statistics, 1999. Chen. Generate Random Samples from von Mises-Fisher and Watson", "else: raise ValueError(\"Watson distribution not parameterized. Fit it to data or set parameters", "ValueError(\"Watson distribution not parameterized. 
Fit it to data or set parameters manually.\") def", "(2*np.exp(kappa)*sqrt_kappa - np.sqrt(np.pi) * erfi(sqrt_kappa))/(4*kappa**1.5) denominator = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) f = nominator/denominator - intermed_res", "np.zeros((size, )) #maximal density for given kappa maxy = constant * np.exp(kappa) number_samples", "Fits the Watson distribution to data Arguments ---------- data : ndarray (n, 3)", "= rotation_matrix(z, self.mu) samples = _sample(self.kappa, constant, rot_matrix, size) return samples else: raise", "Watson distribution .. note:: The Watson distribution is only implemented for positive concentration", "= 1/data.shape[0] * orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res", "''' Calculate probability density function of a set of vectors ``x`` given a", "from scipy.special import erfi from ._utils import rotation_matrix from ._descriptive_stats import orientation_matrix from", "None: sqrt_kappa = np.sqrt(self.kappa) constant = np.sqrt(np.pi)*erfi(sqrt_kappa)/(2*sqrt_kappa) z = np.array([0., 0., 1.]) rot_matrix", "data): ''' Fits the Watson distribution to data Arguments ---------- data : ndarray", "kappa, x): n_samples, _ = x.shape unnormalized_pdf = np.zeros((n_samples, )) for i in", "= z for i in range(size): vec=samples[i, :] samples[i, :] = rot_matrix.dot(vec) return", "and :math:`\\mathrm{erfi}` the `imaginary error function <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfi.html>`_ . References: Mardia, Jupp. 
Directional Statistics,", "is not None and self.kappa is not None: sqrt_kappa = np.sqrt(self.kappa) constant =", "int, optional, default 1 Number of samples Returns ---------- samples : ndarray (size,", "def __init__(self, mu = None, kappa = None): self.mu = mu self.kappa =", "evectors = np.linalg.eigh(T) mu_fitted = evectors[:, 2] intermed_res = np.sum(mu_fitted * (T@mu_fitted)) def", "import erfi from ._utils import rotation_matrix from ._descriptive_stats import orientation_matrix from numba import", "np.cos(uniformcirle) y = np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:, 0] = temp *", "a set of vectors ``x`` given a parameterized Watson distribution Arguments ---------- x", "+=1 return res_array @njit(cache = True) def _sample(kappa, constant, rot_matrix, size): ones =", "<filename>spherical_stats/_watson.py import numpy as np from scipy.special import erfi from ._utils import rotation_matrix", "unnormalized_pdf @njit(cache = True) def rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size, )) #maximal", "y = np.sin(uniformcirle) samples = np.empty((size, 3)) samples[:, 0] = temp * x", "to ''' T = 1/data.shape[0] * orientation_matrix(data) evals, evectors = np.linalg.eigh(T) mu_fitted =", "ones = np.ones((size, )) z = rejection_sampling_numba(kappa, constant, size) temp = np.sqrt(ones -", "True) def rejection_sampling_numba(kappa, constant, size): res_array = np.zeros((size, )) #maximal density for given", "samples Returns ---------- samples : ndarray (size, 3) samples as ndarray of shape" ]
[]
[ "os from edflow.data.believers.meta_view import MetaViewDataset from edflow.util import walk, retrieve def _setup(root, N=100,", "i in range(2): for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in", "\"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data data =", "k in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else: for k in [\"attr1\", \"attr2\",", "Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root,", "3)) mmap[:] = data # view 3 data = np.arange(V).astype(int) mmap_path = os.path.join(view_root,", "views/complex - views/simple \"\"\".format( root ) ) return super_root, root, view_root def _teardown(test_data_root):", "edflow.data.believers.meta_view import MetaViewDataset from edflow.util import walk, retrieve def _setup(root, N=100, V=25): from", "k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk] assert len(M.labels[kk][k]) ==", "mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] =", "if kk == \"complex\": for i in range(2): for k in [\"attr1\", \"attr2\",", "\"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"),", "root, view_root def _teardown(test_data_root): if test_data_root == \".\": raise ValueError(\"Are you sure you", "data = np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype,", "import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root = 
os.path.join(super_root, \"base\")", "= os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] =", "data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | # Test", "np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap", "support: \"-1->1\" \"\"\" ) view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) #", "= {\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple = single_ref ref_complex = [[single_ref]", "V for kk in [\"simple1\", \"simple\", \"complex\"]: assert kk in M.labels if kk", "64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write(", "\"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data", "mmap[:] = data data = np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap", "label `image`. ## Content image: images loader_kwargs: image: support: \"-1->1\" \"\"\" ) view_root", "`image`. 
## Content image: images loader_kwargs: image: support: \"-1->1\" \"\"\" ) view_root =", "dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data data = np.zeros(shape=(N, 2)) mmap_path = os.path.join(root,", "1 data = np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap", "assert len(M.labels[kk][i][k]) == V else: for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert", "ex 0 this is the same for both complex and simple single_ref =", "= MetaViewDataset(view_root) M.expand = True M.append_labels = False M.show() assert len(M) == V", "[\"simple1\", \"simple\", \"complex\"]: assert kk in M.labels if kk == \"complex\": for i", "mmap[:] = data data = np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\")", "complex and simple single_ref = {\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple =", "2)) mmap[:] = data data = np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root, \"labels\",", "paths to the images are in the label `image`. 
## Content image: images", "np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple = single_ref ref_complex = [[single_ref] * 3]", "view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view 1 data =", "[\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk] assert len(M.labels[kk][k]) == V d", "V else: for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk]", "os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root,", "open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | # Test Dataset This", "= np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] = data data = np.ones(shape=(N, 17,", "MetaViewDataset(view_root) M.expand = True M.append_labels = False M.show() assert len(M) == V for", "from edflow.data.believers.meta_view import MetaViewDataset from edflow.util import walk, retrieve def _setup(root, N=100, V=25):", "dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data # view 2 data = np.zeros(shape=(V, 5,", "mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths data = np.arange(N) mmap_path", "= data # view 3 data = np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\")", "os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] = data", "= 25 try: super_root, base_root, view_root = _setup(\".\", N, V) M = MetaViewDataset(view_root)", "= paths data = np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path,", "= data for p in paths: image = (255 * np.ones((64, 64, 
3))).astype(np.uint8)", "= os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\",", "a view dataset which loads images from a base. base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root:", "Test Dataset This is a view dataset which loads images from a base.", "views: simple1: simple simple: views/simple complex: - views/complex - views/simple \"\"\".format( root )", "os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data data", "Content image: images loader_kwargs: image: support: \"-1->1\" \"\"\" ) view_root = os.path.join(super_root, \"mview\")", "in the label `image`. ## Content image: images loader_kwargs: image: support: \"-1->1\" \"\"\"", "data for p in paths: image = (255 * np.ones((64, 64, 3))).astype(np.uint8) im", "view 3 data = np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path,", "\"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0, } def tester(key, val): assert np.all(val", "mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\",", "= os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,))", "mode=\"w+\", shape=(V, 5, 3)) mmap[:] = data # view 3 data = np.arange(V).astype(int)", "for i in range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype,", "= Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: |", "view_root, 
\"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3))", "0} ref_simple = single_ref ref_complex = [[single_ref] * 3] * 20 ref =", "M.show() assert len(M) == V for kk in [\"simple1\", \"simple\", \"complex\"]: assert kk", "17, 2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,", "val): assert np.all(val == retrieve(ref, key)) walk(d, tester, pass_key=True) assert hasattr(M, \"meta\") finally:", "= np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\")", "2)) mmap[:] = data for p in paths: image = (255 * np.ones((64,", "f\"{i:0>3d}.png\") for i in range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path,", "data = np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\",", "# For ex 0 this is the same for both complex and simple", "to the images are in the label `image`. ## Content image: images loader_kwargs:", "want to delete this directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N = 100", "\"labels\", \"views\"), exist_ok=True) # view 1 data = np.arange(V).astype(int) mmap_path = os.path.join( view_root,", "ref = { \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0, }", "kk in [\"simple1\", \"simple\", \"complex\"]: assert kk in M.labels if kk == \"complex\":", "mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2))", "from a base. 
base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1: simple simple: views/simple", "delete this directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N = 100 V =", "super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"),", "= np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data # view 2 data =", "os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\")", "mmap[:] = data # view 2 data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path =", "with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | # Test Dataset", "mfile.write( \"\"\" description: | # Test Dataset This is a dataset which loads", "- views/simple \"\"\".format( root ) ) return super_root, root, view_root def _teardown(test_data_root): if", "from PIL import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root =", "data = np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\",", "os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i", "f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths data = np.arange(N)", "= np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,))", "is a dataset which 
loads images. All paths to the images are in", "100 V = 25 try: super_root, base_root, view_root = _setup(\".\", N, V) M", "image = (255 * np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with open(os.path.join(root,", "\"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data with open(os.path.join(view_root,", "= np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap", "{ \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0, } def tester(key,", "shape=(V, 5, 3)) mmap[:] = data # view 3 data = np.arange(V).astype(int) mmap_path", "= data data = np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap =", "as mfile: mfile.write( \"\"\" description: | # Test Dataset This is a dataset", "M.labels[kk] assert len(M.labels[kk][k]) == V d = M[0] # For ex 0 this", "\"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data #", "np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data # view 2 data = np.zeros(shape=(V,", "you sure you want to delete this directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset():", "mmap[:] = data # view 3 data = np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\",", "im = Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description:", "\"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | # Test Dataset This is", "0 this is the same for both complex and simple single_ref = {\"image\":", "\"images\", f\"{i:0>3d}.png\") for i in range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") 
mmap =", "base. base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1: simple simple: views/simple complex: -", "ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0, } def tester(key, val): assert np.all(val ==", "= os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True)", "\"image_\", \"keypoints\"]: assert k in M.labels[kk] assert len(M.labels[kk][k]) == V d = M[0]", "assert k in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else: for k in [\"attr1\",", "\"\"\".format( root ) ) return super_root, root, view_root def _teardown(test_data_root): if test_data_root ==", "MetaViewDataset from edflow.util import walk, retrieve def _setup(root, N=100, V=25): from PIL import", "= True M.append_labels = False M.show() assert len(M) == V for kk in", "\"index_\": 0} ref_simple = single_ref ref_complex = [[single_ref] * 3] * 20 ref", "\"image_\", \"keypoints\"]: assert k in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else: for k", "\"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths data =", "simple1: simple simple: views/simple complex: - views/complex - views/simple \"\"\".format( root ) )", "test_meta_view_dset(): N = 100 V = 25 try: super_root, base_root, view_root = _setup(\".\",", "loader_kwargs: image: support: \"-1->1\" \"\"\" ) view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"),", "import MetaViewDataset from edflow.util import walk, retrieve def _setup(root, N=100, V=25): from PIL", "False M.show() assert len(M) == V for kk in [\"simple1\", \"simple\", \"complex\"]: assert", "\"keypoints\"]: assert k in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else: for k in", "dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) 
mmap[:] = data # view 3 data =", "os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5,", "single_ref = {\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple = single_ref ref_complex =", "= False M.show() assert len(M) == V for kk in [\"simple1\", \"simple\", \"complex\"]:", "Test Dataset This is a dataset which loads images. All paths to the", ") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) mmap[:] = data #", "np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] = data data = np.ones(shape=(N, 17, 2))", "{\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple = single_ref ref_complex = [[single_ref] *", "# view 3 data = np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap =", "image: support: \"-1->1\" \"\"\" ) view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True)", "views/simple complex: - views/complex - views/simple \"\"\".format( root ) ) return super_root, root,", "2 data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\"", "mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] =", "2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17,", "import os from edflow.data.believers.meta_view import MetaViewDataset from edflow.util import walk, retrieve def _setup(root,", "exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in range(N)])", "loads 
images. All paths to the images are in the label `image`. ##", "\"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view 1 data = np.arange(V).astype(int) mmap_path =", "5, 3)) mmap[:] = data # view 3 data = np.arange(V).astype(int) mmap_path =", "= np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) mmap[:] = data for p in", "data data = np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap =", "np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile:", "is the same for both complex and simple single_ref = {\"image\": np.ones(shape=(64, 64,", "np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\",", "[[single_ref] * 3] * 20 ref = { \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\":", "_teardown(test_data_root): if test_data_root == \".\": raise ValueError(\"Are you sure you want to delete", "image: images loader_kwargs: image: support: \"-1->1\" \"\"\" ) view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root,", "simple single_ref = {\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple = single_ref ref_complex", "this directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N = 100 V = 25", "edflow.util import walk, retrieve def _setup(root, N=100, V=25): from PIL import Image super_root", "import walk, retrieve def _setup(root, N=100, V=25): from PIL import Image super_root =", "assert len(M) == V for kk in [\"simple1\", \"simple\", \"complex\"]: assert kk in", "exist_ok=True) # view 1 data = np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\",", "* np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with 
open(os.path.join(root, \"meta.yaml\"), \"w+\") as", "= data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | #", "= { \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0, } def", "mmap[:] = data for p in paths: image = (255 * np.ones((64, 64,", "which loads images from a base. base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1:", "0, } def tester(key, val): assert np.all(val == retrieve(ref, key)) walk(d, tester, pass_key=True)", "np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) mmap[:] = data # view 3 data", "| # Test Dataset This is a dataset which loads images. All paths", "root: {} views: simple1: simple simple: views/simple complex: - views/complex - views/simple \"\"\".format(", "in M.labels if kk == \"complex\": for i in range(2): for k in", "25 try: super_root, base_root, view_root = _setup(\".\", N, V) M = MetaViewDataset(view_root) M.expand", "data # view 3 data = np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap", "exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in range(N)]) mmap_path = os.path.join(root,", "both complex and simple single_ref = {\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple", "numpy as np import os from edflow.data.believers.meta_view import MetaViewDataset from edflow.util import walk,", "len(M.labels[kk][i][k]) == V else: for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k", "ref_simple = single_ref ref_complex = [[single_ref] * 3] * 20 ref = {", "images are in the label `image`. 
## Content image: images loader_kwargs: image: support:", "= (255 * np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"),", "\"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk] assert len(M.labels[kk][k]) == V d =", "in range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,))", "f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) mmap[:] = data for", "import numpy as np import os from edflow.data.believers.meta_view import MetaViewDataset from edflow.util import", "np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap =", "data # view 2 data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join( view_root,", "= os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths", "len(M) == V for kk in [\"simple1\", \"simple\", \"complex\"]: assert kk in M.labels", "base_kwargs: root: {} views: simple1: simple simple: views/simple complex: - views/complex - views/simple", "ref_simple], \"index_\": 0, } def tester(key, val): assert np.all(val == retrieve(ref, key)) walk(d,", "single_ref ref_complex = [[single_ref] * 3] * 20 ref = { \"simple1\": ref_simple,", "shape=(N,)) mmap[:] = paths data = np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap", "= os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data", "mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data data = np.zeros(shape=(N, 2))", "is a view dataset which loads images from a 
base. base_dset: edflow.data.believers.meta.MetaDataset base_kwargs:", "= os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths", "np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data data = np.zeros(shape=(N, 2)) mmap_path =", "\"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in", "Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | #", "3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\"", "os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:]", "paths: image = (255 * np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with", ") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data # view 2", "-rf {test_data_root}\") def test_meta_view_dset(): N = 100 V = 25 try: super_root, base_root,", "= M[0] # For ex 0 this is the same for both complex", "f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data # view", "dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] = data data = np.ones(shape=(N, 17, 2)) mmap_path", "= np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype,", "try: super_root, base_root, view_root = _setup(\".\", N, V) M = MetaViewDataset(view_root) M.expand =", "assert k in M.labels[kk] assert len(M.labels[kk][k]) == V d = M[0] # For", "# Test Dataset This is a view 
dataset which loads images from a", "mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:]", "\"simple\", \"complex\"]: assert kk in M.labels if kk == \"complex\": for i in", "from edflow.util import walk, retrieve def _setup(root, N=100, V=25): from PIL import Image", "shape=(N, 17, 2)) mmap[:] = data for p in paths: image = (255", "kk == \"complex\": for i in range(2): for k in [\"attr1\", \"attr2\", \"image_\",", "V d = M[0] # For ex 0 this is the same for", "shape=(N,)) mmap[:] = data data = np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\")", "= np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,))", "= np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths data = np.arange(N) mmap_path =", "which loads images. 
All paths to the images are in the label `image`.", "True M.append_labels = False M.show() assert len(M) == V for kk in [\"simple1\",", "N=100, V=25): from PIL import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root)", "data data = np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path,", "super_root, root, view_root def _teardown(test_data_root): if test_data_root == \".\": raise ValueError(\"Are you sure", "mode=\"w+\", shape=(V,)) mmap[:] = data # view 2 data = np.zeros(shape=(V, 5, 3)).astype(int)", "views/simple \"\"\".format( root ) ) return super_root, root, view_root def _teardown(test_data_root): if test_data_root", "paths data = np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype,", "mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\",", "im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | # Test", "ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0, } def tester(key, val): assert", "np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype,", "in paths: image = (255 * np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p)", "def _setup(root, N=100, V=25): from PIL import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root", "np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\") as mfile:", "in range(2): for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk][i]", "for 
p in paths: image = (255 * np.ones((64, 64, 3))).astype(np.uint8) im =", "p in paths: image = (255 * np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image)", "= [[single_ref] * 3] * 20 ref = { \"simple1\": ref_simple, \"simple\": ref_simple,", "data = np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path,", "(255 * np.ones((64, 64, 3))).astype(np.uint8) im = Image.fromarray(image) im.save(p) with open(os.path.join(root, \"meta.yaml\"), \"w+\")", "[\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else:", "mfile.write( \"\"\" description: | # Test Dataset This is a view dataset which", "mode=\"w+\", shape=(N, 2)) mmap[:] = data data = np.ones(shape=(N, 17, 2)) mmap_path =", "dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) mmap[:] = data for p in paths: image", "dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths data = np.arange(N) mmap_path = os.path.join(root, \"labels\",", "view_root = _setup(\".\", N, V) M = MetaViewDataset(view_root) M.expand = True M.append_labels =", "mfile: mfile.write( \"\"\" description: | # Test Dataset This is a view dataset", "super_root = os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True)", "in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V", "base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1: simple simple: views/simple complex: - views/complex", "range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:]", "M.expand = True M.append_labels = False M.show() assert len(M) == V for kk", "base_root, view_root = 
_setup(\".\", N, V) M = MetaViewDataset(view_root) M.expand = True M.append_labels", "\"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) mmap[:] =", "= np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) mmap[:] = data # view 3", "you want to delete this directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N =", "== \"complex\": for i in range(2): for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]:", "def _teardown(test_data_root): if test_data_root == \".\": raise ValueError(\"Are you sure you want to", "root ) ) return super_root, root, view_root def _teardown(test_data_root): if test_data_root == \".\":", "Dataset This is a view dataset which loads images from a base. base_dset:", "test_data_root == \".\": raise ValueError(\"Are you sure you want to delete this directory?\")", "def tester(key, val): assert np.all(val == retrieve(ref, key)) walk(d, tester, pass_key=True) assert hasattr(M,", "view dataset which loads images from a base. base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {}", "same for both complex and simple single_ref = {\"image\": np.ones(shape=(64, 64, 3)), \"index_\":", "np import os from edflow.data.believers.meta_view import MetaViewDataset from edflow.util import walk, retrieve def", "the label `image`. 
## Content image: images loader_kwargs: image: support: \"-1->1\" \"\"\" )", "images loader_kwargs: image: support: \"-1->1\" \"\"\" ) view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\",", "\"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for", "view 2 data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\",", "assert np.all(val == retrieve(ref, key)) walk(d, tester, pass_key=True) assert hasattr(M, \"meta\") finally: _teardown(super_root)", "64, 3)), \"index_\": 0} ref_simple = single_ref ref_complex = [[single_ref] * 3] *", "this is the same for both complex and simple single_ref = {\"image\": np.ones(shape=(64,", "\"\"\" description: | # Test Dataset This is a dataset which loads images.", "for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk][i] assert len(M.labels[kk][i][k])", "17, 2)) mmap[:] = data for p in paths: image = (255 *", "mode=\"w+\", shape=(V,)) mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\"", "and simple single_ref = {\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0} ref_simple = single_ref", "dataset which loads images. 
All paths to the images are in the label", "k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk][i] assert len(M.labels[kk][i][k]) ==", "shape=(N, 2)) mmap[:] = data data = np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root,", "M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else: for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]:", "- views/complex - views/simple \"\"\".format( root ) ) return super_root, root, view_root def", "import pytest import numpy as np import os from edflow.data.believers.meta_view import MetaViewDataset from", "_setup(root, N=100, V=25): from PIL import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root =", "= _setup(\".\", N, V) M = MetaViewDataset(view_root) M.expand = True M.append_labels = False", "np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths data = np.arange(N) mmap_path = os.path.join(root,", "== V d = M[0] # For ex 0 this is the same", "\"index_\": 0, } def tester(key, val): assert np.all(val == retrieve(ref, key)) walk(d, tester,", "\"views\"), exist_ok=True) # view 1 data = np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\",", "paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in range(N)]) mmap_path = os.path.join(root, \"labels\",", "For ex 0 this is the same for both complex and simple single_ref", "V=25): from PIL import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root", "mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: |", "mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) mmap[:] = data for p", "mmap[:] = paths data = np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap =", "N = 100 V = 25 try: super_root, base_root, view_root = _setup(\".\", N,", "\"w+\") as 
mfile: mfile.write( \"\"\" description: | # Test Dataset This is a", "mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] =", "== \".\": raise ValueError(\"Are you sure you want to delete this directory?\") os.system(f\"rm", "in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk] assert len(M.labels[kk][k]) == V", "\"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in range(N)]) mmap_path =", "\"complex\"]: assert kk in M.labels if kk == \"complex\": for i in range(2):", "a base. base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1: simple simple: views/simple complex:", "mode=\"w+\", shape=(N,)) mmap[:] = paths data = np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\")", "simple: views/simple complex: - views/complex - views/simple \"\"\".format( root ) ) return super_root,", "root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root,", "## Content image: images loader_kwargs: image: support: \"-1->1\" \"\"\" ) view_root = os.path.join(super_root,", "edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1: simple simple: views/simple complex: - views/complex -", "# view 2 data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join( view_root, \"labels\",", "= 100 V = 25 try: super_root, base_root, view_root = _setup(\".\", N, V)", "mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) mmap[:] = data # view", "mode=\"w+\", shape=(N, 17, 2)) mmap[:] = data for p in paths: image =", "\"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) 
mmap[:] = data", "= data # view 2 data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join(", "os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view 1 data = np.arange(V).astype(int) mmap_path = os.path.join(", ") return super_root, root, view_root def _teardown(test_data_root): if test_data_root == \".\": raise ValueError(\"Are", "are in the label `image`. ## Content image: images loader_kwargs: image: support: \"-1->1\"", "in [\"simple1\", \"simple\", \"complex\"]: assert kk in M.labels if kk == \"complex\": for", "kk in M.labels if kk == \"complex\": for i in range(2): for k", "f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"),", "f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data data = np.zeros(shape=(N,", "in M.labels[kk] assert len(M.labels[kk][k]) == V d = M[0] # For ex 0", "raise ValueError(\"Are you sure you want to delete this directory?\") os.system(f\"rm -rf {test_data_root}\")", "dataset which loads images from a base. base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views:", "mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data # view 2 data", "3 data = np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype,", "retrieve def _setup(root, N=100, V=25): from PIL import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\")", "description: | # Test Dataset This is a dataset which loads images. 
All", "view 1 data = np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" )", "the same for both complex and simple single_ref = {\"image\": np.ones(shape=(64, 64, 3)),", "np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,", "PIL import Image super_root = os.path.join(root, \"METAVIEW__test_data__METAVIEW\") super_root = os.path.abspath(super_root) root = os.path.join(super_root,", "np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) mmap[:] = data for p in paths:", "# view 1 data = np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\"", "mfile: mfile.write( \"\"\" description: | # Test Dataset This is a dataset which", "5, 3)).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path,", "view_root def _teardown(test_data_root): if test_data_root == \".\": raise ValueError(\"Are you sure you want", "walk, retrieve def _setup(root, N=100, V=25): from PIL import Image super_root = os.path.join(root,", "M.labels if kk == \"complex\": for i in range(2): for k in [\"attr1\",", "shape=(V,)) mmap[:] = data # view 2 data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path", "for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk] assert len(M.labels[kk][k])", "complex: - views/complex - views/simple \"\"\".format( root ) ) return super_root, root, view_root", "np.arange(V).astype(int) mmap_path = os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:]", "os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths = np.array([os.path.join(root, \"images\", f\"{i:0>3d}.png\") for i in 
range(N)]) mmap_path", "== V else: for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in", "\"-1->1\" \"\"\" ) view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view", "All paths to the images are in the label `image`. ## Content image:", "os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\", shape=(N,)) mmap[:] = paths data", "d = M[0] # For ex 0 this is the same for both", "data = np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap =", "M = MetaViewDataset(view_root) M.expand = True M.append_labels = False M.show() assert len(M) ==", "ValueError(\"Are you sure you want to delete this directory?\") os.system(f\"rm -rf {test_data_root}\") def", "= os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,", "os.path.abspath(super_root) root = os.path.join(super_root, \"base\") os.makedirs(os.path.join(root, \"images\"), exist_ok=True) os.makedirs(os.path.join(root, \"labels\"), exist_ok=True) paths =", "\"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else: for", "= os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data", "= np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\",", "description: | # Test Dataset This is a view dataset which loads images", "the images are in the label `image`. 
## Content image: images loader_kwargs: image:", "super_root, base_root, view_root = _setup(\".\", N, V) M = MetaViewDataset(view_root) M.expand = True", "np.arange(N) mmap_path = os.path.join(root, \"labels\", f\"attr1-*-{N}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:]", "f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) mmap[:] = data", "M.append_labels = False M.show() assert len(M) == V for kk in [\"simple1\", \"simple\",", "mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] = data data = np.ones(shape=(N,", "ref_complex = [[single_ref] * 3] * 20 ref = { \"simple1\": ref_simple, \"simple\":", "3)).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype,", "2)) mmap_path = os.path.join(root, \"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2))", "{} views: simple1: simple simple: views/simple complex: - views/complex - views/simple \"\"\".format( root", "loads images from a base. 
base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1: simple", "def test_meta_view_dset(): N = 100 V = 25 try: super_root, base_root, view_root =", "pytest import numpy as np import os from edflow.data.believers.meta_view import MetaViewDataset from edflow.util", "20 ref = { \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0,", "| # Test Dataset This is a view dataset which loads images from", "shape=(V,)) mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description:", "\"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V, 5, 3)) mmap[:]", "V = 25 try: super_root, base_root, view_root = _setup(\".\", N, V) M =", "as mfile: mfile.write( \"\"\" description: | # Test Dataset This is a view", "[ref_complex, ref_simple], \"index_\": 0, } def tester(key, val): assert np.all(val == retrieve(ref, key))", "os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view 1 data = np.arange(V).astype(int) mmap_path", "if test_data_root == \".\": raise ValueError(\"Are you sure you want to delete this", "This is a dataset which loads images. 
All paths to the images are", "\"complex\": for i in range(2): for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert", "\"complex\": [ref_complex, ref_simple], \"index_\": 0, } def tester(key, val): assert np.all(val == retrieve(ref,", "= single_ref ref_complex = [[single_ref] * 3] * 20 ref = { \"simple1\":", "tester(key, val): assert np.all(val == retrieve(ref, key)) walk(d, tester, pass_key=True) assert hasattr(M, \"meta\")", "= np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as", ") ) return super_root, root, view_root def _teardown(test_data_root): if test_data_root == \".\": raise", "directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N = 100 V = 25 try:", "a dataset which loads images. All paths to the images are in the", "open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | # Test Dataset This", ") view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view 1 data", "to delete this directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N = 100 V", "sure you want to delete this directory?\") os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N", "} def tester(key, val): assert np.all(val == retrieve(ref, key)) walk(d, tester, pass_key=True) assert", "images from a base. 
base_dset: edflow.data.believers.meta.MetaDataset base_kwargs: root: {} views: simple1: simple simple:", "data = np.zeros(shape=(V, 5, 3)).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"complex-*-{V}x5x3-*-{data.dtype}.npy\" )", "N, V) M = MetaViewDataset(view_root) M.expand = True M.append_labels = False M.show() assert", "\"\"\" ) view_root = os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view 1", "os.system(f\"rm -rf {test_data_root}\") def test_meta_view_dset(): N = 100 V = 25 try: super_root,", "3)), \"index_\": 0} ref_simple = single_ref ref_complex = [[single_ref] * 3] * 20", "= os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) mmap[:]", "os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 17, 2)) mmap[:] =", "Dataset This is a dataset which loads images. 
All paths to the images", "mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\")", "mode=\"w+\", shape=(N,)) mmap[:] = data data = np.zeros(shape=(N, 2)) mmap_path = os.path.join(root, \"labels\",", "in M.labels[kk][i] assert len(M.labels[kk][i][k]) == V else: for k in [\"attr1\", \"attr2\", \"image_\",", "\"labels\", f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] = data data", "\".\": raise ValueError(\"Are you sure you want to delete this directory?\") os.system(f\"rm -rf", "f\"attr2-*-{N}x2-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N, 2)) mmap[:] = data data =", "view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] =", "for kk in [\"simple1\", \"simple\", \"complex\"]: assert kk in M.labels if kk ==", "* 3] * 20 ref = { \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex,", "os.path.join(view_root, \"labels\", f\"simple-*-{V}-*-{data.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data with", "_setup(\".\", N, V) M = MetaViewDataset(view_root) M.expand = True M.append_labels = False M.show()", "assert len(M.labels[kk][k]) == V d = M[0] # For ex 0 this is", "== V for kk in [\"simple1\", \"simple\", \"complex\"]: assert kk in M.labels if", "This is a view dataset which loads images from a base. 
base_dset: edflow.data.believers.meta.MetaDataset", "= np.memmap(mmap_path, dtype=data.dtype, mode=\"w+\", shape=(N,)) mmap[:] = data data = np.zeros(shape=(N, 2)) mmap_path", "else: for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk] assert", "for both complex and simple single_ref = {\"image\": np.ones(shape=(64, 64, 3)), \"index_\": 0}", "len(M.labels[kk][k]) == V d = M[0] # For ex 0 this is the", "\"\"\" description: | # Test Dataset This is a view dataset which loads", "M[0] # For ex 0 this is the same for both complex and", "k in M.labels[kk] assert len(M.labels[kk][k]) == V d = M[0] # For ex", "* 20 ref = { \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\":", "with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write( \"\"\" description: | # Test Dataset", "as np import os from edflow.data.believers.meta_view import MetaViewDataset from edflow.util import walk, retrieve", "\"keypoints\"]: assert k in M.labels[kk] assert len(M.labels[kk][k]) == V d = M[0] #", "\"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple], \"index_\": 0, } def tester(key, val):", "V) M = MetaViewDataset(view_root) M.expand = True M.append_labels = False M.show() assert len(M)", "i in range(N)]) mmap_path = os.path.join(root, \"labels\", f\"image:image-*-{N}-*-{paths.dtype}.npy\") mmap = np.memmap(mmap_path, dtype=paths.dtype, mode=\"w+\",", "for i in range(2): for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k", "simple simple: views/simple complex: - views/complex - views/simple \"\"\".format( root ) ) return", "3] * 20 ref = { \"simple1\": ref_simple, \"simple\": ref_simple, \"complex\": [ref_complex, ref_simple],", "images. All paths to the images are in the label `image`. 
## Content", "= data data = np.ones(shape=(N, 17, 2)) mmap_path = os.path.join(root, \"labels\", f\"keypoints-*-{N}x17x2-*-{data.dtype}.npy\") mmap", "= np.arange(V).astype(int) mmap_path = os.path.join( view_root, \"labels\", \"views\", f\"simple-*-{V}-*-{data.dtype}.npy\" ) mmap = np.memmap(mmap_path,", "return super_root, root, view_root def _teardown(test_data_root): if test_data_root == \".\": raise ValueError(\"Are you", "<reponame>ffeldmann/edflow import pytest import numpy as np import os from edflow.data.believers.meta_view import MetaViewDataset", "# Test Dataset This is a dataset which loads images. All paths to", "= os.path.join(super_root, \"mview\") os.makedirs(os.path.join(view_root, \"labels\", \"views\"), exist_ok=True) # view 1 data = np.arange(V).astype(int)", "range(2): for k in [\"attr1\", \"attr2\", \"image_\", \"keypoints\"]: assert k in M.labels[kk][i] assert", "{test_data_root}\") def test_meta_view_dset(): N = 100 V = 25 try: super_root, base_root, view_root", "assert kk in M.labels if kk == \"complex\": for i in range(2): for", "dtype=data.dtype, mode=\"w+\", shape=(V,)) mmap[:] = data with open(os.path.join(view_root, \"meta.yaml\"), \"w+\") as mfile: mfile.write(" ]
[ "include from django.contrib import admin from django.views.generic import TemplateView urlpatterns = [ url(r'^admin/',", "django.conf.urls import url, include from django.contrib import admin from django.views.generic import TemplateView urlpatterns", "url, include from django.contrib import admin from django.views.generic import TemplateView urlpatterns = [", "django.contrib import admin from django.views.generic import TemplateView urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^',", "from django.views.generic import TemplateView urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('server.urls'), name='server'), ]", "admin from django.views.generic import TemplateView urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('server.urls'), name='server'),", "from django.conf.urls import url, include from django.contrib import admin from django.views.generic import TemplateView", "import url, include from django.contrib import admin from django.views.generic import TemplateView urlpatterns =", "import admin from django.views.generic import TemplateView urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('server.urls'),", "from django.contrib import admin from django.views.generic import TemplateView urlpatterns = [ url(r'^admin/', admin.site.urls)," ]
[ "############################################################################# # Place the colorbar legend vertically and to the right side #############################################################################", "and y coordinates xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y # now change the", "VCS: v = vcs.init() # Assign the variable \"t_asd\" to the persistent 'ASD'", "the right y-axis t_asd.box1.x2 = 0.87 # set the top x-axis (secind y", "# move the \"Mean\" text to x-axis center t_asd.mean.y=0.5 # move the \"Mean\"", "t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 #", "the time dimension data = cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.)) #", "t_asd.mean.y=0.5 # move the \"Mean\" text to y-axis center t_asd.data.priority = 0 #", "persistent 'ASD' template. t_asd = v.gettemplate( 'ASD' ) # Create a new template", "existing 'ASD' template t2_asd = v.createtemplate( 'new', 'ASD' ) # Plot the data", "(second y axis) to be blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0", "to y-axis center t_asd.data.priority = 0 # remove the data so the \"Mean\"", "y-axis center t_asd.data.priority = 0 # remove the data so the \"Mean\" text", "# Create a new template from the existing 'ASD' template t2_asd = v.createtemplate(", ") # Plot the data using the above 'ASD' template. v.plot( data, t_asd", "'ASD' ) # Plot the data using the above 'ASD' template. v.plot( data,", "a new template from the existing 'ASD' template t2_asd = v.createtemplate( 'new', 'ASD'", "clear the canvas and plot the template again v.clear() v.plot( data, t_asd )", "and get a subset of the time dimension data = cdmsfile('clt', longitude=(-180, 180),", "\"Mean\" text is visable. 
v.update() ############################################################################# # Place the colorbar legend vertically and", "box - the right y-axis t_asd.box1.x2 = 0.87 # set the top x-axis", "x-axis center t_asd.mean.y=0.5 # move the \"Mean\" text to y-axis center t_asd.data.priority =", "# remove the data so the \"Mean\" text is visable. v.update() ############################################################################# #", "to the persistent 'ASD' template. t_asd = v.gettemplate( 'ASD' ) # Create a", "= v.createtemplate( 'new', 'ASD' ) # Plot the data using the above 'ASD'", "############################################################################# t_asd.data.priority = 1 t_asd.legend.priority = 1 t_asd.legend.list() # list the legend members", "data set and get a subset of the time dimension data = cdmsfile('clt',", "to the right side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority = 1 t_asd.legend.list() #", "180), latitude = (-90., 90.)) # Initial VCS: v = vcs.init() # Assign", "xmean_current t_asd.mean.y = ymean_current # move the right side of a plot to", "0 t_asd.legend.priority=0 # save current 'Mean' placemant for x and y coordinates xmean_current", "vcs, cdms2 as cdms, cdutil, time, os, sys # Open data file: filepath", "= cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.)) # Initial VCS: v =", "then move the sorrounding box - the right y-axis t_asd.box1.x2 = 0.87 #", "# set the right y-axis (second y axis) to be blank (priority=0) t_asd.ylabel2.priority", "where it was t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current # move the right", "to be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 # set the right", "the legend members v.mode=0 # turn the automatic update off # move 'Mean'", "Import the modules needed for the tuturial import vcs, cdms2 as cdms, cdutil,", "list the legend members v.mode=0 # turn the automatic update off # 
move", "so the \"Mean\" text is visable. v.update() ############################################################################# # Place the colorbar legend", "Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) #", "center t_asd.mean.y=0.5 # move the \"Mean\" text to y-axis center t_asd.data.priority = 0", "= 0 t_asd.legend.priority=0 # save current 'Mean' placemant for x and y coordinates", "to make space for the legend # first move the inner plot t_asd.data.x2", "ymean_current # move the right side of a plot to the left to", "t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0 # move the colorbar legend position, to", "# Plot the data using the above 'ASD' template. v.plot( data, t_asd )", "v.mode=0 # turn the automatic update off # move 'Mean' text back where", "the tuturial import vcs, cdms2 as cdms, cdutil, time, os, sys # Open", "v.gettemplate( 'ASD' ) # Create a new template from the existing 'ASD' template", "segments from the page. t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority", "back where it was t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current # move the", ") # Remove picture segments from the page. 
t_asd.list( ) t_asd.xlabel2.priority = 0", "'Mean' placemant for x and y coordinates xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y", "(-90., 90.)) # Initial VCS: v = vcs.init() # Assign the variable \"t_asd\"", "# list the legend members v.mode=0 # turn the automatic update off #", "legend members v.mode=0 # turn the automatic update off # move 'Mean' text", "a plot to the left to make space for the legend # first", "t_asd.data.priority = 0 # remove the data so the \"Mean\" text is visable.", "t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas and plot the template again v.clear()", "data = cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.)) # Initial VCS: v", "0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save current 'Mean' placemant for x and", "right side of a plot to the left to make space for the", "now change the placement t_asd.mean.x=0.5 # move the \"Mean\" text to x-axis center", "move the \"Mean\" text to y-axis center t_asd.data.priority = 0 # remove the", "# Assign the variable \"t_asd\" to the persistent 'ASD' template. t_asd = v.gettemplate(", "0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save current 'Mean' placemant", "to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas and plot", "the placement t_asd.mean.x=0.5 # move the \"Mean\" text to x-axis center t_asd.mean.y=0.5 #", "1 t_asd.legend.priority = 1 t_asd.legend.list() # list the legend members v.mode=0 # turn", "set and get a subset of the time dimension data = cdmsfile('clt', longitude=(-180,", "remove the data so the \"Mean\" text is visable. 
v.update() ############################################################################# # Place", "= 0.87 # set the top x-axis (secind y axis) to be blank", "# Import the modules needed for the tuturial import vcs, cdms2 as cdms,", "move the \"Mean\" text to x-axis center t_asd.mean.y=0.5 # move the \"Mean\" text", "space for the legend # first move the inner plot t_asd.data.x2 = 0.87", "\"Mean\" text to x-axis center t_asd.mean.y=0.5 # move the \"Mean\" text to y-axis", "# Place the colorbar legend vertically and to the right side ############################################################################# t_asd.data.priority", "for the legend # first move the inner plot t_asd.data.x2 = 0.87 #", "make space for the legend # first move the inner plot t_asd.data.x2 =", "from the page. t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority =", "ymean_current = t_asd.mean.y # now change the placement t_asd.mean.x=0.5 # move the \"Mean\"", "dimensional data set and get a subset of the time dimension data =", "numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the tuturial import vcs,", "'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional data set", "vertial and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas", "cdms.open( filepath ) # Extract a 3 dimensional data set and get a", "cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.)) # Initial VCS: v = vcs.init()", "first move the inner plot t_asd.data.x2 = 0.87 # then move the sorrounding", "for x and y coordinates xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y # now", "0 t_asd.ytic2.priority = 0 # move the colorbar legend position, to be vertial", "os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open(", "new template from the existing 'ASD' template t2_asd = v.createtemplate( 'new', 'ASD' )", "Assign the variable \"t_asd\" to 
the persistent 'ASD' template. t_asd = v.gettemplate( 'ASD'", "90.)) # Initial VCS: v = vcs.init() # Assign the variable \"t_asd\" to", "text to y-axis center t_asd.data.priority = 0 # remove the data so the", "template t2_asd = v.createtemplate( 'new', 'ASD' ) # Plot the data using the", "to x-axis center t_asd.mean.y=0.5 # move the \"Mean\" text to y-axis center t_asd.data.priority", "t_asd = v.gettemplate( 'ASD' ) # Create a new template from the existing", "t_asd.legend.list() # list the legend members v.mode=0 # turn the automatic update off", "of a plot to the left to make space for the legend #", "plot to the left to make space for the legend # first move", "= 0 # move the colorbar legend position, to be vertial and to", "sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath", "move the inner plot t_asd.data.x2 = 0.87 # then move the sorrounding box", "Remove picture segments from the page. t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority =", "0.87 # then move the sorrounding box - the right y-axis t_asd.box1.x2 =", "# Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath )", "t_asd.box1.x2 = 0.87 # set the top x-axis (secind y axis) to be", "y coordinates xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y # now change the placement", "the \"Mean\" text to x-axis center t_asd.mean.y=0.5 # move the \"Mean\" text to", "= os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional", "t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save current", "data using the above 'ASD' template. 
v.plot( data, t_asd ) # Remove picture", "v.update() ############################################################################# # Place the colorbar legend vertically and to the right side", "t_asd.mean.x ymean_current = t_asd.mean.y # now change the placement t_asd.mean.x=0.5 # move the", "'ASD' template. t_asd = v.gettemplate( 'ASD' ) # Create a new template from", "right y-axis (second y axis) to be blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority", "picture segments from the page. t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0", "was t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current # move the right side of", "x and y coordinates xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y # now change", "the persistent 'ASD' template. t_asd = v.gettemplate( 'ASD' ) # Create a new", "a 3 dimensional data set and get a subset of the time dimension", "t_asd.mean.y = ymean_current # move the right side of a plot to the", "'ASD' ) # Create a new template from the existing 'ASD' template t2_asd", "# Remove picture segments from the page. t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority", "os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional data", ") # Extract a 3 dimensional data set and get a subset of", "# move the \"Mean\" text to y-axis center t_asd.data.priority = 0 # remove", "a subset of the time dimension data = cdmsfile('clt', longitude=(-180, 180), latitude =", "y-axis (second y axis) to be blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority =", "template. t_asd = v.gettemplate( 'ASD' ) # Create a new template from the", "xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y # now change the placement t_asd.mean.x=0.5 #", "vcs.init() # Assign the variable \"t_asd\" to the persistent 'ASD' template. 
t_asd =", "legend position, to be vertial and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3", "the right side of a plot to the left to make space for", "data, t_asd ) # Remove picture segments from the page. t_asd.list( ) t_asd.xlabel2.priority", "t_asd ) # Remove picture segments from the page. t_asd.list( ) t_asd.xlabel2.priority =", "# save current 'Mean' placemant for x and y coordinates xmean_current = t_asd.mean.x", "as cdms, cdutil, time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data,", "above 'ASD' template. v.plot( data, t_asd ) # Remove picture segments from the", "\"Mean\" text to y-axis center t_asd.data.priority = 0 # remove the data so", "to be vertial and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear", "= 0 t_asd.xtic2.priority = 0 # set the right y-axis (second y axis)", "and to the right side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority = 1 t_asd.legend.list()", "# Initial VCS: v = vcs.init() # Assign the variable \"t_asd\" to the", "1 t_asd.legend.list() # list the legend members v.mode=0 # turn the automatic update", "be blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0 # move the colorbar", "off # move 'Mean' text back where it was t_asd.mean.x = xmean_current t_asd.mean.y", "= 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save current 'Mean'", "0 # set the right y-axis (second y axis) to be blank (priority=0)", "right side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority = 1 t_asd.legend.list() # list the", "legend vertically and to the right side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority =", "Initial VCS: v = vcs.init() # Assign the 
variable \"t_asd\" to the persistent", "0 # remove the data so the \"Mean\" text is visable. v.update() #############################################################################", "cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional data set and", "move the colorbar legend position, to be vertial and to the right t_asd.legend.x1=0.9", "= t_asd.mean.y # now change the placement t_asd.mean.x=0.5 # move the \"Mean\" text", "Adapted for numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the tuturial", "'Mean' text back where it was t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current #", "colorbar legend vertically and to the right side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority", "the data using the above 'ASD' template. v.plot( data, t_asd ) # Remove", "t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save current 'Mean' placemant for", "= 0 # remove the data so the \"Mean\" text is visable. v.update()", "0.87 # set the top x-axis (secind y axis) to be blank t_asd.xlabel2.priority", "using the above 'ASD' template. v.plot( data, t_asd ) # Remove picture segments", "latitude = (-90., 90.)) # Initial VCS: v = vcs.init() # Assign the", "for numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the tuturial import", "update off # move 'Mean' text back where it was t_asd.mean.x = xmean_current", "cdutil, time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile", "y axis) to be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 # set", "text is visable. 
v.update() ############################################################################# # Place the colorbar legend vertically and to", "Extract a 3 dimensional data set and get a subset of the time", "the modules needed for the tuturial import vcs, cdms2 as cdms, cdutil, time,", "data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract", "# move 'Mean' text back where it was t_asd.mean.x = xmean_current t_asd.mean.y =", "by convertcdms.py # Import the modules needed for the tuturial import vcs, cdms2", "t_asd.mean.x=0.5 # move the \"Mean\" text to x-axis center t_asd.mean.y=0.5 # move the", ") t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save", "tuturial import vcs, cdms2 as cdms, cdutil, time, os, sys # Open data", "blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 # set the right y-axis (second", "placement t_asd.mean.x=0.5 # move the \"Mean\" text to x-axis center t_asd.mean.y=0.5 # move", "t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas and plot the template again", "3 dimensional data set and get a subset of the time dimension data", "the top x-axis (secind y axis) to be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority", "it was t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current # move the right side", "# turn the automatic update off # move 'Mean' text back where it", "t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas and plot the template again v.clear() v.plot(", "change the placement t_asd.mean.x=0.5 # move the \"Mean\" text to x-axis center t_asd.mean.y=0.5", "t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current # move the right side of a", "y axis) to be blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0 #", "= xmean_current t_asd.mean.y = ymean_current # move the right side of a plot", "of the time dimension data = cdmsfile('clt', 
longitude=(-180, 180), latitude = (-90., 90.))", "\"t_asd\" to the persistent 'ASD' template. t_asd = v.gettemplate( 'ASD' ) # Create", "= 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save current 'Mean' placemant for x", "t_asd.legend.priority = 1 t_asd.legend.list() # list the legend members v.mode=0 # turn the", "the page. t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0", "set the top x-axis (secind y axis) to be blank t_asd.xlabel2.priority = 0", "v.plot( data, t_asd ) # Remove picture segments from the page. t_asd.list( )", "Place the colorbar legend vertically and to the right side ############################################################################# t_asd.data.priority =", "the data so the \"Mean\" text is visable. v.update() ############################################################################# # Place the", ") # Create a new template from the existing 'ASD' template t2_asd =", "axis) to be blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0 # move", "variable \"t_asd\" to the persistent 'ASD' template. t_asd = v.gettemplate( 'ASD' ) #", "center t_asd.data.priority = 0 # remove the data so the \"Mean\" text is", "t_asd.ytic2.priority = 0 # move the colorbar legend position, to be vertial and", "t_asd.legend.priority=0 # save current 'Mean' placemant for x and y coordinates xmean_current =", "t_asd.xtic2.priority = 0 t_asd.legend.priority=0 # save current 'Mean' placemant for x and y", "'ASD' template. 
v.plot( data, t_asd ) # Remove picture segments from the page.", "t_asd.mean.y # now change the placement t_asd.mean.x=0.5 # move the \"Mean\" text to", "placemant for x and y coordinates xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y #", "time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile =", "# Extract a 3 dimensional data set and get a subset of the", "the automatic update off # move 'Mean' text back where it was t_asd.mean.x", "filepath ) # Extract a 3 dimensional data set and get a subset", "0 t_asd.xtic2.priority = 0 # set the right y-axis (second y axis) to", "is visable. v.update() ############################################################################# # Place the colorbar legend vertically and to the", "convertcdms.py # Import the modules needed for the tuturial import vcs, cdms2 as", "side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority = 1 t_asd.legend.list() # list the legend", "move the sorrounding box - the right y-axis t_asd.box1.x2 = 0.87 # set", "(priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0 # move the colorbar legend position,", "automatic update off # move 'Mean' text back where it was t_asd.mean.x =", "the colorbar legend vertically and to the right side ############################################################################# t_asd.data.priority = 1", "v = vcs.init() # Assign the variable \"t_asd\" to the persistent 'ASD' template.", "from the existing 'ASD' template t2_asd = v.createtemplate( 'new', 'ASD' ) # Plot", "= 0 t_asd.ytic2.priority = 0 # move the colorbar legend position, to be", "0 # move the colorbar legend position, to be vertial and to the", "- the right y-axis t_asd.box1.x2 = 0.87 # set the top x-axis (secind", "get a subset of the time dimension data = cdmsfile('clt', longitude=(-180, 180), latitude", "= 0.87 # then move the sorrounding box - the right y-axis 
t_asd.box1.x2", "the \"Mean\" text to y-axis center t_asd.data.priority = 0 # remove the data", "the \"Mean\" text is visable. v.update() ############################################################################# # Place the colorbar legend vertically", "the inner plot t_asd.data.x2 = 0.87 # then move the sorrounding box -", "the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas and plot the", "= (-90., 90.)) # Initial VCS: v = vcs.init() # Assign the variable", "# now change the placement t_asd.mean.x=0.5 # move the \"Mean\" text to x-axis", "t_asd.legend.y2=0.3 # clear the canvas and plot the template again v.clear() v.plot( data,", "members v.mode=0 # turn the automatic update off # move 'Mean' text back", "be vertial and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the", "be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 # set the right y-axis", "t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 # set the right y-axis (second y", "= 1 t_asd.legend.priority = 1 t_asd.legend.list() # list the legend members v.mode=0 #", "t2_asd = v.createtemplate( 'new', 'ASD' ) # Plot the data using the above", "template. v.plot( data, t_asd ) # Remove picture segments from the page. t_asd.list(", "the left to make space for the legend # first move the inner", "legend # first move the inner plot t_asd.data.x2 = 0.87 # then move", "visable. v.update() ############################################################################# # Place the colorbar legend vertically and to the right", "the variable \"t_asd\" to the persistent 'ASD' template. 
t_asd = v.gettemplate( 'ASD' )", "cdms2 as cdms, cdutil, time, os, sys # Open data file: filepath =", "turn the automatic update off # move 'Mean' text back where it was", "top x-axis (secind y axis) to be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority =", "= cdms.open( filepath ) # Extract a 3 dimensional data set and get", "subset of the time dimension data = cdmsfile('clt', longitude=(-180, 180), latitude = (-90.,", "= t_asd.mean.x ymean_current = t_asd.mean.y # now change the placement t_asd.mean.x=0.5 # move", "coordinates xmean_current = t_asd.mean.x ymean_current = t_asd.mean.y # now change the placement t_asd.mean.x=0.5", "= 1 t_asd.legend.list() # list the legend members v.mode=0 # turn the automatic", "blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0 # move the colorbar legend", "position, to be vertial and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 #", "right y-axis t_asd.box1.x2 = 0.87 # set the top x-axis (secind y axis)", "t_asd.xtic2.priority = 0 # set the right y-axis (second y axis) to be", "for the tuturial import vcs, cdms2 as cdms, cdutil, time, os, sys #", "colorbar legend position, to be vertial and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95", "x-axis (secind y axis) to be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0", "template from the existing 'ASD' template t2_asd = v.createtemplate( 'new', 'ASD' ) #", "import vcs, cdms2 as cdms, cdutil, time, os, sys # Open data file:", "to be blank (priority=0) t_asd.ylabel2.priority = 0 t_asd.ytic2.priority = 0 # move the", "left to make space for the legend # first move the inner plot", "needed for the tuturial import vcs, cdms2 as cdms, cdutil, time, os, sys", "the colorbar legend position, to be vertial and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82", "= 0 # set the right y-axis (second y axis) to be blank", "text to x-axis center t_asd.mean.y=0.5 # move the 
\"Mean\" text to y-axis center", "plot t_asd.data.x2 = 0.87 # then move the sorrounding box - the right", "time dimension data = cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.)) # Initial", "save current 'Mean' placemant for x and y coordinates xmean_current = t_asd.mean.x ymean_current", "data so the \"Mean\" text is visable. v.update() ############################################################################# # Place the colorbar", "Create a new template from the existing 'ASD' template t2_asd = v.createtemplate( 'new',", "the legend # first move the inner plot t_asd.data.x2 = 0.87 # then", "'ASD' template t2_asd = v.createtemplate( 'new', 'ASD' ) # Plot the data using", "side of a plot to the left to make space for the legend", "# clear the canvas and plot the template again v.clear() v.plot( data, t_asd", "right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas and plot the template", "longitude=(-180, 180), latitude = (-90., 90.)) # Initial VCS: v = vcs.init() #", "filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3", "sorrounding box - the right y-axis t_asd.box1.x2 = 0.87 # set the top", "# first move the inner plot t_asd.data.x2 = 0.87 # then move the", "set the right y-axis (second y axis) to be blank (priority=0) t_asd.ylabel2.priority =", "= ymean_current # move the right side of a plot to the left", "the above 'ASD' template. 
v.plot( data, t_asd ) # Remove picture segments from", "vertically and to the right side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority = 1", "# move the colorbar legend position, to be vertial and to the right", "t_asd.data.x2 = 0.87 # then move the sorrounding box - the right y-axis", "the existing 'ASD' template t2_asd = v.createtemplate( 'new', 'ASD' ) # Plot the", "(secind y axis) to be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 #", "and to the right t_asd.legend.x1=0.9 t_asd.legend.y1=0.82 t_asd.legend.x2=0.95 t_asd.legend.y2=0.3 # clear the canvas and", "file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a", "v.createtemplate( 'new', 'ASD' ) # Plot the data using the above 'ASD' template.", "text back where it was t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current # move", "= v.gettemplate( 'ASD' ) # Create a new template from the existing 'ASD'", "inner plot t_asd.data.x2 = 0.87 # then move the sorrounding box - the", "axis) to be blank t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 # set the", "# move the right side of a plot to the left to make", "page. t_asd.list( ) t_asd.xlabel2.priority = 0 t_asd.xtic2.priority = 0 t_asd.xtic2.priority = 0 t_asd.legend.priority=0", "move the right side of a plot to the left to make space", "# then move the sorrounding box - the right y-axis t_asd.box1.x2 = 0.87", "the sorrounding box - the right y-axis t_asd.box1.x2 = 0.87 # set the", "'new', 'ASD' ) # Plot the data using the above 'ASD' template. 
v.plot(", "y-axis t_asd.box1.x2 = 0.87 # set the top x-axis (secind y axis) to", "the right y-axis (second y axis) to be blank (priority=0) t_asd.ylabel2.priority = 0", "cdms, cdutil, time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc')", "dimension data = cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.)) # Initial VCS:", "# set the top x-axis (secind y axis) to be blank t_asd.xlabel2.priority =", "Plot the data using the above 'ASD' template. v.plot( data, t_asd ) #", "# Adapted for numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the", "t_asd.data.priority = 1 t_asd.legend.priority = 1 t_asd.legend.list() # list the legend members v.mode=0", "the right side ############################################################################# t_asd.data.priority = 1 t_asd.legend.priority = 1 t_asd.legend.list() # list", "current 'Mean' placemant for x and y coordinates xmean_current = t_asd.mean.x ymean_current =", "= vcs.init() # Assign the variable \"t_asd\" to the persistent 'ASD' template. t_asd", "to the left to make space for the legend # first move the", "modules needed for the tuturial import vcs, cdms2 as cdms, cdutil, time, os,", "move 'Mean' text back where it was t_asd.mean.x = xmean_current t_asd.mean.y = ymean_current" ]
[ "0: print(\"Data not installed, no genomes provided with `--genomes` flag\") sys.exit(1) elif len(args.aligners)", "hasattr(args, \"image\") or not args.image: default_args = _get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args)", "= default_args[\"image\"] else: args.image = DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add previously saved", "find docker image %s in local repository\" % args.image) def docker_image_arg(args): if not", "hasattr(args, \"image\") or not args.image: if default_args.get(\"image\") and not default_args.get(\"images\") == \"None\": args.image", "updates.append(\"bcbio-nextgen code and third party tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure external galaxy", "def _get_cl(args): clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\",", "to installation to be used on subsequent upgrades. Avoids needing to re-include genomes", "if args.install_tools: updates.append(\"bcbio-nextgen code and third party tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure", "docker image %s in local repository\" % args.image) def docker_image_arg(args): if not hasattr(args,", "data. 
\"\"\" updates = [] args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm()", "for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if len(parts) > 1 and", "docker_image_arg(args): if not hasattr(args, \"image\") or not args.image: default_args = _get_install_defaults(args) args =", "# Ensure external galaxy configuration in sync when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts,", "def pull(args, dockerconf): \"\"\"Pull down latest docker image, using export uploaded to S3", "out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args) if install_config and os.path.exists(install_config) and", "subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if len(parts) > 1 and parts[0] == args.image:", "import yaml from bcbiovm.docker import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf):", "upgrade a bcbio-nextgen installation. \"\"\" from __future__ import print_function import os import subprocess", "\"\"\"Upgrade bcbio-nextgen-vm wrapper code. \"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot", "but upload size is currently smaller with an exported gzipped image. \"\"\" print(\"Retrieving", "def _check_docker_image(args): \"\"\"Ensure docker image exists. 
\"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts", "os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed with conda\") else:", "parts = image.split() if len(parts) > 1 and parts[0] == args.image: return raise", "`--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates:", "%s in local repository\" % args.image) def docker_image_arg(args): if not hasattr(args, \"image\") or", "and parts[0] == args.image: return raise ValueError(\"Could not find docker image %s in", "print_function import os import subprocess import sys import yaml from bcbiovm.docker import manage,", "subsequent upgrades. Avoids needing to re-include genomes and aligners on command line. \"\"\"", "from __future__ import print_function import os import subprocess import sys import yaml from", "in [\"genomes\", \"aligners\"]: for x in default_args.get(attr, []): new_val = getattr(args, attr) if", "configuration in sync when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if", "server but upload size is currently smaller with an exported gzipped image. \"\"\"", "import sys import yaml from bcbiovm.docker import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def", "size is currently smaller with an exported gzipped image. \"\"\" print(\"Retrieving bcbio-nextgen docker", "bcbio-nextgen docker image with code and tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert args.image,", "args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if", "or upgrade a bcbio-nextgen installation. 
\"\"\" from __future__ import print_function import os import", "args.image with open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config", "args.genomes: clargs.extend([\"--genomes\", g]) for a in args.aligners: clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm():", "'bcbio_vm.py upgrade -h' for more details.\") def _get_cl(args): clargs = [\"upgrade\"] if args.install_data:", "tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified image name for docker import\"", "1 and parts[0] == args.image: return raise ValueError(\"Could not find docker image %s", "in [\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args,", "repository\" % args.image) def docker_image_arg(args): if not hasattr(args, \"image\") or not args.image: default_args", "sys.exit(1) elif len(args.aligners) == 0: print(\"Data not installed, no aligners provided with `--aligners`", "= _add_docker_defaults(args, default_args) _check_docker_image(args) return args def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\") if", "= _get_install_defaults(args) for attr in [\"genomes\", \"aligners\"]: for x in default_args.get(attr, []): new_val", "install_config is None: return if os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as", "= _add_docker_defaults(args, default_args) return args def _check_docker_image(args): \"\"\"Ensure docker image exists. 
\"\"\" for", "[\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\", g]) for a in", "_get_install_defaults(args): install_config = _get_config_file(args) if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0: with", "\"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction of docker image and data. \"\"\" updates", "as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args) if install_config", "x not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args = _add_docker_defaults(args, default_args)", "bcbio-nextgen-vm; not installed with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def", "tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure external galaxy configuration in sync when doing", "exported gzipped image. 
\"\"\" print(\"Retrieving bcbio-nextgen docker image with code and tools\") #", "not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if x not", "not installed, no genomes provided with `--genomes` flag\") sys.exit(1) elif len(args.aligners) == 0:", "def _add_docker_defaults(args, default_args): if not hasattr(args, \"image\") or not args.image: if default_args.get(\"image\") and", "args.image: default_args = _get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args) return args def _get_config_file(args):", "sync when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes) ==", "code and tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified image name for", "getattr(args, attr) if x not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args", "cur_config = yaml.load(in_handle) else: cur_config = {} for attr in [\"genomes\", \"aligners\"]: if", "docker image and data. \"\"\" updates = [] args = add_install_defaults(args) if args.wrapper:", "code. \"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not", "\"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull down latest docker image, using export uploaded", "on subsequent upgrades. Avoids needing to re-include genomes and aligners on command line.", "image and data. \"\"\" updates = [] args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper", "DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction of docker image and data.", "for attr in [\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr] = [] for x", "_check_docker_image(args): \"\"\"Ensure docker image exists. 
\"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts =", "re-include genomes and aligners on command line. \"\"\" install_config = _get_config_file(args) if install_config", "manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes) == 0: print(\"Data not installed, no", "_get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args) return args def _get_config_file(args): config_dir = os.path.join(args.datadir,", "down latest docker image, using export uploaded to S3 bucket. Long term plan", "conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull down", "Avoids needing to re-include genomes and aligners on command line. \"\"\" install_config =", "!= DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image with open(install_config, \"w\") as out_handle: yaml.dump(cur_config,", "args def _check_docker_image(args): \"\"\"Ensure docker image exists. 
\"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"):", "[\"genomes\", \"aligners\"]: for x in default_args.get(attr, []): new_val = getattr(args, attr) if x", "\"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args) if", "docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed to installation", "for a in args.aligners: clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper", "default_args = _get_install_defaults(args) for attr in [\"genomes\", \"aligners\"]: for x in default_args.get(attr, []):", "return if os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: cur_config =", "default_args.get(\"image\") and not default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"] else: args.image = DEFAULT_IMAGE", "not installed with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args,", "in local repository\" % args.image) def docker_image_arg(args): if not hasattr(args, \"image\") or not", "installation to be used on subsequent upgrades. Avoids needing to re-include genomes and", "currently smaller with an exported gzipped image. \"\"\" print(\"Retrieving bcbio-nextgen docker image with", "to command line arguments. 
\"\"\" default_args = _get_install_defaults(args) for attr in [\"genomes\", \"aligners\"]:", "\"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull down latest docker image,", "details.\") def _get_cl(args): clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g in args.genomes:", "used on subsequent upgrades. Avoids needing to re-include genomes and aligners on command", "latest docker image, using export uploaded to S3 bucket. Long term plan is", "\"See 'bcbio_vm.py upgrade -h' for more details.\") def _get_cl(args): clargs = [\"upgrade\"] if", "\"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull down latest docker image, using export uploaded to", "args = _add_docker_defaults(args, default_args) _check_docker_image(args) return args def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\")", "args.aligners: clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. 
\"\"\" conda_bin", "subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull down latest docker", "pull(args, dockerconf): \"\"\"Pull down latest docker image, using export uploaded to S3 bucket.", "{} for attr in [\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr] = [] for", "import print_function import os import subprocess import sys import yaml from bcbiovm.docker import", "need '--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for more details.\") def", "import subprocess import sys import yaml from bcbiovm.docker import manage, mounts DEFAULT_IMAGE =", "print(\"Data not installed, no aligners provided with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\")", "_check_docker_image(args) # Ensure external galaxy configuration in sync when doing tool upgrade manage.run_bcbio_cmd(args.image,", "`--genomes` flag\") sys.exit(1) elif len(args.aligners) == 0: print(\"Data not installed, no aligners provided", "else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with", "flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm", "for more details.\") def _get_cl(args): clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g", "None: return if os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: cur_config", "arguments. 
\"\"\" default_args = _get_install_defaults(args) for attr in [\"genomes\", \"aligners\"]: for x in", "add_install_defaults(args): \"\"\"Add previously saved installation defaults to command line arguments. \"\"\" default_args =", "\"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if len(parts) > 1", "bcbio-nextgen installation. \"\"\" from __future__ import print_function import os import subprocess import sys", "\"aligners\"]: for x in default_args.get(attr, []): new_val = getattr(args, attr) if x not", "galaxy configuration in sync when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data:", "\"aligners\"]: if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if", "import os import subprocess import sys import yaml from bcbiovm.docker import manage, mounts", "updates = [] args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts =", "default_args = _get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args) return args def _get_config_file(args): config_dir", "local repository\" % args.image) def docker_image_arg(args): if not hasattr(args, \"image\") or not args.image:", "sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated", "new_val = getattr(args, attr) if x not in getattr(args, attr): new_val.append(x) setattr(args, attr,", "mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and third party tools\") pull(args, dockerconf) _check_docker_image(args)", "be used on subsequent upgrades. 
Avoids needing to re-include genomes and aligners on", "DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image with open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle,", "attr): if x not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and args.image:", "print(\"\\nbcbio-nextgen-vm updated with latest %s\" % \" and \".join(updates)) else: print(\"\\nNo update targets", "def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. \"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not", "getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and", "from bcbiovm.docker import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction", "\"\"\"Pull down latest docker image, using export uploaded to S3 bucket. Long term", "args.image, \"Unspecified image name for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args):", "\"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed to installation to be used", "image]) assert args.image, \"Unspecified image name for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image])", "needing to re-include genomes and aligners on command line. 
\"\"\" install_config = _get_config_file(args)", "getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args = _add_docker_defaults(args, default_args) return args def", "\"\"\" default_args = _get_install_defaults(args) for attr in [\"genomes\", \"aligners\"]: for x in default_args.get(attr,", "= args.image with open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args):", "bcbiovm.docker import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction of", "and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: cur_config = yaml.load(in_handle) else: cur_config", "for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed to", "[\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr):", "\"\"\" install_config = _get_config_file(args) if install_config is None: return if os.path.exists(install_config) and os.path.getsize(install_config)", "\"\"\"Save arguments passed to installation to be used on subsequent upgrades. Avoids needing", "in_handle: return yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args): if not hasattr(args, \"image\") or", "not hasattr(args, \"image\") or not args.image: default_args = _get_install_defaults(args) args = _add_docker_defaults(args, default_args)", "_save_install_defaults(args): \"\"\"Save arguments passed to installation to be used on subsequent upgrades. 
Avoids", "if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: return", "g in args.genomes: clargs.extend([\"--genomes\", g]) for a in args.aligners: clargs.extend([\"--aligners\", a]) return clargs", "_add_docker_defaults(args, default_args) return args def _check_docker_image(args): \"\"\"Ensure docker image exists. \"\"\" for image", "installaction of docker image and data. \"\"\" updates = [] args = add_install_defaults(args)", "update targets specified, need '--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for", "subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified image name for docker import\" subprocess.check_call([\"docker\", \"import\",", "attr in [\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr] = [] for x in", "[] args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"])", "def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\") if not os.path.exists(config_dir): os.makedirs(config_dir) return os.path.join(config_dir, \"install-params.yaml\")", "with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull", "new_val) args = _add_docker_defaults(args, default_args) return args def _check_docker_image(args): \"\"\"Ensure docker image exists.", "upload size is currently smaller with an exported gzipped image. 
\"\"\" print(\"Retrieving bcbio-nextgen", "if os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: cur_config = yaml.load(in_handle)", "code and third party tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure external galaxy configuration", "genomes provided with `--genomes` flag\") sys.exit(1) elif len(args.aligners) == 0: print(\"Data not installed,", "os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: cur_config = yaml.load(in_handle) else:", "return raise ValueError(\"Could not find docker image %s in local repository\" % args.image)", "with an exported gzipped image. \"\"\" print(\"Retrieving bcbio-nextgen docker image with code and", "bucket. Long term plan is to use the docker index server but upload", "default_args) return args def _check_docker_image(args): \"\"\"Ensure docker image exists. \"\"\" for image in", "subprocess import sys import yaml from bcbiovm.docker import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\"", "image with code and tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified image", "uploaded to S3 bucket. Long term plan is to use the docker index", "install_config = _get_config_file(args) if install_config is None: return if os.path.exists(install_config) and os.path.getsize(install_config) >", "args.image) def docker_image_arg(args): if not hasattr(args, \"image\") or not args.image: default_args = _get_install_defaults(args)", "\"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull down latest docker image, using", "full(args, dockerconf): \"\"\"Full installaction of docker image and data. 
\"\"\" updates = []", "with code and tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified image name", "scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and third party", "yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args) if install_config and os.path.exists(install_config)", "as in_handle: cur_config = yaml.load(in_handle) else: cur_config = {} for attr in [\"genomes\",", "attr) if x not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args =", "= [] args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir,", "docker image, using export uploaded to S3 bucket. Long term plan is to", "= yaml.load(in_handle) else: cur_config = {} for attr in [\"genomes\", \"aligners\"]: if not", "clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\", g]) for a in args.aligners: clargs.extend([\"--aligners\", a])", "line arguments. \"\"\" default_args = _get_install_defaults(args) for attr in [\"genomes\", \"aligners\"]: for x", "% args.image) def docker_image_arg(args): if not hasattr(args, \"image\") or not args.image: default_args =", "= _get_config_file(args) if install_config is None: return if os.path.exists(install_config) and os.path.getsize(install_config) > 0:", "pull(args, dockerconf) _check_docker_image(args) # Ensure external galaxy configuration in sync when doing tool", "with open(install_config) as in_handle: cur_config = yaml.load(in_handle) else: cur_config = {} for attr", "cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if x not in", "attr in [\"genomes\", \"aligners\"]: for x in default_args.get(attr, []): new_val = getattr(args, attr)", "line. 
\"\"\" install_config = _get_config_file(args) if install_config is None: return if os.path.exists(install_config) and", "len(args.aligners) == 0: print(\"Data not installed, no aligners provided with `--aligners` flag\") sys.exit(1)", "print(\"Data not installed, no genomes provided with `--genomes` flag\") sys.exit(1) elif len(args.aligners) ==", "no aligners provided with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts,", "= [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\", g]) for a", "and tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified image name for docker", "g]) for a in args.aligners: clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm", "open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args)", "def add_install_defaults(args): \"\"\"Add previously saved installation defaults to command line arguments. \"\"\" default_args", "third party tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure external galaxy configuration in sync", "= _get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args) return args def _get_config_file(args): config_dir =", "== \"None\": args.image = default_args[\"image\"] else: args.image = DEFAULT_IMAGE return args def add_install_defaults(args):", "docker image exists. 
\"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if", "os.path.getsize(install_config) > 0: with open(install_config) as in_handle: return yaml.load(in_handle) return {} def _add_docker_defaults(args,", "default_args[\"image\"] else: args.image = DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add previously saved installation", "import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction of docker", "with open(install_config) as in_handle: return yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args): if not", "[] for x in getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(str(x)) if", "args = _add_docker_defaults(args, default_args) return args def _check_docker_image(args): \"\"\"Ensure docker image exists. \"\"\"", "if x not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args = _add_docker_defaults(args,", "0: print(\"Data not installed, no aligners provided with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological", "if not hasattr(args, \"image\") or not args.image: default_args = _get_install_defaults(args) args = _add_docker_defaults(args,", "upgrade -h' for more details.\") def _get_cl(args): clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\")", "the docker index server but upload size is currently smaller with an exported", "in_handle: cur_config = yaml.load(in_handle) else: cur_config = {} for attr in [\"genomes\", \"aligners\"]:", "updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\" % \" and \".join(updates)) else: print(\"\\nNo update", "more details.\") def _get_cl(args): clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g in", "else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, 
dockerconf): \"\"\"Pull down latest", "args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed to installation to be used on subsequent", "= _get_config_file(args) if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as", "image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if len(parts) > 1 and parts[0]", "yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args): if not hasattr(args, \"image\") or not args.image:", "specified, need '--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for more details.\")", "default_args): if not hasattr(args, \"image\") or not args.image: if default_args.get(\"image\") and not default_args.get(\"images\")", "[]): new_val = getattr(args, attr) if x not in getattr(args, attr): new_val.append(x) setattr(args,", "upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and third party tools\")", "in args.aligners: clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. \"\"\"", "-h' for more details.\") def _get_cl(args): clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for", "x not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"] =", "external galaxy configuration in sync when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if", "= os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed with conda\")", "image exists. \"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if len(parts)", "smaller with an exported gzipped image. 
\"\"\" print(\"Retrieving bcbio-nextgen docker image with code", "dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed to installation to be used on", "saved installation defaults to command line arguments. \"\"\" default_args = _get_install_defaults(args) for attr", "\"\"\"Install or upgrade a bcbio-nextgen installation. \"\"\" from __future__ import print_function import os", "passed to installation to be used on subsequent upgrades. Avoids needing to re-include", "data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\"", "# subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified image name for docker import\" subprocess.check_call([\"docker\",", "a in args.aligners: clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code.", "an exported gzipped image. \"\"\" print(\"Retrieving bcbio-nextgen docker image with code and tools\")", "updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with latest", "new_val.append(x) setattr(args, attr, new_val) args = _add_docker_defaults(args, default_args) return args def _check_docker_image(args): \"\"\"Ensure", "term plan is to use the docker index server but upload size is", "in cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image with", "to re-include genomes and aligners on command line. 
\"\"\" install_config = _get_config_file(args) if", "allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args) if install_config and os.path.exists(install_config) and os.path.getsize(install_config) >", "_get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\" % \" and \".join(updates))", "return {} def _add_docker_defaults(args, default_args): if not hasattr(args, \"image\") or not args.image: if", "\"conda\") if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed with conda\") else: subprocess.check_call([conda_bin,", "provided with `--genomes` flag\") sys.exit(1) elif len(args.aligners) == 0: print(\"Data not installed, no", "%s\" % \" and \".join(updates)) else: print(\"\\nNo update targets specified, need '--wrapper', '--tools'", "args def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\") if not os.path.exists(config_dir): os.makedirs(config_dir) return os.path.join(config_dir,", "> 0: with open(install_config) as in_handle: return yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args):", "and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: return yaml.load(in_handle) return {} def", "= add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools:", "dockerconf) _check_docker_image(args) # Ensure external galaxy configuration in sync when doing tool upgrade", "\" and \".join(updates)) else: print(\"\\nNo update targets specified, need '--wrapper', '--tools' or '--data'\\n\"", "args.install_data: clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\", g]) for a in args.aligners: clargs.extend([\"--aligners\",", "updated with latest %s\" % \" and \".join(updates)) else: print(\"\\nNo update targets specified,", 
"manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction of docker image", "'--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for more details.\") def _get_cl(args): clargs", "use the docker index server but upload size is currently smaller with an", "_get_config_file(args) if install_config is None: return if os.path.exists(install_config) and os.path.getsize(install_config) > 0: with", "print(\"Cannot update bcbio-nextgen-vm; not installed with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\",", "args.image = DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add previously saved installation defaults to", "aligners provided with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args))", "image name for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments", "if install_config is None: return if os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config)", "cur_config[\"image\"] = args.image with open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def", "_check_docker_image(args) return args def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\") if not os.path.exists(config_dir): os.makedirs(config_dir)", "on command line. 
\"\"\" install_config = _get_config_file(args) if install_config is None: return if", "cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image with open(install_config,", "_check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\" %", "dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and third party tools\") pull(args, dockerconf) _check_docker_image(args) #", "docker index server but upload size is currently smaller with an exported gzipped", "_save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\" % \" and \".join(updates)) else:", "x in getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image !=", "= DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add previously saved installation defaults to command", "not args.image: default_args = _get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args) return args def", "else: args.image = DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add previously saved installation defaults", "flag\") sys.exit(1) elif len(args.aligners) == 0: print(\"Data not installed, no aligners provided with", "when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes) == 0:", "args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and", "update bcbio-nextgen-vm; not installed with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"])", "\"\"\" print(\"Retrieving 
bcbio-nextgen docker image with code and tools\") # subprocess.check_call([\"docker\", \"pull\", image])", "0: with open(install_config) as in_handle: return yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args): if", "not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\",", "cur_config = {} for attr in [\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr] =", "\".join(updates)) else: print(\"\\nNo update targets specified, need '--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py", "provided with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args)", "DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add previously saved installation defaults to command line", "'--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for more details.\") def _get_cl(args): clargs = [\"upgrade\"]", "x in default_args.get(attr, []): new_val = getattr(args, attr) if x not in getattr(args,", "if args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image with open(install_config, \"w\") as", "os import subprocess import sys import yaml from bcbiovm.docker import manage, mounts DEFAULT_IMAGE", "aligners on command line. \"\"\" install_config = _get_config_file(args) if install_config is None: return", "to be used on subsequent upgrades. Avoids needing to re-include genomes and aligners", "in default_args.get(attr, []): new_val = getattr(args, attr) if x not in getattr(args, attr):", "image, using export uploaded to S3 bucket. 
Long term plan is to use", "return yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args): if not hasattr(args, \"image\") or not", "dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and third party tools\") pull(args,", "no genomes provided with `--genomes` flag\") sys.exit(1) elif len(args.aligners) == 0: print(\"Data not", "args.install_tools: updates.append(\"bcbio-nextgen code and third party tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure external", "if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code", "and aligners on command line. \"\"\" install_config = _get_config_file(args) if install_config is None:", "install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: return yaml.load(in_handle)", "party tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure external galaxy configuration in sync when", "\"\"\"Add previously saved installation defaults to command line arguments. \"\"\" default_args = _get_install_defaults(args)", "add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen", "raise ValueError(\"Could not find docker image %s in local repository\" % args.image) def", "0: with open(install_config) as in_handle: cur_config = yaml.load(in_handle) else: cur_config = {} for", "image. 
\"\"\" print(\"Retrieving bcbio-nextgen docker image with code and tools\") # subprocess.check_call([\"docker\", \"pull\",", "for g in args.genomes: clargs.extend([\"--genomes\", g]) for a in args.aligners: clargs.extend([\"--aligners\", a]) return", "S3 bucket. Long term plan is to use the docker index server but", "dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes) == 0: print(\"Data not installed, no genomes", "\"\"\"Ensure docker image exists. \"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split()", "clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\", g]) for", "print(\"\\nNo update targets specified, need '--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h'", "clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. \"\"\" conda_bin =", "upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes) == 0: print(\"Data not installed,", "in getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE", "> 1 and parts[0] == args.image: return raise ValueError(\"Could not find docker image", "with `--genomes` flag\") sys.exit(1) elif len(args.aligners) == 0: print(\"Data not installed, no aligners", "upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. 
\"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin):", "sys import yaml from bcbiovm.docker import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args,", "is to use the docker index server but upload size is currently smaller", "== 0: print(\"Data not installed, no aligners provided with `--aligners` flag\") sys.exit(1) else:", "{} def _add_docker_defaults(args, default_args): if not hasattr(args, \"image\") or not args.image: if default_args.get(\"image\")", "else: cur_config = {} for attr in [\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr]", "os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: return yaml.load(in_handle) return {}", "command line. \"\"\" install_config = _get_config_file(args) if install_config is None: return if os.path.exists(install_config)", "def docker_image_arg(args): if not hasattr(args, \"image\") or not args.image: default_args = _get_install_defaults(args) args", "mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction of docker image and", "a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. 
\"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)),", "args.image: return raise ValueError(\"Could not find docker image %s in local repository\" %", "to use the docker index server but upload size is currently smaller with", "= [] for x in getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(str(x))", "\"Unspecified image name for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save", "not hasattr(args, \"image\") or not args.image: if default_args.get(\"image\") and not default_args.get(\"images\") == \"None\":", "with latest %s\" % \" and \".join(updates)) else: print(\"\\nNo update targets specified, need", "with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if", "\"image\") or not args.image: default_args = _get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args) return", "parts[0] == args.image: return raise ValueError(\"Could not find docker image %s in local", "if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed with conda\") else: subprocess.check_call([conda_bin, \"install\",", "= getattr(args, attr) if x not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val)", "def _get_install_defaults(args): install_config = _get_config_file(args) if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0:", "assert args.image, \"Unspecified image name for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def", "not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image", "doing tool upgrade 
manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes) == 0: print(\"Data", "latest %s\" % \" and \".join(updates)) else: print(\"\\nNo update targets specified, need '--wrapper',", "if args.install_data: if len(args.genomes) == 0: print(\"Data not installed, no genomes provided with", "a bcbio-nextgen installation. \"\"\" from __future__ import print_function import os import subprocess import", "args.image = default_args[\"image\"] else: args.image = DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add previously", "\"\"\" updates = [] args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts", "docker image with code and tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert args.image, \"Unspecified", "out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args) if install_config and", "\"\"\" from __future__ import print_function import os import subprocess import sys import yaml", "subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed to installation to be", "yaml.load(in_handle) else: cur_config = {} for attr in [\"genomes\", \"aligners\"]: if not cur_config.get(attr):", "of docker image and data. \"\"\" updates = [] args = add_install_defaults(args) if", "\"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf): \"\"\"Pull down latest docker image, using export", "and data. 
\"\"\" updates = [] args = add_install_defaults(args) if args.wrapper: updates.append(\"wrapper scripts\")", "os.path.getsize(install_config) > 0: with open(install_config) as in_handle: cur_config = yaml.load(in_handle) else: cur_config =", "tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes) == 0: print(\"Data not", "return args def _check_docker_image(args): \"\"\"Ensure docker image exists. \"\"\" for image in subprocess.check_output([\"docker\",", "return args def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\") if not os.path.exists(config_dir): os.makedirs(config_dir) return", "len(args.genomes) == 0: print(\"Data not installed, no genomes provided with `--genomes` flag\") sys.exit(1)", "or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for more details.\") def _get_cl(args): clargs =", "wrapper code. \"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm;", "== args.image: return raise ValueError(\"Could not find docker image %s in local repository\"", "not installed, no aligners provided with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args)", "\"image\") or not args.image: if default_args.get(\"image\") and not default_args.get(\"images\") == \"None\": args.image =", "to S3 bucket. 
Long term plan is to use the docker index server", "in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if len(parts) > 1 and parts[0] ==", "and \".join(updates)) else: print(\"\\nNo update targets specified, need '--wrapper', '--tools' or '--data'\\n\" \"See", "_add_docker_defaults(args, default_args): if not hasattr(args, \"image\") or not args.image: if default_args.get(\"image\") and not", "or not args.image: if default_args.get(\"image\") and not default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"]", "installation. \"\"\" from __future__ import print_function import os import subprocess import sys import", "gzipped image. \"\"\" print(\"Retrieving bcbio-nextgen docker image with code and tools\") # subprocess.check_call([\"docker\",", "if default_args.get(\"image\") and not default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"] else: args.image =", "and not default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"] else: args.image = DEFAULT_IMAGE return", "conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed with", "bcbio-nextgen-vm wrapper code. \"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot update", "args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image with open(install_config, \"w\") as out_handle:", "_add_docker_defaults(args, default_args) _check_docker_image(args) return args def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\") if not", "def _save_install_defaults(args): \"\"\"Save arguments passed to installation to be used on subsequent upgrades.", "command line arguments. 
\"\"\" default_args = _get_install_defaults(args) for attr in [\"genomes\", \"aligners\"]: for", "'--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for more details.\") def _get_cl(args):", "os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\",", "\"None\": args.image = default_args[\"image\"] else: args.image = DEFAULT_IMAGE return args def add_install_defaults(args): \"\"\"Add", "is currently smaller with an exported gzipped image. \"\"\" print(\"Retrieving bcbio-nextgen docker image", "as in_handle: return yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args): if not hasattr(args, \"image\")", "if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if x", "installed, no aligners provided with `--aligners` flag\") sys.exit(1) else: updates.append(\"biological data\") _check_docker_image(args) manage.run_bcbio_cmd(args.image,", "defaults to command line arguments. \"\"\" default_args = _get_install_defaults(args) for attr in [\"genomes\",", "in sync when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"]) if args.install_data: if len(args.genomes)", "clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. \"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if", "cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"] = args.image with open(install_config, \"w\")", "if args.install_data: clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\", g]) for a in args.aligners:", "cur_config[attr] = [] for x in getattr(args, attr): if x not in cur_config[attr]:", "dockerconf): \"\"\"Full installaction of docker image and data. 
\"\"\" updates = [] args", "or not args.image: default_args = _get_install_defaults(args) args = _add_docker_defaults(args, default_args) _check_docker_image(args) return args", "args.install_data: if len(args.genomes) == 0: print(\"Data not installed, no genomes provided with `--genomes`", "print(\"Retrieving bcbio-nextgen docker image with code and tools\") # subprocess.check_call([\"docker\", \"pull\", image]) assert", "% \" and \".join(updates)) else: print(\"\\nNo update targets specified, need '--wrapper', '--tools' or", "install_config = _get_config_file(args) if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config)", "\"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\") if not os.path.exists(conda_bin): print(\"Cannot update bcbio-nextgen-vm; not installed", "installed with conda\") else: subprocess.check_call([conda_bin, \"install\", \"--yes\", \"-c\", \"https://conda.binstar.org/bcbio\", \"bcbio-nextgen-vm\"]) def pull(args, dockerconf):", "for attr in [\"genomes\", \"aligners\"]: for x in default_args.get(attr, []): new_val = getattr(args,", "import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed to installation to", "default_args) _check_docker_image(args) return args def _get_config_file(args): config_dir = os.path.join(args.datadir, \"config\") if not os.path.exists(config_dir):", "exists. 
\"\"\" for image in subprocess.check_output([\"docker\", \"images\"]).split(\"\\n\"): parts = image.split() if len(parts) >", "not find docker image %s in local repository\" % args.image) def docker_image_arg(args): if", "and third party tools\") pull(args, dockerconf) _check_docker_image(args) # Ensure external galaxy configuration in", "if x not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image != DEFAULT_IMAGE and args.image: cur_config[\"image\"]", "yaml from bcbiovm.docker import manage, mounts DEFAULT_IMAGE = \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full", "def full(args, dockerconf): \"\"\"Full installaction of docker image and data. \"\"\" updates =", "targets specified, need '--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade -h' for more", "dockerconf): \"\"\"Pull down latest docker image, using export uploaded to S3 bucket. Long", "elif len(args.aligners) == 0: print(\"Data not installed, no aligners provided with `--aligners` flag\")", "return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade bcbio-nextgen-vm wrapper code. \"\"\" conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), \"conda\")", "= mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and third party tools\") pull(args, dockerconf)", "= {} for attr in [\"genomes\", \"aligners\"]: if not cur_config.get(attr): cur_config[attr] = []", "\"pull\", image]) assert args.image, \"Unspecified image name for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"],", "arguments passed to installation to be used on subsequent upgrades. Avoids needing to", "args def add_install_defaults(args): \"\"\"Add previously saved installation defaults to command line arguments. 
\"\"\"", "dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\" % \" and", "open(install_config) as in_handle: cur_config = yaml.load(in_handle) else: cur_config = {} for attr in", "default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"] else: args.image = DEFAULT_IMAGE return args def", "Long term plan is to use the docker index server but upload size", "plan is to use the docker index server but upload size is currently", "export uploaded to S3 bucket. Long term plan is to use the docker", "in args.genomes: clargs.extend([\"--genomes\", g]) for a in args.aligners: clargs.extend([\"--aligners\", a]) return clargs def", "else: print(\"\\nNo update targets specified, need '--wrapper', '--tools' or '--data'\\n\" \"See 'bcbio_vm.py upgrade", "using export uploaded to S3 bucket. Long term plan is to use the", "if not hasattr(args, \"image\") or not args.image: if default_args.get(\"image\") and not default_args.get(\"images\") ==", "Ensure external galaxy configuration in sync when doing tool upgrade manage.run_bcbio_cmd(args.image, dmounts, [\"upgrade\"])", "setattr(args, attr, new_val) args = _add_docker_defaults(args, default_args) return args def _check_docker_image(args): \"\"\"Ensure docker", "name for docker import\" subprocess.check_call([\"docker\", \"import\", dockerconf[\"image_url\"], args.image]) def _save_install_defaults(args): \"\"\"Save arguments passed", "len(parts) > 1 and parts[0] == args.image: return raise ValueError(\"Could not find docker", "in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args = _add_docker_defaults(args, default_args) return args", "\"\"\"Full installaction of docker image and data. 
\"\"\" updates = [] args =", "ValueError(\"Could not find docker image %s in local repository\" % args.image) def docker_image_arg(args):", "not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args = _add_docker_defaults(args, default_args) return", "manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args)) _save_install_defaults(args) if updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\" % \"", "and os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle: return yaml.load(in_handle) return", "installation defaults to command line arguments. \"\"\" default_args = _get_install_defaults(args) for attr in", "_get_config_file(args) if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle:", "\"images\"]).split(\"\\n\"): parts = image.split() if len(parts) > 1 and parts[0] == args.image: return", "__future__ import print_function import os import subprocess import sys import yaml from bcbiovm.docker", "if len(parts) > 1 and parts[0] == args.image: return raise ValueError(\"Could not find", "if len(args.genomes) == 0: print(\"Data not installed, no genomes provided with `--genomes` flag\")", "attr, new_val) args = _add_docker_defaults(args, default_args) return args def _check_docker_image(args): \"\"\"Ensure docker image", "> 0: with open(install_config) as in_handle: cur_config = yaml.load(in_handle) else: cur_config = {}", "== 0: print(\"Data not installed, no genomes provided with `--genomes` flag\") sys.exit(1) elif", "not default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"] else: args.image = DEFAULT_IMAGE return args", "default_args.get(attr, []): new_val = getattr(args, attr) if x not in getattr(args, attr): new_val.append(x)", "index server but upload size is currently smaller with an exported gzipped image.", "[\"upgrade\"]) if args.install_data: if len(args.genomes) == 0: 
print(\"Data not installed, no genomes provided", "image %s in local repository\" % args.image) def docker_image_arg(args): if not hasattr(args, \"image\")", "= \"chapmanb/bcbio-nextgen-devel\" def full(args, dockerconf): \"\"\"Full installaction of docker image and data. \"\"\"", "updates.append(\"wrapper scripts\") upgrade_bcbio_vm() dmounts = mounts.prepare_system(args.datadir, dockerconf[\"biodata_dir\"]) if args.install_tools: updates.append(\"bcbio-nextgen code and third", "upgrades. Avoids needing to re-include genomes and aligners on command line. \"\"\" install_config", "for x in default_args.get(attr, []): new_val = getattr(args, attr) if x not in", "genomes and aligners on command line. \"\"\" install_config = _get_config_file(args) if install_config is", "with open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config =", "if updates: print(\"\\nbcbio-nextgen-vm updated with latest %s\" % \" and \".join(updates)) else: print(\"\\nNo", "is None: return if os.path.exists(install_config) and os.path.getsize(install_config) > 0: with open(install_config) as in_handle:", "_get_cl(args): clargs = [\"upgrade\"] if args.install_data: clargs.append(\"--data\") for g in args.genomes: clargs.extend([\"--genomes\", g])", "args.image: if default_args.get(\"image\") and not default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"] else: args.image", "open(install_config) as in_handle: return yaml.load(in_handle) return {} def _add_docker_defaults(args, default_args): if not hasattr(args,", "return args def add_install_defaults(args): \"\"\"Add previously saved installation defaults to command line arguments.", "args.image: cur_config[\"image\"] = args.image with open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)", "_get_install_defaults(args) for attr in 
[\"genomes\", \"aligners\"]: for x in default_args.get(attr, []): new_val =", "image.split() if len(parts) > 1 and parts[0] == args.image: return raise ValueError(\"Could not", "installed, no genomes provided with `--genomes` flag\") sys.exit(1) elif len(args.aligners) == 0: print(\"Data", "clargs.extend([\"--genomes\", g]) for a in args.aligners: clargs.extend([\"--aligners\", a]) return clargs def upgrade_bcbio_vm(): \"\"\"Upgrade", "and args.image: cur_config[\"image\"] = args.image with open(install_config, \"w\") as out_handle: yaml.dump(cur_config, out_handle, default_flow_style=False,", "not args.image: if default_args.get(\"image\") and not default_args.get(\"images\") == \"None\": args.image = default_args[\"image\"] else:", "attr): new_val.append(x) setattr(args, attr, new_val) args = _add_docker_defaults(args, default_args) return args def _check_docker_image(args):", "for x in getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(str(x)) if args.image", "= image.split() if len(parts) > 1 and parts[0] == args.image: return raise ValueError(\"Could", "previously saved installation defaults to command line arguments. \"\"\" default_args = _get_install_defaults(args) for", "default_flow_style=False, allow_unicode=False) def _get_install_defaults(args): install_config = _get_config_file(args) if install_config and os.path.exists(install_config) and os.path.getsize(install_config)" ]
[ "None, cuisine: str = None) -> List[Dict]: query = \"MATCH (a:recipe) WITH rand()", "degree-minus_degree0 * 2-minus_degree1 * 2-minus_degree2 * 2, # (case when minus_degree0>=1 then 1", "rep.Name=~'(?i){0}' RETURN rep\".format(rep) res = graph.run(query) res = pd.DataFrame(res) if res.empty: return None", "'eggetarian': query += \"MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None: query +=", "'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood', 'shrimp', 'strawberry', 'tomatoes', 'watermelon', 'winter melon',", "== 'halal': query += \"MATCH (rep) WHERE rep.halal is null \" elif dietary", "[] for i in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test", "dietaryList is not None: for dietary in dietaryList: if dietary == 'halal': query", "i{1}, minus_degree{2},\".format(str(i), str(i), str(i)) query += \"degree ORDER BY degree\" for i in", "'shrimp', 'strawberry', 'tomatoes', 'watermelon', 'winter melon', 'garlic', 'corn', 'eggplant', 'lettuce', 'onion', 'scallion', 'chicken',", "# Unit Test 3 ######################################## # rep = 'super Fruity Smoothie' # print(getIngredient(rep))", "= None) -> List[Dict]: query = \"MATCH (a:recipe) WITH rand() as r, a", "MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN a # def random_init(length", "query += \"r{0}, i{1}, minus_degree{2},\".format(str(i), str(i), str(i)) query += \"degree ORDER BY degree\"", "BY r LIMIT {0};\".format(topk) print(query) res = graph.run(query) res = pd.DataFrame(res) recipes =", "graph.run(query) res = pd.DataFrame(res) recipes = [] for i in range(res.shape[0]): recipes.append(res.iloc[i,0]) return", "+ size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, \" for i in range(n): query += \"size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}}))", "name in ingr} query_indegree = 
\"WITH \" for i in range(n): query_indegree +=", "minus_degree2>=1 then 1 else 0 end) desc,degree LIMIT 25; # ''' def getRecipeByName(rep:", "MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'})) # OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name: 'STRAWBERRY'})) # MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) #", "browser(topk: int = 10, dietaryList: List[str] = None, cuisine: str = None) ->", "a{1},\".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] + \" RETURN \" for i", "= query_indegree[:-1] + \" RETURN \" for i in range(n): query_indegree += \"a{0},\".format(str(i))", "+= \"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"WITH rep, \" for i in", "(it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, ' is main ingredient') else: ingr_type[it] = (it.lower(), 'Has_Ingredient',", "print(it, ' is main ingredient') else: ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient') cand =", "i in range(n): query += \"(case when minus_degree{0}>=1 then 1 else 0 end)+\".format(str(i))", "'corn', 'eggplant', 'lettuce', 'onion', 'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine',", "LIMIT {0};\".format(topk) print(query) res = graph.run(query) res = pd.DataFrame(res) recipes = [] for", "1 ######################################## # res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese') # print(type(res[0])) # Sample", "for i in range(n): query += \"(case when minus_degree{0}>=1 then 1 else 0", "List[Dict]: n = len(ingr) if (n == 0): return [{}] ingr_type = {}", "+ ' ' query += \"RETURN rep, \" for i in range(n): query", "+= \"r{0}, i{1}, \".format(str(i), str(i)) query += \"(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as 
degree, \"", "then 1 else 0 end)+(case when minus_degree2>=1 then 1 else 0 end) desc,degree", "= query_indegree[:-1] res = graph.run(query_indegree) indegrees = pd.DataFrame(res) for i, name in enumerate(ingr):", "degree\" for i in range(n): query += \"-minus_degree{0} * 2\".format(str(i)) query += \",\"", "'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter", "query += \",\" for i in range(n): query += \"(case when minus_degree{0}>=1 then", "range(n): query += \"(case when minus_degree{0}>=1 then 1 else 0 end)+\".format(str(i)) query =", "WITH rep, r0, i0, r1, i1, r2, i2, rs, # (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient)))", "\" if dietaryList is not None: for dietary in dietaryList: if dietary ==", "str) -> List[str]: query = \"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN a\".format(rep,", "query += \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary ==", "\"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"WITH rep, \" for i in range(n):", "a\".format(rep, id) res = graph.run(query) res = pd.DataFrame(res) ingrs = [] for i", "main ingredient') else: ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient') cand = {name: 0 for", "= {name: 0 for name in ingr} query_indegree = \"WITH \" for i", "r1, i1, r2, i2, rs, # (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'}))", "str(i)) query += \"degree ORDER BY degree\" for i in range(n): query +=", "id) res = graph.run(query) res = pd.DataFrame(res) ingrs = [] for i in", "(case when minus_degree0>=1 then 1 else 0 
end)+(case when minus_degree1>=1 then 1 else", "'bread', 'rice', 'vanilla']) def getRecipes( ingr: List[str], topk: int = 10, dietaryList: List[str]", "= {} for it in ingr: it = it.lower() if it in main_ingr:", "3 ######################################## # rep = 'super Fruity Smoothie' # print(getIngredient(rep)) # Sample query", "Sample query # MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN a", "'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter melon',", "Fruity Smoothie' # RETURN a # def random_init(length = 50): # query =", "query += \"r{0}, i{1}, \".format(str(i), str(i)) query += \"(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree,", "# res = graph.run(query) # res = pd.DataFrame(res) # for i in range(res.shape[0]):", "# size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'APPLE'})) as minus_degree1, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'STRAWBERRY'})) as minus_degree2 # RETURN rep,", "pd.DataFrame(res) # for i in range(res.shape[0]): # random_set[i] = res.iloc[i,0] def browser(topk: int", "for i in range(res.shape[0]): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 3 ########################################", "minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree # ORDER BY degree-minus_degree0 * 2-minus_degree1 *", "in range(n): query += \"OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) \".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2])", "== 0): return [{}] ingr_type = {} for it in ingr: it =", "' is main ingredient') else: ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient') cand = {name:", "'egg', 'fish', 'grapes', 'lemon', 'mango', 'milk', 'mushroom', 
'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin',", "dietary == 'eggetarian': query += \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None:", "# print(res) recipes = [] for i in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0]) return recipes", "'vegan' query += \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary", "= \"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN a\".format(rep, id) res = graph.run(query)", "it in main_ingr: ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, ' is main ingredient')", "pd.DataFrame(res) if res.empty: return None return res.iloc[0,0] ######################################## # Unit Test 2 ########################################", "for i in range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return ingrs ######################################## # Unit Test 3 ########################################", "= it.lower() if it in main_ingr: ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, '", "return res.iloc[0,0] ######################################## # Unit Test 2 ######################################## # rep = 'super Fruity", "query_indegree[:-1] res = graph.run(query_indegree) indegrees = pd.DataFrame(res) for i, name in enumerate(ingr): cand[name]", "Test 3 ######################################## # rep = 'super Fruity Smoothie' # print(getIngredient(rep)) # Sample", "'fruitarian': query += \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH", "getRecipes( ingr: List[str], topk: int = 10, dietaryList: List[str] = None, cuisine: str", "if cuisine is not None: query += \"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query +=", "query += 
\"-minus_degree{0} * 2\".format(str(i)) query += \",\" for i in range(n): query", "\".format(cuisine) query += \"RETURN a ORDER BY r LIMIT {0};\".format(topk) print(query) res =", "dietary == 'vegetarian': vegan = 'vegan' query += \"MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name:", "rep: str) -> List[str]: query = \"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN", "elif dietary == 'fruitarian': query += \"MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian':", "Graph, NodeMatcher import pandas as pd from operator import itemgetter from typing import", "x : x[1]) query = '' for i in range(n): query += \"OPTIONAL", "'bell pepper', 'broccoli', 'cabbage', 'carrot', 'cheese', 'coconut', 'cucumber', 'egg', 'fish', 'grapes', 'lemon', 'mango',", "dietaryList: if dietary == 'halal': query += \"MATCH (rep) WHERE rep.halal is null", "dietary == 'halal': query += \"MATCH (rep) WHERE rep.halal is null \" elif", "res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese') # print(type(res[0])) # Sample query # query", "\"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN a\".format(rep, id) res = graph.run(query) res", "not None: query += \"MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"RETURN a ORDER", "cuisine is not None: query += \"MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"RETURN", "elif dietary == 'eggetarian': query += \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not", "when minus_degree2>=1 then 1 else 0 end) desc,degree LIMIT 25; # ''' def", "import itemgetter from typing import List, Dict import random graph = Graph(\"http://localhost:7474\", username=\"neo4j\",", "itemgetter 
from typing import List, Dict import random graph = Graph(\"http://localhost:7474\", username=\"neo4j\", password='<PASSWORD>')", "dietary == 'fruitarian': query += \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query", "\"MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if", "indegrees = pd.DataFrame(res) for i, name in enumerate(ingr): cand[name] = indegrees.iloc[[0],[i]].values[0][0] sorted_ingr =", "minus_degree0>=1 then 1 else 0 end)+(case when minus_degree1>=1 then 1 else 0 end)+(case", "# Sample query # MATCH (rep:recipe) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN", "def getIngredient(id: str, rep: str) -> List[str]: query = \"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}'", "if it in main_ingr: ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, ' is main", "dietary in dietaryList: if dietary == 'halal': query += \"MATCH (rep) WHERE rep.halal", "str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) if dietaryList is not None: for dietary in dietaryList:", "it = it.lower() if it in main_ingr: ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it,", "rep = 'super Fruity Smoothie' # print(getIngredient(rep)) # Sample query # MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient)", "i in range(res.shape[0]): # random_set[i] = res.iloc[i,0] def browser(topk: int = 10, dietaryList:", "RETURN a # def random_init(length = 50): # query = \"MATCH (n:recipe) RETURN", "MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) \".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) if dietaryList is not None:", "def browser(topk: int = 10, dietaryList: List[str] = None, cuisine: str = None)", 
"i in range(n): query_indegree += \"a{0},\".format(str(i)) query_indegree = query_indegree[:-1] res = graph.run(query_indegree) indegrees", "minus_degree{0}>=1 then 1 else 0 end)+\".format(str(i)) query = query[:-1] + \" desc\" query", "cuisine: str = None) -> List[Dict]: query = \"MATCH (a:recipe) WITH rand() as", "'garlic', 'corn', 'eggplant', 'lettuce', 'onion', 'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball',", "rep.RecipeId='{1}' RETURN a\".format(rep, id) res = graph.run(query) res = pd.DataFrame(res) ingrs = []", "\"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},\".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] + \" RETURN \"", "py2neo import Node, Relationship, Graph, NodeMatcher import pandas as pd from operator import", "'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter melon', 'pepper', 'peas', 'ginger', 'shells', 'chili',", "Sample query # MATCH (rep:recipe) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN rep", "MATCH (rep:recipe) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN rep def getIngredient(id: str,", "dietary in dietaryList: if dietary == 'halal': query += \"MATCH (a) WHERE a.halal", "recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 1 ######################################## # res = getRecipes(['apple','banana',", "rep\".format(rep) res = graph.run(query) res = pd.DataFrame(res) if res.empty: return None return res.iloc[0,0]", "0) -> List[Dict]: n = len(ingr) if (n == 0): return [{}] ingr_type", "WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary == 'fruitarian': query += \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'})", "for i in range(n): query += \"size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},\".format(ingr_type[sorted_ingr[i]][0], str(i), ingr_type[sorted_ingr[i]][1], 
ingr_type[sorted_ingr[i]][2])", "(n == 0): return [{}] ingr_type = {} for it in ingr: it", "\"degree ORDER BY degree\" for i in range(n): query += \"-minus_degree{0} * 2\".format(str(i))", "\" for i in range(n): query += \"r{0}, i{1}, \".format(str(i), str(i)) query +=", "i in range(res.shape[0]): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 3 ######################################## #", "topk) print(query) res = graph.run(query) res = pd.DataFrame(res) # print(res) recipes = []", "str(i), str(i)) query += \"degree ORDER BY degree\" for i in range(n): query", "+ \" desc\" query += \",degree SKIP {0} LIMIT 25;\".format(skip * topk) print(query)", "WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN a\".format(rep, id) res = graph.run(query) res = pd.DataFrame(res)", "range(n): query_indegree += \"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},\".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] +", "melon', 'garlic', 'corn', 'eggplant', 'lettuce', 'onion', 'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck',", "print(type(res[0])) # Sample query # query = # ''' # OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name:", "ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient') cand = {name: 0 for name in ingr}", "# ''' # OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'})) # OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'})) #", "for it in ingr: it = it.lower() if it in main_ingr: ingr_type[it] =", "WHERE rep.Name=~'(?i){0}' RETURN rep\".format(rep) res = graph.run(query) res = pd.DataFrame(res) if res.empty: return", "pd.DataFrame(res) recipes = [] for i in range(res.shape[0]): recipes.append(res.iloc[i,0]) return recipes ######################################## #", "print(query) res = graph.run(query) res = 
pd.DataFrame(res) # print(res) recipes = [] for", "# res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese') # print(type(res[0])) # Sample query #", "\"MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary == 'fruitarian': query", "query += \",degree SKIP {0} LIMIT 25;\".format(skip * topk) print(query) res = graph.run(query)", "= \"WITH \" for i in range(n): query_indegree += \"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},\".format(ingr_type[ingr[i]][0], str(i),", "AND rep.RecipeId='{1}' RETURN a\".format(rep, id) res = graph.run(query) res = pd.DataFrame(res) ingrs =", "rep.Name=~'(?i)super Fruity Smoothie' # RETURN a # def random_init(length = 50): # query", "in main_ingr: ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, ' is main ingredient') else:", "query += \"WITH rep, \" for i in range(n): query += \"r{0}, i{1},", "# Sample query # MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN", "r, a \" if dietaryList is not None: for dietary in dietaryList: if", "enum from pandas.io.pytables import DuplicateWarning from py2neo import Node, Relationship, Graph, NodeMatcher import", "i in range(n): query_indegree += \"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},\".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree =", "len(ingr) if (n == 0): return [{}] ingr_type = {} for it in", "minus_degree2 # RETURN rep, r0, i0, minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree #", "0 end) desc,degree LIMIT 25; # ''' def getRecipeByName(rep: str) -> Dict: query", "elif dietary == 'fruitarian': query += \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian':", "rs, minus_degree1,r2, i2, minus_degree2,degree # ORDER BY 
degree-minus_degree0 * 2-minus_degree1 * 2-minus_degree2 *", "query += \"(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, \" for i in range(n): query", "WHERE a.halal is null \" elif dietary == 'vegetarian': vegan = 'vegan' query", "topk: int = 10, dietaryList: List[str] = None, cuisine: str = None, skip:", "'seafood', 'shrimp', 'strawberry', 'tomatoes', 'watermelon', 'winter melon', 'garlic', 'corn', 'eggplant', 'lettuce', 'onion', 'scallion',", "'vegetarian': vegan = 'vegan' query += \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan,", "+= \"MATCH (rep) WHERE rep.halal is null \" elif dietary == 'vegetarian': vegan", "None: for dietary in dietaryList: if dietary == 'halal': query += \"MATCH (a)", "'halal': query += \"MATCH (a) WHERE a.halal is null \" elif dietary ==", "a ORDER BY r LIMIT {0};\".format(topk) print(query) res = graph.run(query) res = pd.DataFrame(res)", "'onion', 'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi',", "as minus_degree2 # RETURN rep, r0, i0, minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree", "\" for i in range(n): query += \"size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},\".format(ingr_type[sorted_ingr[i]][0], str(i), ingr_type[sorted_ingr[i]][1],", "ORDER BY r LIMIT {0};\".format(topk) print(query) res = graph.run(query) res = pd.DataFrame(res) recipes", "res = graph.run(query) res = pd.DataFrame(res) ingrs = [] for i in range(res.shape[0]):", "# WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN a # def random_init(length = 50):", "in range(n): query += \"r{0}, i{1}, minus_degree{2},\".format(str(i), str(i), str(i)) query += \"degree ORDER", "'super Fruity Smoothie' # print(getRecipeByName(rep)) # Sample query # MATCH (rep:recipe) # WHERE", "NodeMatcher import pandas 
as pd from operator import itemgetter from typing import List,", "as minus_degree1, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'STRAWBERRY'})) as minus_degree2 # RETURN rep, r0, i0, minus_degree0,r1,", "not None: query += \"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"WITH rep, \"", "DuplicateWarning from py2neo import Node, Relationship, Graph, NodeMatcher import pandas as pd from", "in range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return ingrs ######################################## # Unit Test 3 ######################################## # rep", "random_set[i] = res.iloc[i,0] def browser(topk: int = 10, dietaryList: List[str] = None, cuisine:", "######################################## # Unit Test 1 ######################################## # res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese')", "as pd from operator import itemgetter from typing import List, Dict import random", "def getRecipes( ingr: List[str], topk: int = 10, dietaryList: List[str] = None, cuisine:", "res = pd.DataFrame(res) ingrs = [] for i in range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return ingrs", "MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'})) # OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'})) # OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name: 'STRAWBERRY'}))", "i0, r1, i1, r2, i2, rs, # (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name:", "Smoothie' # print(getIngredient(rep)) # Sample query # MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity", "return None return res.iloc[0,0] ######################################## # Unit Test 2 
######################################## # rep =", "'pepper', 'peas', 'ginger', 'shells', 'chili', 'ham', 'sausage', 'butter', 'bread', 'rice', 'vanilla']) def getRecipes(", "(rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WITH rep, r0, i0, r1, i1, r2, i2, rs, #", "getRecipeByName(rep: str) -> Dict: query = \"MATCH (rep:recipe) WHERE rep.Name=~'(?i){0}' RETURN rep\".format(rep) res", "'pumpkin', 'seafood', 'shrimp', 'strawberry', 'tomatoes', 'watermelon', 'winter melon', 'garlic', 'corn', 'eggplant', 'lettuce', 'onion',", "import List, Dict import random graph = Graph(\"http://localhost:7474\", username=\"neo4j\", password='<PASSWORD>') main_ingr = set(['apple',", "query += \"MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary ==", "query[:-1] + \" desc\" query += \",degree SKIP {0} LIMIT 25;\".format(skip * topk)", "\"MATCH (a:recipe) WITH rand() as r, a \" if dietaryList is not None:", "\"MATCH (rep) WHERE rep.halal is null \" elif dietary == 'vegetarian': vegan =", "# OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'})) # OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name: 'STRAWBERRY'})) # MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{Name:", "i in range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return ingrs ######################################## # Unit Test 3 ######################################## #", "rep, \" for i in range(n): query += \"r{0}, i{1}, minus_degree{2},\".format(str(i), str(i), str(i))", "in range(res.shape[0]): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 3 ######################################## # print(browser(dietaryList=['halal','fruitarian'],", "(rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN a # def random_init(length =", "MATCH 
(rep)-[rs:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WITH rep, r0, i0, r1,", "recipes = [] for i in range(res.shape[0]): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit", "then 1 else 0 end)+\".format(str(i)) query = query[:-1] + \" desc\" query +=", "[{}] ingr_type = {} for it in ingr: it = it.lower() if it", "2, # (case when minus_degree0>=1 then 1 else 0 end)+(case when minus_degree1>=1 then", "Fruity Smoothie' # RETURN rep def getIngredient(id: str, rep: str) -> List[str]: query", "query = \"MATCH (a:recipe) WITH rand() as r, a \" if dietaryList is", "-> List[Dict]: query = \"MATCH (a:recipe) WITH rand() as r, a \" if", "str, rep: str) -> List[str]: query = \"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}'", "-> List[Dict]: n = len(ingr) if (n == 0): return [{}] ingr_type =", "\"(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, \" for i in range(n): query += \"size((rep:recipe)-[:{2}]->(:{3}{{Name:", "i0, minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree # ORDER BY degree-minus_degree0 * 2-minus_degree1", "\"MATCH (rep:recipe) WHERE rep.Name=~'(?i){0}' RETURN rep\".format(rep) res = graph.run(query) res = pd.DataFrame(res) if", "size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as minus_degree0, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'APPLE'})) as minus_degree1, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'STRAWBERRY'})) as", "WITH rand() as r, a \" if dietaryList is not None: for dietary", "* topk) print(query) res = graph.run(query) res = pd.DataFrame(res) # print(res) recipes =", "= pd.DataFrame(res) for i, name in enumerate(ingr): cand[name] = indegrees.iloc[[0],[i]].values[0][0] sorted_ingr = sorted(cand,", "= 
graph.run(query_indegree) indegrees = pd.DataFrame(res) for i, name in enumerate(ingr): cand[name] = indegrees.iloc[[0],[i]].values[0][0]", "range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return ingrs ######################################## # Unit Test 3 ######################################## # rep =", "as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as minus_degree0, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'APPLE'})) as minus_degree1, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name:", "0 end)+(case when minus_degree1>=1 then 1 else 0 end)+(case when minus_degree2>=1 then 1", "\"MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"RETURN a ORDER BY r LIMIT {0};\".format(topk)", "+= \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary == 'fruitarian':", "query += \"OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) \".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) if dietaryList", "######################################## # Unit Test 3 ######################################## # rep = 'super Fruity Smoothie' #", "cand = {name: 0 for name in ingr} query_indegree = \"WITH \" for", "'butter', 'bread', 'rice', 'vanilla']) def getRecipes( ingr: List[str], topk: int = 10, dietaryList:", "dietary == 'vegetarian': vegan = 'vegan' query += \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name:", "OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'})) # OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'})) # OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name:", "end) desc,degree LIMIT 25; # ''' def 
getRecipeByName(rep: str) -> Dict: query =", "= Graph(\"http://localhost:7474\", username=\"neo4j\", password='<PASSWORD>') main_ingr = set(['apple', 'banana', 'bell pepper', 'broccoli', 'cabbage', 'carrot',", "1 else 0 end)+(case when minus_degree1>=1 then 1 else 0 end)+(case when minus_degree2>=1", "if cuisine is not None: query += \"MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query +=", "Unit Test 1 ######################################## # res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese') # print(type(res[0]))", "(a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"RETURN a ORDER BY r LIMIT {0};\".format(topk) print(query)", "i, name in enumerate(ingr): cand[name] = indegrees.iloc[[0],[i]].values[0][0] sorted_ingr = sorted(cand, key=lambda x :", "'{0}'}}) \".format(cuisine) query += \"WITH rep, \" for i in range(n): query +=", "+= \"WITH rep, \" for i in range(n): query += \"r{0}, i{1}, \".format(str(i),", "then 1 else 0 end)+(case when minus_degree1>=1 then 1 else 0 end)+(case when", "LIMIT {0}\".format(str(length)) # res = graph.run(query) # res = pd.DataFrame(res) # for i", "in range(res.shape[0]): # random_set[i] = res.iloc[i,0] def browser(topk: int = 10, dietaryList: List[str]", "# rep = 'super Fruity Smoothie' # print(getRecipeByName(rep)) # Sample query # MATCH", "+= \"RETURN a ORDER BY r LIMIT {0};\".format(topk) print(query) res = graph.run(query) res", "'vegan'}) # WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WITH rep, r0, i0, r1, i1, r2,", "Unit Test 3 ######################################## # rep = 'super Fruity Smoothie' # print(getIngredient(rep)) #", "== 'vegetarian': vegan = 'vegan' query += \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}})", "range(n): query += \"-minus_degree{0} * 2\".format(str(i)) query += \",\" for i in range(n):", "\"MATCH 
(a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None: query += \"MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}})", "in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 1 ######################################## #", "Fruity Smoothie' # print(getIngredient(rep)) # Sample query # MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super", "+= \"(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, \" for i in range(n): query +=", "'lemon', 'mango', 'milk', 'mushroom', 'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood', 'shrimp', 'strawberry',", "'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter melon', 'pepper',", "'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter melon', 'pepper', 'peas',", "\"WITH rep, \" for i in range(n): query += \"r{0}, i{1}, \".format(str(i), str(i))", "''' def getRecipeByName(rep: str) -> Dict: query = \"MATCH (rep:recipe) WHERE rep.Name=~'(?i){0}' RETURN", "= pd.DataFrame(res) if res.empty: return None return res.iloc[0,0] ######################################## # Unit Test 2", "\"(case when minus_degree{0}>=1 then 1 else 0 end)+\".format(str(i)) query = query[:-1] + \"", "WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WITH rep, r0, i0, r1, i1, r2, i2, rs,", "+= \"-minus_degree{0} * 2\".format(str(i)) query += \",\" for i in range(n): query +=", "+= \"size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},\".format(ingr_type[sorted_ingr[i]][0], str(i), ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) query = query[:-1] + '", "res = graph.run(query_indegree) indegrees = pd.DataFrame(res) for i, name in enumerate(ingr): cand[name] =", "vegan = 'vegan' query += \"MATCH 
(a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan)", "# print(type(res[0])) # Sample query # query = # ''' # OPTIONAL MATCH", "\"RETURN rep, \" for i in range(n): query += \"r{0}, i{1}, minus_degree{2},\".format(str(i), str(i),", "str(i)) query += \"(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, \" for i in range(n):", "is not None: query += \"MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"RETURN a", "ingr: it = it.lower() if it in main_ingr: ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient')", "if dietaryList is not None: for dietary in dietaryList: if dietary == 'halal':", "RETURN a\".format(rep, id) res = graph.run(query) res = pd.DataFrame(res) ingrs = [] for", "'broccoli', 'cabbage', 'carrot', 'cheese', 'coconut', 'cucumber', 'egg', 'fish', 'grapes', 'lemon', 'mango', 'milk', 'mushroom',", "+= \"RETURN rep, \" for i in range(n): query += \"r{0}, i{1}, minus_degree{2},\".format(str(i),", "+= \"MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"RETURN a ORDER BY r LIMIT", "for i in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 1", "# WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WITH rep, r0, i0, r1, i1, r2, i2,", "= None, cuisine: str = None, skip: int = 0) -> List[Dict]: n", "cuisine is not None: query += \"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"WITH", "in range(n): query += \"size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},\".format(ingr_type[sorted_ingr[i]][0], str(i), ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) query =", "print(getIngredient(rep)) # Sample query # MATCH 
(rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity Smoothie' #", "== 'eggetarian': query += \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None: query", "(rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN a\".format(rep, id) res = graph.run(query) res =", "= 10, dietaryList: List[str] = None, cuisine: str = None) -> List[Dict]: query", "# RETURN rep, r0, i0, minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree # ORDER", "= \"MATCH (rep:recipe) WHERE rep.Name=~'(?i){0}' RETURN rep\".format(rep) res = graph.run(query) res = pd.DataFrame(res)", "res.shape[0])): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 1 ######################################## # res =", "dietary == 'eggetarian': query += \"MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None:", "= 'super Fruity Smoothie' # print(getIngredient(rep)) # Sample query # MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) #", "+= \"r{0}, i{1}, minus_degree{2},\".format(str(i), str(i), str(i)) query += \"degree ORDER BY degree\" for", "None: for dietary in dietaryList: if dietary == 'halal': query += \"MATCH (rep)", "# (case when minus_degree0>=1 then 1 else 0 end)+(case when minus_degree1>=1 then 1", "# print(getRecipeByName(rep)) # Sample query # MATCH (rep:recipe) # WHERE rep.Name=~'(?i)super Fruity Smoothie'", "query += \"RETURN a ORDER BY r LIMIT {0};\".format(topk) print(query) res = graph.run(query)", "'fish', 'grapes', 'lemon', 'mango', 'milk', 'mushroom', 'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood',", "graph.run(query) res = pd.DataFrame(res) ingrs = [] for i in range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return", "query_indegree += \"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},\".format(ingr_type[ingr[i]][0], str(i), 
def getRecipes(
        ingr: List[str],
        topk: int = 10,
        dietaryList: List[str] = None,
        cuisine: str = None,
        skip: int = 0) -> List[Dict]:
    """Rank recipes by how well they match the requested ingredients.

    First runs an in-degree query to learn how common each requested
    ingredient is, then builds one big Cypher query that OPTIONAL-MATCHes
    every ingredient, applies optional dietary/cuisine filters, and orders
    recipes by ``degree - 2 * sum(matched)`` ascending (fewer unmatched
    ingredients first) and by the count of matched ingredients descending.

    Parameters
    ----------
    ingr : list of ingredient names, case-insensitive. Names found in the
        module-level ``main_ingr`` set are matched as ``main_ingredient``
        nodes (upper-cased); all others as plain ``ingredient`` nodes.
    topk : maximum number of recipe nodes to return.
    dietaryList : optional filters; recognized values are 'halal',
        'vegetarian', 'fruitarian' and 'eggetarian'.
    cuisine : optional cuisine-type name to filter on.
    skip : page offset; ``skip * topk`` rows are skipped before the LIMIT.

    Returns
    -------
    A list of recipe node dicts (at most ``topk``); ``[{}]`` when ``ingr``
    is empty.
    """
    n = len(ingr)
    if n == 0:
        return [{}]

    # Classify each ingredient as (node Name, relationship type, node label).
    ingr_type = {}
    for it in ingr:
        it = it.lower()
        if it in main_ingr:
            ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient')
            print(it, ' is main ingredient')
        else:
            ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient')

    # Query the in-degree (popularity) of each requested ingredient so the
    # rarest ones can drive the match order.
    # NOTE(review): ingredient names are interpolated straight into Cypher;
    # fine for the fixed vocabulary above, but sanitize/parameterize before
    # exposing this to untrusted input.
    cand = {name: 0 for name in ingr}
    query_indegree = "WITH "
    for i in range(n):
        query_indegree += "size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},".format(
            ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2])
    query_indegree = query_indegree[:-1] + " RETURN "
    for i in range(n):
        query_indegree += "a{0},".format(str(i))
    query_indegree = query_indegree[:-1]
    res = graph.run(query_indegree)
    indegrees = pd.DataFrame(res)
    for i, name in enumerate(ingr):
        cand[name] = indegrees.iloc[[0], [i]].values[0][0]

    # BUG FIX: the original used ``sorted(cand, key=lambda x: x[1])``, which
    # iterates the dict's KEYS and therefore sorted ingredient NAMES by their
    # second character, ignoring the in-degrees just computed. Sort by the
    # stored in-degree value instead so scarce ingredients come first.
    sorted_ingr = sorted(cand, key=cand.get)

    # Build the main query: one OPTIONAL MATCH per ingredient ...
    query = ''
    for i in range(n):
        query += "OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) ".format(
            str(i), str(i), ingr_type[sorted_ingr[i]][0],
            ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2])

    # ... then the optional dietary / cuisine filters.
    if dietaryList is not None:
        for dietary in dietaryList:
            if dietary == 'halal':
                # Recipes with no halal flag set are treated as permissible.
                query += "MATCH (rep) WHERE rep.halal is null "
            elif dietary == 'vegetarian':
                # NOTE(review): the vegetarian filter matches the 'vegan'
                # meal type — presumably vegan implies vegetarian; confirm
                # this is the intended mapping.
                vegan = 'vegan'
                query += "MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) ".format(vegan, vegan)
            elif dietary == 'fruitarian':
                query += "MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) "
            elif dietary == 'eggetarian':
                query += "MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) "
    if cuisine is not None:
        query += "MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) ".format(cuisine)

    # Project the per-ingredient match flags plus the recipe's total
    # ingredient degree (plain + main ingredients).
    query += "WITH rep, "
    for i in range(n):
        query += "r{0}, i{1}, ".format(str(i), str(i))
    query += "(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, "
    for i in range(n):
        query += "size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},".format(
            ingr_type[sorted_ingr[i]][0], str(i),
            ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2])
    query = query[:-1] + ' '

    query += "RETURN rep, "
    for i in range(n):
        query += "r{0}, i{1}, minus_degree{2},".format(str(i), str(i), str(i))

    # Order: fewest unmatched ingredients first (degree minus twice the
    # matched count), then most distinct matches, then degree; paginate.
    query += "degree ORDER BY degree"
    for i in range(n):
        query += "-minus_degree{0} * 2".format(str(i))
    query += ","
    for i in range(n):
        query += "(case when minus_degree{0}>=1 then 1 else 0 end)+".format(str(i))
    query = query[:-1] + " desc"
    query += ",degree SKIP {0} LIMIT 25;".format(skip * topk)

    print(query)
    res = graph.run(query)
    res = pd.DataFrame(res)
    recipes = []
    for i in range(min(topk, res.shape[0])):
        recipes.append(res.iloc[i, 0])
    return recipes
def getRecipeByName(rep: str) -> Dict:
    """Look up a single recipe node by case-insensitive name match.

    Parameters
    ----------
    rep : recipe name, matched case-insensitively (regex ``(?i)``) against
        ``recipe.Name``.

    Returns
    -------
    The first matching recipe node, or ``None`` when nothing matches.
    """
    # FIX: pass the name as a Cypher parameter instead of .format()-ing it
    # into the query string — a name containing a quote previously broke the
    # query (and allowed Cypher injection). The regex-match semantics of the
    # original are preserved.
    query = "MATCH (rep:recipe) WHERE rep.Name=~$pattern RETURN rep"
    res = graph.run(query, pattern='(?i){0}'.format(rep))
    res = pd.DataFrame(res)
    if res.empty:
        return None
    return res.iloc[0, 0]
def getIngredient(id: str, rep: str) -> List[str]:
    """Return the plain-ingredient names of one recipe.

    Parameters
    ----------
    id : the recipe's ``RecipeId`` (exact match).
    rep : the recipe's name, matched case-insensitively.

    Returns
    -------
    List of ingredient ``Name`` strings ([] when the recipe is not found or
    has no plain ingredients).
    """
    # FIX: both the name and the id are passed as Cypher parameters instead
    # of being .format()-ed into the query, so quotes in either value no
    # longer break the query (nor allow injection). Regex semantics kept.
    query = ("MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) "
             "WHERE rep.Name=~$pattern AND rep.RecipeId=$rid RETURN a")
    res = graph.run(query, pattern='(?i){0}'.format(rep), rid=id)
    res = pd.DataFrame(res)
    # Each result row holds an ingredient node; collect its display name.
    return [res.iloc[i, 0]['Name'] for i in range(res.shape[0])]
def browser(topk: int = 10, dietaryList: List[str] = None, cuisine: str = None) -> List[Dict]:
    """Return up to ``topk`` randomly ordered recipes with optional filters.

    Parameters
    ----------
    topk : maximum number of recipes to return (used in the Cypher LIMIT).
    dietaryList : optional filters; recognized values are 'halal',
        'vegetarian', 'fruitarian' and 'eggetarian'.
    cuisine : optional cuisine-type name to filter on.

    Returns
    -------
    A list of recipe node dicts in random order (``rand()`` per row).
    """
    query = "MATCH (a:recipe) WITH rand() as r, a "
    if dietaryList is not None:
        for dietary in dietaryList:
            if dietary == 'halal':
                # Recipes with no halal flag set are treated as permissible.
                query += "MATCH (a) WHERE a.halal is null "
            elif dietary == 'vegetarian':
                # NOTE(review): matches the 'vegan' meal type for the
                # 'vegetarian' filter — confirm this mapping is intended.
                vegan = 'vegan'
                query += "MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) ".format(vegan, vegan)
            elif dietary == 'fruitarian':
                query += "MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) "
            elif dietary == 'eggetarian':
                query += "MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) "
    if cuisine is not None:
        # NOTE(review): cuisine (and topk below) are interpolated into the
        # query string — sanitize before exposing to untrusted input.
        query += "MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) ".format(cuisine)
    query += "RETURN a ORDER BY r LIMIT {0};".format(topk)
    print(query)
    res = graph.run(query)
    res = pd.DataFrame(res)
    # Idiom: comprehension instead of the original index/append loop.
    return [res.iloc[i, 0] for i in range(res.shape[0])]
main ingredient') else: ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient')", "(rep)-[rs:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WITH rep, r0, i0, r1, i1,", "{name: 0 for name in ingr} query_indegree = \"WITH \" for i in", "query += \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None: query += \"MATCH", "cand[name] = indegrees.iloc[[0],[i]].values[0][0] sorted_ingr = sorted(cand, key=lambda x : x[1]) query = ''", "'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter melon', 'pepper', 'peas', 'ginger', 'shells',", "######################################## # Unit Test 2 ######################################## # rep = 'super Fruity Smoothie' #", "SKIP {0} LIMIT 25;\".format(skip * topk) print(query) res = graph.run(query) res = pd.DataFrame(res)", "ingr: List[str], topk: int = 10, dietaryList: List[str] = None, cuisine: str =", "in ingr} query_indegree = \"WITH \" for i in range(n): query_indegree += \"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}}))", "i2, rs, # (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as minus_degree0, #", "getIngredient(id: str, rep: str) -> List[str]: query = \"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND", "Smoothie' # RETURN a # def random_init(length = 50): # query = \"MATCH", "= 'vegan' query += \"MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif", "random_init(length = 50): # query = \"MATCH (n:recipe) RETURN n LIMIT {0}\".format(str(length)) #", "{0} LIMIT 25;\".format(skip * topk) print(query) res = graph.run(query) res = pd.DataFrame(res) #", "res = pd.DataFrame(res) if res.empty: return None return 
res.iloc[0,0] ######################################## # Unit Test", "\"RETURN a ORDER BY r LIMIT {0};\".format(topk) print(query) res = graph.run(query) res =", "10, dietaryList: List[str] = None, cuisine: str = None, skip: int = 0)", "'{2}'}})) \".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) if dietaryList is not None: for dietary", "\"-minus_degree{0} * 2\".format(str(i)) query += \",\" for i in range(n): query += \"(case", "= pd.DataFrame(res) # for i in range(res.shape[0]): # random_set[i] = res.iloc[i,0] def browser(topk:", "null \" elif dietary == 'vegetarian': vegan = 'vegan' query += \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name:", "cuisine='chinese') # print(type(res[0])) # Sample query # query = # ''' # OPTIONAL", "'{0}'}}) \".format(cuisine) query += \"RETURN a ORDER BY r LIMIT {0};\".format(topk) print(query) res", "size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as minus_degree0, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'APPLE'})) as minus_degree1, #", "ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] + \" RETURN \" for i in range(n):", "for i in range(res.shape[0]): # random_set[i] = res.iloc[i,0] def browser(topk: int = 10,", "0 end)+\".format(str(i)) query = query[:-1] + \" desc\" query += \",degree SKIP {0}", "'grapes', 'lemon', 'mango', 'milk', 'mushroom', 'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood', 'shrimp',", "'milk', 'mushroom', 'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood', 'shrimp', 'strawberry', 'tomatoes', 'watermelon',", "RETURN rep, r0, i0, minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree # ORDER BY", "\"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH 
(a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if", "'ham', 'sausage', 'butter', 'bread', 'rice', 'vanilla']) def getRecipes( ingr: List[str], topk: int =", "(rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary == 'fruitarian': query +=", "str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] + \" RETURN \" for i in", "is not None: for dietary in dietaryList: if dietary == 'halal': query +=", "= pd.DataFrame(res) recipes = [] for i in range(res.shape[0]): recipes.append(res.iloc[i,0]) return recipes ########################################", "range(n): query += \"OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) \".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) if", "range(res.shape[0]): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 3 ######################################## # print(browser(dietaryList=['halal','fruitarian'], cuisine='chinese'))", "a.halal is null \" elif dietary == 'vegetarian': vegan = 'vegan' query +=", "r LIMIT {0};\".format(topk) print(query) res = graph.run(query) res = pd.DataFrame(res) recipes = []", "\" elif dietary == 'eggetarian': query += \"MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is", "str = None, skip: int = 0) -> List[Dict]: n = len(ingr) if", "{0}\".format(str(length)) # res = graph.run(query) # res = pd.DataFrame(res) # for i in", "List[str] = None, cuisine: str = None) -> List[Dict]: query = \"MATCH (a:recipe)", "query += \"MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'})", "(rep:recipe) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN rep def getIngredient(id: str, rep:", "# RETURN 
a # def random_init(length = 50): # query = \"MATCH (n:recipe)", "= sorted(cand, key=lambda x : x[1]) query = '' for i in range(n):", "WHERE rep.halal is null \" elif dietary == 'vegetarian': vegan = 'vegan' query", "= [] for i in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit", "\" RETURN \" for i in range(n): query_indegree += \"a{0},\".format(str(i)) query_indegree = query_indegree[:-1]", "range(n): query += \"size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},\".format(ingr_type[sorted_ingr[i]][0], str(i), ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) query = query[:-1]", "BY degree\" for i in range(n): query += \"-minus_degree{0} * 2\".format(str(i)) query +=", "'APPLE'})) as minus_degree1, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'STRAWBERRY'})) as minus_degree2 # RETURN rep, r0, i0,", "query_indegree = query_indegree[:-1] + \" RETURN \" for i in range(n): query_indegree +=", "for i in range(n): query += \"r{0}, i{1}, minus_degree{2},\".format(str(i), str(i), str(i)) query +=", "+= \"degree ORDER BY degree\" for i in range(n): query += \"-minus_degree{0} *", "(a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine", "List[str], topk: int = 10, dietaryList: List[str] = None, cuisine: str = None,", "# print(getIngredient(rep)) # Sample query # MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity Smoothie'", "pandas as pd from operator import itemgetter from typing import List, Dict import", "import Node, Relationship, Graph, NodeMatcher import pandas as pd from operator import itemgetter", "when minus_degree0>=1 then 1 else 0 end)+(case when minus_degree1>=1 then 1 else 0", "from operator import itemgetter from typing import List, Dict import random graph =", "random 
graph = Graph(\"http://localhost:7474\", username=\"neo4j\", password='<PASSWORD>') main_ingr = set(['apple', 'banana', 'bell pepper', 'broccoli',", "= getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese') # print(type(res[0])) # Sample query # query =", "print(getRecipeByName(rep)) # Sample query # MATCH (rep:recipe) # WHERE rep.Name=~'(?i)super Fruity Smoothie' #", "+= \"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},\".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] + \" RETURN", "# RETURN rep def getIngredient(id: str, rep: str) -> List[str]: query = \"MATCH", "(rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None: query += \"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine)", "minus_degree2,degree # ORDER BY degree-minus_degree0 * 2-minus_degree1 * 2-minus_degree2 * 2, # (case", "pd.DataFrame(res) # print(res) recipes = [] for i in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0]) return", ": x[1]) query = '' for i in range(n): query += \"OPTIONAL MATCH", "= None, cuisine: str = None) -> List[Dict]: query = \"MATCH (a:recipe) WITH", "List[str] = None, cuisine: str = None, skip: int = 0) -> List[Dict]:", "'eggplant', 'lettuce', 'onion', 'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries',", "'Has_Ingredient', 'ingredient') cand = {name: 0 for name in ingr} query_indegree = \"WITH", "= pd.DataFrame(res) # print(res) recipes = [] for i in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0])", "\"a{0},\".format(str(i)) query_indegree = query_indegree[:-1] res = graph.run(query_indegree) indegrees = pd.DataFrame(res) for i, name", "' ' query += \"RETURN rep, \" for i in range(n): query +=", "'strawberry'], dietaryList=['vegetarian'], cuisine='chinese') # print(type(res[0])) # Sample query # query = # '''", "as 
a{1},\".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] + \" RETURN \" for", "* 2, # (case when minus_degree0>=1 then 1 else 0 end)+(case when minus_degree1>=1", "i in range(n): query += \"OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) \".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1],", "= len(ingr) if (n == 0): return [{}] ingr_type = {} for it", "i{1}, \".format(str(i), str(i)) query += \"(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, \" for i", "LIMIT 25; # ''' def getRecipeByName(rep: str) -> Dict: query = \"MATCH (rep:recipe)", "# WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN rep def getIngredient(id: str, rep: str)", "(n:recipe) RETURN n LIMIT {0}\".format(str(length)) # res = graph.run(query) # res = pd.DataFrame(res)", "rep = 'super Fruity Smoothie' # print(getRecipeByName(rep)) # Sample query # MATCH (rep:recipe)", "query += \"RETURN rep, \" for i in range(n): query += \"r{0}, i{1},", "+= \",\" for i in range(n): query += \"(case when minus_degree{0}>=1 then 1", "10, dietaryList: List[str] = None, cuisine: str = None) -> List[Dict]: query =", "i1, r2, i2, rs, # (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as", "'vegetarian': vegan = 'vegan' query += \"MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan,", "# rep = 'super Fruity Smoothie' # print(getIngredient(rep)) # Sample query # MATCH", "(rep) WHERE rep.halal is null \" elif dietary == 'vegetarian': vegan = 'vegan'", "= \"MATCH (n:recipe) RETURN n LIMIT {0}\".format(str(length)) # res = graph.run(query) # res", "'kiwi', 'bitter melon', 'pepper', 
'peas', 'ginger', 'shells', 'chili', 'ham', 'sausage', 'butter', 'bread', 'rice',", "\",degree SKIP {0} LIMIT 25;\".format(skip * topk) print(query) res = graph.run(query) res =", "in dietaryList: if dietary == 'halal': query += \"MATCH (rep) WHERE rep.halal is", "OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name: 'STRAWBERRY'})) # MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) # WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'}) #", "= graph.run(query) # res = pd.DataFrame(res) # for i in range(res.shape[0]): # random_set[i]", "Test 2 ######################################## # rep = 'super Fruity Smoothie' # print(getRecipeByName(rep)) # Sample", "'mushroom', 'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood', 'shrimp', 'strawberry', 'tomatoes', 'watermelon', 'winter", "sorted(cand, key=lambda x : x[1]) query = '' for i in range(n): query", "then 1 else 0 end) desc,degree LIMIT 25; # ''' def getRecipeByName(rep: str)", "ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) query = query[:-1] + ' ' query += \"RETURN rep, \"", "for dietary in dietaryList: if dietary == 'halal': query += \"MATCH (a) WHERE", "desc,degree LIMIT 25; # ''' def getRecipeByName(rep: str) -> Dict: query = \"MATCH", "= graph.run(query) res = pd.DataFrame(res) # print(res) recipes = [] for i in", "= 'vegan' query += \"MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif", "rep def getIngredient(id: str, rep: str) -> List[str]: query = \"MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE", "dietary == 'halal': query += \"MATCH (a) WHERE a.halal is null \" elif", "# query = \"MATCH (n:recipe) RETURN n LIMIT {0}\".format(str(length)) # res = graph.run(query)", "# MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN a # def", "cuisine: str = None, skip: int = 0) -> 
List[Dict]: n = len(ingr)", "* 2-minus_degree1 * 2-minus_degree2 * 2, # (case when minus_degree0>=1 then 1 else", "# query = # ''' # OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'})) # OPTIONAL MATCH", "List[Dict]: query = \"MATCH (a:recipe) WITH rand() as r, a \" if dietaryList", "minus_degree1,r2, i2, minus_degree2,degree # ORDER BY degree-minus_degree0 * 2-minus_degree1 * 2-minus_degree2 * 2,", "vegan) elif dietary == 'fruitarian': query += \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary ==", "is main ingredient') else: ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient') cand = {name: 0", "rep, r0, i0, r1, i1, r2, i2, rs, # (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as", "elif dietary == 'vegetarian': vegan = 'vegan' query += \"MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE", "Fruity Smoothie' # print(getRecipeByName(rep)) # Sample query # MATCH (rep:recipe) # WHERE rep.Name=~'(?i)super", "rep.halal is null \" elif dietary == 'vegetarian': vegan = 'vegan' query +=", "+= \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None: query += \"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name:", "# ''' def getRecipeByName(rep: str) -> Dict: query = \"MATCH (rep:recipe) WHERE rep.Name=~'(?i){0}'", "in enumerate(ingr): cand[name] = indegrees.iloc[[0],[i]].values[0][0] sorted_ingr = sorted(cand, key=lambda x : x[1]) query", "\",\" for i in range(n): query += \"(case when minus_degree{0}>=1 then 1 else", "(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as minus_degree0, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'APPLE'})) as", "= # ''' # OPTIONAL MATCH 
((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'})) # OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'}))", "i in range(min(topk, res.shape[0])): recipes.append(res.iloc[i,0]) return recipes ######################################## # Unit Test 1 ########################################", "query # MATCH (rep:recipe) # WHERE rep.Name=~'(?i)super Fruity Smoothie' # RETURN rep def", "+= \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \"", "'berries', 'crabmeat', 'kiwi', 'bitter melon', 'pepper', 'peas', 'ginger', 'shells', 'chili', 'ham', 'sausage', 'butter',", "= (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, ' is main ingredient') else: ingr_type[it] = (it.lower(),", "ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) if dietaryList is not None: for dietary in dietaryList: if dietary", "''' # OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'})) # OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'})) # OPTIONAL", "minus_degree1, # size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'STRAWBERRY'})) as minus_degree2 # RETURN rep, r0, i0, minus_degree0,r1, i1,", "= graph.run(query) res = pd.DataFrame(res) recipes = [] for i in range(res.shape[0]): recipes.append(res.iloc[i,0])", "(a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) \".format(vegan, vegan) elif dietary == 'fruitarian': query += \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \"", "ingr_type[sorted_ingr[i]][2]) query = query[:-1] + ' ' query += \"RETURN rep, \" for", "ingr_type[sorted_ingr[i]][2]) if dietaryList is not None: for dietary in dietaryList: if dietary ==", "name in enumerate(ingr): cand[name] = indegrees.iloc[[0],[i]].values[0][0] sorted_ingr = sorted(cand, key=lambda x : 
x[1])", "getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese') # print(type(res[0])) # Sample query # query = #", "indegrees.iloc[[0],[i]].values[0][0] sorted_ingr = sorted(cand, key=lambda x : x[1]) query = '' for i", "ingr_type[ingr[i]][2]) query_indegree = query_indegree[:-1] + \" RETURN \" for i in range(n): query_indegree", "'' for i in range(n): query += \"OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) \".format(str(i), str(i),", "minus_degree{2},\".format(str(i), str(i), str(i)) query += \"degree ORDER BY degree\" for i in range(n):", "if (n == 0): return [{}] ingr_type = {} for it in ingr:", "str(i), ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) query = query[:-1] + ' ' query += \"RETURN rep,", "desc\" query += \",degree SKIP {0} LIMIT 25;\".format(skip * topk) print(query) res =", "res = graph.run(query) res = pd.DataFrame(res) recipes = [] for i in range(res.shape[0]):", "graph.run(query) res = pd.DataFrame(res) if res.empty: return None return res.iloc[0,0] ######################################## # Unit", "password='<PASSWORD>') main_ingr = set(['apple', 'banana', 'bell pepper', 'broccoli', 'cabbage', 'carrot', 'cheese', 'coconut', 'cucumber',", "((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) \".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2]) if dietaryList is not None: for", "ORDER BY degree\" for i in range(n): query += \"-minus_degree{0} * 2\".format(str(i)) query", "2 ######################################## # rep = 'super Fruity Smoothie' # print(getRecipeByName(rep)) # Sample query", "\" desc\" query += \",degree SKIP {0} LIMIT 25;\".format(skip * topk) print(query) res", "query += \"MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'})", "== 'eggetarian': query += \"MATCH 
(a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \" if cuisine is not None: query", "recipes ######################################## # Unit Test 1 ######################################## # res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'],", "# Sample query # query = # ''' # OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'}))", "# for i in range(res.shape[0]): # random_set[i] = res.iloc[i,0] def browser(topk: int =", "as degree, \" for i in range(n): query += \"size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},\".format(ingr_type[sorted_ingr[i]][0],", "if dietary == 'halal': query += \"MATCH (a) WHERE a.halal is null \"", "'vanilla']) def getRecipes( ingr: List[str], topk: int = 10, dietaryList: List[str] = None,", "= (it.lower(), 'Has_Ingredient', 'ingredient') cand = {name: 0 for name in ingr} query_indegree", "query_indegree = \"WITH \" for i in range(n): query_indegree += \"size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},\".format(ingr_type[ingr[i]][0],", "for i, name in enumerate(ingr): cand[name] = indegrees.iloc[[0],[i]].values[0][0] sorted_ingr = sorted(cand, key=lambda x", "r0, i0, r1, i1, r2, i2, rs, # (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree,", "+ \" RETURN \" for i in range(n): query_indegree += \"a{0},\".format(str(i)) query_indegree =", "+= \"MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary == 'eggetarian': query += \"MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) \"", "in range(n): query_indegree += \"a{0},\".format(str(i)) query_indegree = query_indegree[:-1] res = graph.run(query_indegree) indegrees =", "[] for i in range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return ingrs ######################################## # Unit Test 3", "query = \"MATCH (n:recipe) RETURN n LIMIT {0}\".format(str(length)) 
# res = graph.run(query) #", "vegan) elif dietary == 'fruitarian': query += \"MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) \" elif dietary ==", "None: query += \"MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) \".format(cuisine) query += \"WITH rep, \" for", "'BANANA'})) # OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'})) # OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name: 'STRAWBERRY'})) # MATCH", "return recipes ######################################## # Unit Test 1 ######################################## # res = getRecipes(['apple','banana', 'strawberry'],", "ingrs = [] for i in range(res.shape[0]): ingrs.append(res.iloc[i,0]['Name']) return ingrs ######################################## # Unit", "main_ingr: ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, ' is main ingredient') else: ingr_type[it]", "+= \"(case when minus_degree{0}>=1 then 1 else 0 end)+\".format(str(i)) query = query[:-1] +", "(rep:recipe) WHERE rep.Name=~'(?i){0}' RETURN rep\".format(rep) res = graph.run(query) res = pd.DataFrame(res) if res.empty:", "pd from operator import itemgetter from typing import List, Dict import random graph", "ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient') print(it, ' is main ingredient') else: ingr_type[it] =" ]
[ "if slider_min_val > slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY:", "dictionary_data in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info)", "1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name in", "[sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col = [ [sg.Frame('Training generator', [generator_scheme])],", "new_min_val > current_max_val: new_max_val = new_min_val # range max value may affect sliders", "key break return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements according to", "config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY =", "GUI - sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS", "key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH,", "Members self.files_table_idx = -1 self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes()", "default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme", "event == self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) 
return", "- sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS MIN-'", "# GUI - input config FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI - table", "[sg.Text('From set:'), sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id,", "[letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col = [ [sg.Frame('Training generator',", "size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY),", "None \"\"\" # on file selection cancel values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY]) >", "values, new_range): \"\"\"Updates UI part related to words length sliders change their range", "files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[ name for", "values new_range (tuple): New value range Returns: new_min_val, new_max_val (tuple): Updated words length", "if data == lookup_value: result = key break return result def _update_ui_on_dictionary_set_change(self, values):", "0, False), (\"File name\", 14, True), (\"Words\", 6, True), (\"Min len\", 7, True),", "E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY", "orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for", "in files_data_header], visible_column_map=[ visible for _name, _size, visible in files_data_header], num_rows=5, 
justification='left', auto_size_columns=False,", "lookup_value: result = key break return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI", "'-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS", "headings=[name for name, _size, _visible in files_data_header], col_widths=[size for _name, size, _visible in", "quit or window was closed if event == sg.WINDOW_CLOSED: self.window.close() return False #", "should be found nested_key (str): key in nested dictionary where lookup_value is Returns:", "os import sys import PySimpleGUI as sg class CwGenUI: # General # GUI", "list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length change if (event", "= '-WORDS TO GEN-' # GUI - sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT", "containing GUI elements values Returns: None \"\"\" # get current positions slider_min_val =", "needed Args: values (dict): Dictionary containing GUI elements values new_range (tuple): New value", "range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max)", "for name, _size, _visible in files_data_header], col_widths=[size for _name, size, _visible in files_data_header],", "event == self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if", "None Returns: None \"\"\" event, values = self.window.read() # See if user wants", "COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" # Members 
self.files_table_idx = -1", "COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" #", "assemble words stat table (sorted by word length) stat = [] if words_stat_filtered:", "current_max_val: new_max_val = new_range_max if new_max_val < current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range,", "0: sliders_range = (words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length =", "(str): GUI event name values (dict): Dictionary containing GUI elements values Returns: None", "None \"\"\" event, values = self.window.read() # See if user wants to quit", "length\", 15, True), (\"Count\", 15, True) ] # GUI - tables files_data_table =", "= [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h',", "GUI elements values Returns: None \"\"\" # on file selection cancel values[FILE_PATH_INPUT_KEY] is", "loop yet) Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set =", "size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:',", "def handle_dictionary_add(self, values): \"\"\"Handle new dictionary addition by passing file path to cwgen.", "the list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length change if", "result = key break return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements", "words_max_length = self.handle_words_length_sliders( event, values) 
self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle letters set", "Args: values (dict): Dictionary containing GUI elements values Returns: None \"\"\" table_data =", "'-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE", "current_min_val: new_min_val = new_range_min if new_min_val > current_max_val: new_max_val = new_min_val # range", "key, value in dictionary.items(): if nested_key is not None: data = value[nested_key] else:", "value for which key should be found nested_key (str): key in nested dictionary", "None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme =", "dispatched for handling Args: None Returns: None \"\"\" event, values = self.window.read() #", "self.handle_dictionary_add(values) # remove dictionary from the list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) #", "dictionary to search for a key lookup_value (str): value for which key should", "change their range assuring that sliders values gets updated when needed Args: values", "first key (in insertion order) will be returned. Args: dictionary (dict): dictionary to", "change if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True #", "= '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY =", "= (0, 0) # get information related to already loaded data dictionaries_info =", "WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY =", "in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation = [sg.Input(enable_events=True,", "material could be generated. 
Args: values(): min_length (int): Minimal words length passed in", "START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY =", "with filtered result which allow user to see the data out of which", "GUI - columns left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words", "WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' # GUI - sliders", "TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-'", "self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)),", "= self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words", "1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)]", "START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY", "= '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY =", "information related to already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() #", "if new_max_val < current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val)", "GUI - header columns -> name, column size, visible? 
files_data_header = [ (\"UUID\",", "values become ridiculous. Args: event (str): GUI event name values (dict): Dictionary containing", "# remove dictionary from the list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle", "FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY", "returned. Args: dictionary (dict): dictionary to search for a key lookup_value (str): value", "in files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[ name", "self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index to negative", "return False # Remember index of selected table row if event == self.FILES_DATA_TABLE_KEY:", "Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\",", "the GUI ui = CwGenUI() # Display and interact with the GUI using", "(dict): dictionary to search for a key lookup_value (str): value for which key", "self.window.read() # See if user wants to quit or window was closed if", "RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-'", "enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2,", "[sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True,", "E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER 
ONLINE-' # GUI -", "e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),", "> current_min_val: new_min_val = new_range_min if new_min_val > current_max_val: new_max_val = new_min_val #", "sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\",", "key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH,", "self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val =", "will be returned. Args: dictionary (dict): dictionary to search for a key lookup_value", "key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm", "self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate updated data for UI elements if len(dictionaries_info)", "(\"Word length\", 15, True), (\"Count\", 15, True) ] # GUI - tables files_data_table", "of keys with exact same value first key (in insertion order) will be", "which training material could be generated. 
Args: values(): min_length (int): Minimal words length", "not None: words_max_length = max_length # get filtered words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered(", "handle letters set and generator scheme change if (event == self.COMBO_LETTERS_SET_KEY) or (event", "if lookup_value not found ''' result = None for key, value in dictionary.items():", "dictionary remove button click self.files_table_idx = -1 def handle_words_length_sliders(self, event, values): \"\"\"Handle words", "size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])],", "based on provided string value keeping insertion order, meaning if dictionary contain a", "Minimal words length passed in when reading the value from self.window is not", "1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\",", "UI part related to words length sliders change their range assuring that sliders", "a dictionary to the list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary", "= '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY =", "new_range_max = new_range new_min_val = current_min_val new_max_val = current_max_val # range min value", "self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train", "length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col = [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training", "= new_min_val # range max value may affect sliders position if new_range_max <", "RANGE STOP-' 
E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY", "= '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS", "(str): key in nested dictionary where lookup_value is Returns: result (str): key or", "window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a key", "values (dict): Dictionary containing GUI elements values Returns: None \"\"\" table_data = []", "if event == self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val)", "> current_max_val: new_max_val = new_min_val # range max value may affect sliders position", "words_to_gen_header], col_widths=[ size for _name, size, _visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)]", "update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length,", "= CwGenUI() # Display and interact with the GUI using an Event Loop", "[ (\"UUID\", 0, False), (\"File name\", 14, True), (\"Words\", 6, True), (\"Min len\",", "their values become ridiculous. 
Args: event (str): GUI event name values (dict): Dictionary", "[sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout layout =", "for _name, size, _visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI -", "RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE", "STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY =", "int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val = current_min_val new_max_val =", "None \"\"\" # self.files_table_idx == -1 when no dictionary in the table is", "if event == sg.WINDOW_CLOSED: self.window.close() return False # Remember index of selected table", "== self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI theming sg.theme('Default1') # Start the GUI", "len\", 7, True) ] words_filtered_header = [ (\"Word length\", 15, True), (\"Count\", 15,", "self._update_ui_on_words_filtering_change(values) return True # UI theming sg.theme('Default1') # Start the GUI ui =", "gets dispatched for handling Args: None Returns: None \"\"\" event, values = self.window.read()", "(slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part related to words length", "GUI using an Event Loop while ui.handleGui(): pass # Game over del ui", "< current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min)", "Dictionary containing GUI elements values Returns: None \"\"\" # 
self.files_table_idx == -1 when", "(dict): Dictionary containing GUI elements values Returns: None \"\"\" table_data = [] sliders_range", "key (in insertion order) will be returned. Args: dictionary (dict): dictionary to search", "position if new_range_max < current_range_max: if new_range_max < current_max_val: new_max_val = new_range_max if", "remove button click self.files_table_idx = -1 def handle_words_length_sliders(self, event, values): \"\"\"Handle words length", "and create the window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None):", "LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY =", "def __init__(self): \"\"\"Class initialization\"\"\" # Members self.files_table_idx = -1 self.cw_gen = cwgen.CwGen() self.letters_sets", "index of selected table row if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] #", "(str): value for which key should be found nested_key (str): key in nested", "files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col =", "key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col = [ [sg.Frame('Dictionaries',", "sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version =", "WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-'", "1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, 
key=self.E2CW_PITCH_KEY), sg.Text(\"0\",", "list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from the list if", "return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements according to change in", "7, True), (\"Max len\", 7, True) ] words_filtered_header = [ (\"Word length\", 15,", "== lookup_value: result = key break return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant", "- button config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY =", "self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not None: words_min_length = min_length if max_length is", "size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0,", "gets updated. Args: values (dict): Dictionary containing GUI elements values Returns: None \"\"\"", "if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path storage to properly handle CANCEL situation", "= int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not", "# on file selection cancel values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path", "table_data = [] sliders_range = (0, 0) # get information related to already", "key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1),", "elements values Returns: None \"\"\" # get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val", "event (str): GUI event name values (dict): Dictionary containing GUI elements values Returns:", "True) ] # GUI - 
tables files_data_table = [sg.Table(values=[], headings=[name for name, _size,", "handle_words_length_sliders(self, event, values): \"\"\"Handle words length sliders movement to not let their values", "= self.cw_gen.get_words_stat() # generate updated data for UI elements if len(dictionaries_info) > 0:", "[sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]]", "= int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if", "< current_max_val: new_max_val = new_range_max if new_max_val < current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update(", "'-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS", "UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length)", "# GUI - text config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW", "the window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a", "# range min value may affect sliders position if new_range_min > current_range_min: if", "key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection',", "words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new 
dictionary addition by", "self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat table (sorted by word length) stat =", "related to already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate", "[sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True,", "sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2,", ") if min_length is not None: words_min_length = min_length if max_length is not", "[words_filtered_table])]] right_col = [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output',", "0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local", "main loop where all events gets dispatched for handling Args: None Returns: None", "num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[ name for name,", ")] words_filtered_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in words_filtered_header], col_widths=[", "order, meaning if dictionary contain a number of keys with exact same value", "data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate updated data for UI", "slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY: if 
slider_max_val < slider_min_val: slider_min_val =", "= 10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS", "files_data_header], col_widths=[size for _name, size, _visible in files_data_header], visible_column_map=[ visible for _name, _size,", "min_length if max_length is not None: words_max_length = max_length # get filtered words", "False), (\"File name\", 14, True), (\"Words\", 6, True), (\"Min len\", 7, True), (\"Max", "words_to_gen_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in words_to_gen_header], col_widths=[ size", "slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY: if slider_max_val <", "is not None: words_max_length = max_length # get filtered words stat words_stat_filtered =", "GUI - table config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY", "words stat with filtered result which allow user to see the data out", "= values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to the list if event == self.FILE_PATH_INPUT_KEY:", "input config FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI - table config FILES_DATA_TABLE_KEY =", "relevant UI elements according to change in dictionary set. 
Args: values (dict): Dictionary", "the value from self.window is not yet updated (window hadling did not advanced", "for UI elements if len(dictionaries_info) > 0: for dictionary_data in dictionaries_info: row =", "self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description']", "= [] sliders_range = (0, 0) # get information related to already loaded", "event == sg.WINDOW_CLOSED: self.window.close() return False # Remember index of selected table row", "= '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" # Members", "Maximal words length passed in when reading the value from self.window is not", "provided string value keeping insertion order, meaning if dictionary contain a number of", "e2cw_buttons])]] # App layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and create", "/ Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)),", "_visible in words_to_gen_header], col_widths=[ size for _name, size, _visible in words_to_gen_header], num_rows=5, justification='left',", "GUI ui = CwGenUI() # Display and interact with the GUI using an", "if needed if event == self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val = slider_min_val", "elements values Returns: None \"\"\" # self.files_table_idx == -1 when no dictionary in", "data out of which training material could be generated. Args: values(): min_length (int):", "0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\",", "passing its generated UUID to cwgen. 
UI gets updated. Args: values (dict): Dictionary", "(\"Word length\", 15, True), (\"Count\", 15, True) ] words_to_gen_header = [ (\"Word length\",", "self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words stat with", "clear file path storage to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values):", "right_col = [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])],", "14, True), (\"Words\", 6, True), (\"Min len\", 7, True), (\"Max len\", 7, True)", "letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),", "keys with exact same value first key (in insertion order) will be returned.", "size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1),", "files_data_header = [ (\"UUID\", 0, False), (\"File name\", 14, True), (\"Words\", 6, True),", "reading the value from self.window is not yet updated (window hadling did not", "True # UI theming sg.theme('Default1') # Start the GUI ui = CwGenUI() #", "handle words length change if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length,", "(tuple): New value range Returns: new_min_val, new_max_val (tuple): Updated words length sliders values", "self.window.close() return False # Remember index of selected table row if event ==", "user to see the data out of which training material could be generated.", "-> name, column size, visible? 
files_data_header = [ (\"UUID\", 0, False), (\"File name\",", "= cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online =", "ridiculous. Args: event (str): GUI event name values (dict): Dictionary containing GUI elements", "= [ (\"Word length\", 15, True), (\"Count\", 15, True) ] # GUI -", "self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main loop where all events", "in files_data_header], col_widths=[size for _name, size, _visible in files_data_header], visible_column_map=[ visible for _name,", "size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY),", "\"\"\" # on file selection cancel values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0:", "next loop yet) Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set", "cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online()", "result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements according to change in dictionary", "button click self.files_table_idx = -1 def handle_words_length_sliders(self, event, values): \"\"\"Handle words length sliders", "GUI elements values Returns: None \"\"\" # self.files_table_idx == -1 when no dictionary", "max_length # get filtered words stat words_stat_filtered = 
self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets,", "files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0),", "WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE", "# GUI - window config WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE'", "updated (window hadling did not advanced to the next loop yet) max_length (int):", "self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI -", "sliders values \"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY])", "# handle words length change if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY):", "General # GUI - window config WINDOW_DESCRIPTION = 'CW training material generator by", "self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI - header columns -> name, column size,", "Args: event (str): GUI event name values (dict): Dictionary containing GUI elements values", "self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main loop where", "UI theming sg.theme('Default1') # Start the GUI ui = CwGenUI() # Display and", "file path storage to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values): \"\"\"Handle", "to negative to 
properly handle dictionary remove button click self.files_table_idx = -1 def", "self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val,", "WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE' # GUI - text config", "if len(dictionaries_info) > 0: for dictionary_data in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'],", "'CW training material generator by SP6HFE' # GUI - text config E2CW_VER_LOCAL_KEY =", "= self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI", "# GUI - columns left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])],", "files_data_header], visible_column_map=[ visible for _name, _size, visible in files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True,", "for _id, data in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'),", "True), (\"Words\", 6, True), (\"Min len\", 7, True), (\"Max len\", 7, True) ]", "self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle letters set and generator scheme change if", "handling Args: None Returns: None \"\"\" event, values = self.window.read() # See if", "for key, value in dictionary.items(): if nested_key is not None: data = value[nested_key]", "set. 
Args: values (dict): Dictionary containing GUI elements values Returns: None \"\"\" table_data", "sg.WINDOW_CLOSED: self.window.close() return False # Remember index of selected table row if event", "[sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True,", "not None: data = value[nested_key] else: data = value if data == lookup_value:", "__init__(self): \"\"\"Class initialization\"\"\" # Members self.files_table_idx = -1 self.cw_gen = cwgen.CwGen() self.letters_sets =", "'-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' # GUI", "on provided string value keeping insertion order, meaning if dictionary contain a number", "= int(values[self.LETTERS_MAX_KEY]) # update them if needed if event == self.LETTERS_MIN_KEY: if slider_min_val", "sliders values gets updated when needed Args: values (dict): Dictionary containing GUI elements", "FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-' # GUI - input", "current_max_val # range min value may affect sliders position if new_range_min > current_range_min:", "values (dict): Dictionary containing GUI elements values Returns: None \"\"\" # on file", "table_data.append(row) if len(words_info) > 0: sliders_range = (words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update(", "] # GUI - tables files_data_table = [sg.Table(values=[], headings=[name for name, _size, _visible", "MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX", "self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not None: words_min_length = min_length", "self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def 
_get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a key based", "'-FILE PATH-' # GUI - table config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY =", "1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15,", "Returns: None \"\"\" # on file selection cancel values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY])", "-1 when no dictionary in the table is selected if self.files_table_idx >= 0:", "import sys import PySimpleGUI as sg class CwGenUI: # General # GUI -", "(event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI theming sg.theme('Default1') # Start the", "] words_filtered_header = [ (\"Word length\", 15, True), (\"Count\", 15, True) ] words_to_gen_header", "according to change in dictionary set. Args: values (dict): Dictionary containing GUI elements", "(\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)),", "values(): min_length (int): Minimal words length passed in when reading the value from", "PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-' # GUI - combo", "self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length change if (event == self.LETTERS_MIN_KEY) or (event", "new_range_max < current_max_val: new_max_val = new_range_max if new_max_val < current_min_val: new_min_val = new_max_val", "rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\",", "length sliders values \"\"\" current_range_min, current_range_max = 
self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val =", "size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0,", "version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons =", "update them if needed if event == self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val", "= int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val = current_min_val new_max_val = current_max_val #", "'-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY", "(\"Min len\", 7, True), (\"Max len\", 7, True) ] words_filtered_header = [ (\"Word", "key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH,", "ONLINE-' # GUI - button config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE", "Args: values(): min_length (int): Minimal words length passed in when reading the value", "theming sg.theme('Default1') # Start the GUI ui = CwGenUI() # Display and interact", "key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in words_to_gen_header], col_widths=[", "self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part related", "value may affect sliders position if new_range_max < current_range_max: if new_range_max < current_max_val:", "_name, _size, visible in files_data_header], num_rows=5, 
justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table =", "lookup_value (str): value for which key should be found nested_key (str): key in", "deletion by passing its generated UUID to cwgen. UI gets updated. Args: values", "TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY =", "0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\",", "when needed Args: values (dict): Dictionary containing GUI elements values new_range (tuple): New", "new_range_min if new_min_val > current_max_val: new_max_val = new_min_val # range max value may", "and interact with the GUI using an Event Loop while ui.handleGui(): pass #", "== self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to the list if", "WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY", "value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def", "values Returns: None \"\"\" # self.files_table_idx == -1 when no dictionary in the", "config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-' # GUI", "sg class CwGenUI: # General # GUI - window config WINDOW_DESCRIPTION = 'CW", "_size, _visible in words_to_gen_header], col_widths=[ size for _name, size, _visible in words_to_gen_header], num_rows=5,", "'-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY = 
'-E2CW", "= '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY =", "_size, visible in files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[],", "'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM", "STOP-' # GUI - combo config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL", "GEN-' # GUI - sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY", "e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online,", "= [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h',", "= '-E2CW VER ONLINE-' # GUI - button config FILE_BROWSE_KEY = '-ADD FILE-'", "sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns =", "self.handle_dictionary_delete(values) # handle words length change if (event == self.LETTERS_MIN_KEY) or (event ==", "See if user wants to quit or window was closed if event ==", "[files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col", "key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY),", "selected table row if event == self.FILES_DATA_TABLE_KEY: 
self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a", "words_max_length = max_length # get filtered words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length,", "1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0),", "'''Retrieves a key based on provided string value keeping insertion order, meaning if", "current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max =", "= '-E2CW PITCH RANGE STOP-' # GUI - combo config COMBO_LETTERS_SET_KEY = '-LETTERS", "WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' # GUI - sliders config H_SLIDER_WIDTH = 21", "for _name, size, _visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[],", "columns -> name, column size, visible? 
files_data_header = [ (\"UUID\", 0, False), (\"File", "if len(words_info) > 0: sliders_range = (words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data)", "config FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI - table config FILES_DATA_TABLE_KEY = '-FILES", "self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme =", "self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to the list if event", ">= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set", "= [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible", "PITCH RANGE STOP-' # GUI - combo config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY", "Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2,", "size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY),", "# Add a dictionary to the list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) #", "15, True) ] words_to_gen_header = [ (\"Word length\", 15, True), (\"Count\", 15, True)", "words_info = self.cw_gen.get_words_stat() # generate updated data for UI elements if len(dictionaries_info) >", "key lookup_value (str): value for which key should be found 
nested_key (str): key", "not None: words_min_length = min_length if max_length is not None: words_max_length = max_length", "MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY =", "sg.Column(right_col)]] # Configure and create the window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self,", "== self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change(", "_update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words stat with filtered result which allow user", "max_length (int): Maximal words length passed in when reading the value from self.window", "size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]]", "= [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training", "WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-'", "GUI - text config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER", "value may affect sliders position if new_range_min > current_range_min: if new_range_min > current_min_val:", "name\", 14, True), (\"Words\", 6, True), (\"Min len\", 7, True), (\"Max len\", 7,", "self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)]", "= '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW 
PITCH", "> 0: sliders_range = (words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length", "as sg class CwGenUI: # General # GUI - window config WINDOW_DESCRIPTION =", "get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them if", "'-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW", "1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\",", "word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values):", "# generate updated data for UI elements if len(dictionaries_info) > 0: for dictionary_data", "part related to words length sliders change their range assuring that sliders values", "'-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE", "in dictionary.items(): if nested_key is not None: data = value[nested_key] else: data =", "-1 self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local()", "0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path", "text config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-' #", "file path to cwgen. UI gets updated. 
Args: values (dict): Dictionary containing GUI", "letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col = [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size',", "dictionary set. Args: values (dict): Dictionary containing GUI elements values Returns: None \"\"\"", "advanced to the next loop yet) Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length", "file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min", "'-E2CW PITCH RANGE STOP-' # GUI - combo config COMBO_LETTERS_SET_KEY = '-LETTERS SET-'", "dictionary.items(): if nested_key is not None: data = value[nested_key] else: data = value", "scheme change if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True", "not found ''' result = None for key, value in dictionary.items(): if nested_key", "if self.files_table_idx >= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values)", "slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part related to words length sliders", "= '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-' # GUI", "dictionary from the list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length", "# assemble words stat table (sorted by word length) stat = [] if", "= '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS", "result which allow user to see the data out of which training material", "new_min_val # 
range max value may affect sliders position if new_range_max < current_range_max:", "max_length is not None: words_max_length = max_length # get filtered words stat words_stat_filtered", "when no dictionary in the table is selected if self.files_table_idx >= 0: table_data", "MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-'", "table config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS", "if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path storage to properly handle", "else: data = value if data == lookup_value: result = key break return", "config WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE' # GUI - text", "be returned. Args: dictionary (dict): dictionary to search for a key lookup_value (str):", "'-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' # GUI - sliders config H_SLIDER_WIDTH", "'-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS", "self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val)", "\"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4,", "FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-'", "filtered result which allow user to see the data out of which training", "next loop yet) max_length (int): Maximal words length passed in when reading the", "Args: None Returns: None \"\"\" event, values = self.window.read() # See if user", "data = value if data == lookup_value: 
result = key break return result", "> slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY: if slider_max_val", "sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2,", "(event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event, values)", "size, _visible in files_data_header], visible_column_map=[ visible for _name, _size, visible in files_data_header], num_rows=5,", "containing GUI elements values Returns: None \"\"\" # self.files_table_idx == -1 when no", "key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)),", "Returns: None \"\"\" table_data = [] sliders_range = (0, 0) # get information", "stat = [] if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) #", "may affect sliders position if new_range_min > current_range_min: if new_range_min > current_min_val: new_min_val", "(sorted by word length) stat = [] if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()):", "< slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values,", "out of which training material could be generated. Args: values(): min_length (int): Minimal", "of which training material could be generated. 
Args: values(): min_length (int): Minimal words", "range max value may affect sliders position if new_range_max < current_range_max: if new_range_max", "[sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure", "new_range_min > current_range_min: if new_range_min > current_min_val: new_min_val = new_range_min if new_min_val >", "key in nested dictionary where lookup_value is Returns: result (str): key or None", "RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE", "(\"UUID\", 0, False), (\"File name\", 14, True), (\"Words\", 6, True), (\"Min len\", 7,", "= (words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config( values,", "value[nested_key] else: data = value if data == lookup_value: result = key break", "create the window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves", "min_length is not None: words_min_length = min_length if max_length is not None: words_max_length", "def handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion by passing its generated UUID to cwgen.", "is not None: data = value[nested_key] else: data = value if data ==", "FARNS RANGE STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-'", "dictionary in the table is selected if self.files_table_idx >= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get()", "= [sg.Table(values=[], headings=[ name for name, _size, _visible in words_filtered_header], col_widths=[ size for", "RANGE STOP-' # GUI - combo config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY =", "table is selected if self.files_table_idx >= 0: 
table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0]", "new_range): \"\"\"Updates UI part related to words length sliders change their range assuring", "nested_key is not None: data = value[nested_key] else: data = value if data", "size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6,", "[sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6,", "1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files',", "e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),", "with exact same value first key (in insertion order) will be returned. 
Args:", "negative to properly handle dictionary remove button click self.files_table_idx = -1 def handle_words_length_sliders(self,", "H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS", "index to negative to properly handle dictionary remove button click self.files_table_idx = -1", "if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) #", "_visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation =", "= [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and create the window self.window = sg.Window(self.WINDOW_DESCRIPTION,", "self.cw_gen.get_ebook2cw_version_online() # GUI - header columns -> name, column size, visible? files_data_header =", "# update them if needed if event == self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val:", "dictionary addition by passing file path to cwgen. UI gets updated. 
Args: values", "range Returns: new_min_val, new_max_val (tuple): Updated words length sliders values \"\"\" current_range_min, current_range_max", "RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY", "= [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h',", "= new_range_max if new_max_val < current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update(", "to quit or window was closed if event == sg.WINDOW_CLOSED: self.window.close() return False", "size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4,", "key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1),", "dictionary, lookup_value, nested_key=None): '''Retrieves a key based on provided string value keeping insertion", "sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in", "SP6HFE' # GUI - text config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY =", "= '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-' # GUI - button", "values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words", "VER ONLINE-' # GUI - button config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY =", 
"self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index to negative to properly handle dictionary remove", "self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI theming sg.theme('Default1') # Start the GUI ui", "self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI theming sg.theme('Default1') #", "(str): key or None if lookup_value not found ''' result = None for", "slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range):", "size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0,", "= self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle letters set and", "_name, size, _visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows", "\"\"\" # self.files_table_idx == -1 when no dictionary in the table is selected", "= '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY =", "movement to not let their values become ridiculous. 
Args: event (str): GUI event", "GUI elements values Returns: None \"\"\" # get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY])", "key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1),", "TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW", "containing GUI elements values Returns: None \"\"\" table_data = [] sliders_range = (0,", "\"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max", "self._update_ui_on_dictionary_set_change(values) # set table index to negative to properly handle dictionary remove button", "in words_to_gen_header], col_widths=[ size for _name, size, _visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False,", "size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6,", "UI elements according to change in dictionary set. Args: values (dict): Dictionary containing", "sliders_range = (0, 0) # get information related to already loaded data dictionaries_info", "return True # UI theming sg.theme('Default1') # Start the GUI ui = CwGenUI()", "events gets dispatched for handling Args: None Returns: None \"\"\" event, values =", "its generated UUID to cwgen. UI gets updated. 
Args: values (dict): Dictionary containing", "key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set", "current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range", "App layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and create the window", "'-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS", "size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training", "E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW", "key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in words_filtered_header],", "> 0: for dictionary_data in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']]", "generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=(", "None: words_max_length = max_length # get filtered words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length,", "name values (dict): Dictionary containing GUI elements values Returns: None \"\"\" # get", "if min_length is not None: words_min_length = min_length if max_length is not None:", "enable_events=True, 
key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2,", "Updated words length sliders values \"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY])", "current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update(", "# GUI - button config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-'", "enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local,", "self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat table (sorted by word", "if nested_key is not None: data = value[nested_key] else: data = value if", "(dict): Dictionary containing GUI elements values Returns: None \"\"\" # on file selection", "FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-' #", "words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change(", "sliders_range = (words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config(", "None: words_min_length = min_length if max_length is not None: 
words_max_length = max_length #", "if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI", "sys import PySimpleGUI as sg class CwGenUI: # General # GUI - window", "event name values (dict): Dictionary containing GUI elements values Returns: None \"\"\" #", "updated. Args: values (dict): Dictionary containing GUI elements values Returns: None \"\"\" #", "size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1),", "sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2,", "_visible in files_data_header], col_widths=[size for _name, size, _visible in files_data_header], visible_column_map=[ visible for", "the data out of which training material could be generated. Args: values(): min_length", "[sg.Table(values=[], headings=[ name for name, _size, _visible in words_to_gen_header], col_widths=[ size for _name,", "E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-' # GUI - combo config COMBO_LETTERS_SET_KEY =", "words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( )", "position if new_range_min > current_range_min: if new_range_min > current_min_val: new_min_val = new_range_min if", "\"\"\"Handle words length sliders movement to not let their values become ridiculous. 
Args:", "None: data = value[nested_key] else: data = value if data == lookup_value: result", "-1 def handle_words_length_sliders(self, event, values): \"\"\"Handle words length sliders movement to not let", "yet updated (window hadling did not advanced to the next loop yet) Returns:", "= 'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-'", "table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index to negative to properly handle", "E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW", "related to words length sliders change their range assuring that sliders values gets", "import PySimpleGUI as sg class CwGenUI: # General # GUI - window config", "which allow user to see the data out of which training material could", "GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" # Members self.files_table_idx = -1 self.cw_gen = cwgen.CwGen()", "current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val = current_min_val", "col_widths=[ size for _name, size, _visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] #", "'-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE", "= 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-'", "size for _name, size, _visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table =", "self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() 
ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() #", "key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH,", "dictionary deletion by passing its generated UUID to cwgen. UI gets updated. Args:", "words_filtered_header = [ (\"Word length\", 15, True), (\"Count\", 15, True) ] words_to_gen_header =", "self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates", "'-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-' # GUI -", "\"\"\" table_data = [] sliders_range = (0, 0) # get information related to", "= self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None):", "''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get(", "key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH,", "generate updated data for UI elements if len(dictionaries_info) > 0: for dictionary_data in", "let their values become ridiculous. 
Args: event (str): GUI event name values (dict):", "TO GEN-' # GUI - sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10", "key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY),", "(event == self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length)", "1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)]", "new_range_min, new_range_max = new_range new_min_val = current_min_val new_max_val = current_max_val # range min", "[word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new dictionary addition", "'-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-' # GUI - button config", "could be generated. 
Args: values(): min_length (int): Minimal words length passed in when", "self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to the list if event ==", "for _id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name in", "self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event ==", "enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0),", "< current_range_max: if new_range_max < current_max_val: new_max_val = new_range_max if new_max_val < current_min_val:", "def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a key based on provided string value", "updated data for UI elements if len(dictionaries_info) > 0: for dictionary_data in dictionaries_info:", "if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat)", "new_min_val, new_max_val (tuple): Updated words length sliders values \"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range", "get filtered words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'),", "DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' # GUI -", "Dictionary containing GUI elements values Returns: None \"\"\" table_data = [] sliders_range =", "- table config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' 
WORDS_TO_GEN_TABLE_KEY =", "(\"Max len\", 7, True) ] words_filtered_header = [ (\"Word length\", 15, True), (\"Count\",", "layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and create the window self.window =", "= -1 self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local =", "loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate updated data for", "sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new", "E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW", "if new_range_max < current_max_val: new_max_val = new_range_max if new_max_val < current_min_val: new_min_val =", "self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from the list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values)", "(\"Count\", 15, True) ] words_to_gen_header = [ (\"Word length\", 15, True), (\"Count\", 15,", "- tables files_data_table = [sg.Table(values=[], headings=[name for name, _size, _visible in files_data_header], col_widths=[size", "name, _size, _visible in words_to_gen_header], col_widths=[ size for _name, size, _visible in words_to_gen_header],", "values): \"\"\"Handle words length sliders movement to not let their values become ridiculous.", "= self.window.read() # See if user wants to quit or window was closed", "values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if", "justification='left', auto_size_columns=False, 
enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[ name for name, _size,", "= sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a key based on", "set and generator scheme change if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY):", "import os import sys import PySimpleGUI as sg class CwGenUI: # General #", "# Display and interact with the GUI using an Event Loop while ui.handleGui():", "self.files_table_idx = -1 def handle_words_length_sliders(self, event, values): \"\"\"Handle words length sliders movement to", "is not None: words_min_length = min_length if max_length is not None: words_max_length =", "> current_range_min: if new_range_min > current_min_val: new_min_val = new_range_min if new_min_val > current_max_val:", "STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY =", "self.files_table_idx >= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) #", "= 'CW training material generator by SP6HFE' # GUI - text config E2CW_VER_LOCAL_KEY", "size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY),", "table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index", "to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, 
values): \"\"\"Handle dictionary deletion by", "= int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them if needed if event ==", "# General # GUI - window config WINDOW_DESCRIPTION = 'CW training material generator", "scheme:'), sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id,", "GUI event name values (dict): Dictionary containing GUI elements values Returns: None \"\"\"", "= current_min_val new_max_val = current_max_val # range min value may affect sliders position", "size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1),", "(int): Maximal words length passed in when reading the value from self.window is", "Returns: None \"\"\" event, values = self.window.read() # See if user wants to", "dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0: sliders_range = (words_info['min_length'], words_info['max_length']) # update UI", "FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-'", "no dictionary in the table is selected if self.files_table_idx >= 0: table_data =", "= '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY =", "1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)]", "auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=(", "their range assuring 
that sliders values gets updated when needed Args: values (dict):", "in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle", "slider_max_val < slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self,", "size, _visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name", "the next loop yet) max_length (int): Maximal words length passed in when reading", "(window hadling did not advanced to the next loop yet) Returns: None '''", "readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]),", "new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min)", "6, True), (\"Min len\", 7, True), (\"Max len\", 7, True) ] words_filtered_header =", "_visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name for", "UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new dictionary addition by passing file path", "\"\"\"Updates relevant UI elements according to 
change in dictionary set. Args: values (dict):", "= [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h',", "material generator by SP6HFE' # GUI - text config E2CW_VER_LOCAL_KEY = '-E2CW VER", "key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH,", "if new_min_val > current_max_val: new_max_val = new_min_val # range max value may affect", "= value[nested_key] else: data = value if data == lookup_value: result = key", "with the GUI using an Event Loop while ui.handleGui(): pass # Game over", "Returns: None \"\"\" # self.files_table_idx == -1 when no dictionary in the table", "(tuple): Updated words length sliders values \"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val =", "[generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])],", "handle_dictionary_add(self, values): \"\"\"Handle new dictionary addition by passing file path to cwgen. 
UI", "in words_filtered_header], col_widths=[ size for _name, size, _visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False,", "sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI", "did not advanced to the next loop yet) max_length (int): Maximal words length", "config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO", "words_max_length) # handle letters set and generator scheme change if (event == self.COMBO_LETTERS_SET_KEY)", "to the next loop yet) Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length =", "words length sliders change their range assuring that sliders values gets updated when", "= os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path storage to", "self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col", "assuring that sliders values gets updated when needed Args: values (dict): Dictionary containing", "words_to_gen_header = [ (\"Word length\", 15, True), (\"Count\", 15, True) ] # GUI", "_id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name in self.training_generator_schemes.items()),", "values): \"\"\"Handle dictionary deletion by passing its generated UUID to cwgen. UI gets", "value first key (in insertion order) will be returned. 
Args: dictionary (dict): dictionary", "> 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file", "if dictionary contain a number of keys with exact same value first key", "words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words stat with filtered result which", "to words length sliders change their range assuring that sliders values gets updated", "(dict): Dictionary containing GUI elements values new_range (tuple): New value range Returns: new_min_val,", "if user wants to quit or window was closed if event == sg.WINDOW_CLOSED:", "self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update(", "= '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-'", "search for a key lookup_value (str): value for which key should be found", "loop where all events gets dispatched for handling Args: None Returns: None \"\"\"", "LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY", "where lookup_value is Returns: result (str): key or None if lookup_value not found", "(sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words stat", "generator_scheme)) # assemble words stat table (sorted by word length) stat = []", 
"insertion order) will be returned. Args: dictionary (dict): dictionary to search for a", "wants to quit or window was closed if event == sg.WINDOW_CLOSED: self.window.close() return", "self.cw_gen.get_words_stat() # generate updated data for UI elements if len(dictionaries_info) > 0: for", "min value may affect sliders position if new_range_min > current_range_min: if new_range_min >", "_visible in files_data_header], visible_column_map=[ visible for _name, _size, visible in files_data_header], num_rows=5, justification='left',", "new_range_max if new_max_val < current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range,", "sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY),", "15, True) ] # GUI - tables files_data_table = [sg.Table(values=[], headings=[name for name,", "or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI theming sg.theme('Default1') # Start", "the list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from the list", "words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name for name, _size,", "result = None for key, value in dictionary.items(): if nested_key is not None:", "visible in files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[", "'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY", "properly handle dictionary remove button click self.files_table_idx = -1 def 
handle_words_length_sliders(self, event, values):", "containing GUI elements values new_range (tuple): New value range Returns: new_min_val, new_max_val (tuple):", "FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' # GUI - sliders config H_SLIDER_WIDTH =", "= [ (\"Word length\", 15, True), (\"Count\", 15, True) ] words_to_gen_header = [", "1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\",", "config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\"", "auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in words_to_gen_header],", "- combo config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self):", "new_range_max < current_range_max: if new_range_max < current_max_val: new_max_val = new_range_max if new_max_val <", "= new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update(", "FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-'", "value range Returns: new_min_val, new_max_val (tuple): Updated words length sliders values \"\"\" current_range_min,", "TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO", "storage to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def 
handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion", "letters set and generator scheme change if (event == self.COMBO_LETTERS_SET_KEY) or (event ==", "= [] if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update", "= [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\",", "sliders change their range assuring that sliders values gets updated when needed Args:", "enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]), default_value=list(", "elements if len(dictionaries_info) > 0: for dictionary_data in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'],", "contain a number of keys with exact same value first key (in insertion", "= None for key, value in dictionary.items(): if nested_key is not None: data", "key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version", "MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN", "E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-' #", "sg.VSeparator(), sg.Column(right_col)]] # Configure and create the window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def", "_visible in words_filtered_header], col_widths=[ size for _name, size, _visible in words_filtered_header], num_rows=5, justification='left',", "if max_length is not None: words_max_length = max_length # get filtered words stat", "'-LETTERS SET-' 
COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" # Members self.files_table_idx", "# handle letters set and generator scheme change if (event == self.COMBO_LETTERS_SET_KEY) or", "'-E2CW GENERATE-' # GUI - input config FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI", "(int): Minimal words length passed in when reading the value from self.window is", "True), (\"Min len\", 7, True), (\"Max len\", 7, True) ] words_filtered_header = [", "dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0: sliders_range = (words_info['min_length'], words_info['max_length']) #", "e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]]", "where all events gets dispatched for handling Args: None Returns: None \"\"\" event,", "self.files_table_idx == -1 when no dictionary in the table is selected if self.files_table_idx", "Dictionary containing GUI elements values Returns: None \"\"\" # get current positions slider_min_val", "CwGenUI: # General # GUI - window config WINDOW_DESCRIPTION = 'CW training material", "values (dict): Dictionary containing GUI elements values Returns: None \"\"\" # self.files_table_idx ==", "keeping insertion order, meaning if dictionary contain a number of keys with exact", "selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index to negative to", "[words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout layout", "= self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not None: words_min_length = min_length if max_length", "# GUI - combo 
config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-'", "size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:',", "words_filtered_header], col_widths=[ size for _name, size, _visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)]", "# set table index to negative to properly handle dictionary remove button click", "= self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index to", "self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)),", "\"\"\" # get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update", "current_range_min: if new_range_min > current_min_val: new_min_val = new_range_min if new_min_val > current_max_val: new_max_val", "# range max value may affect sliders position if new_range_max < current_range_max: if", "in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name in self.training_generator_schemes.items()), 1), readonly=True,", "words length sliders movement to not let their values become ridiculous. Args: event", "name, column size, visible? 
files_data_header = [ (\"UUID\", 0, False), (\"File name\", 14,", "value=new_range_max) return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main loop where all events gets", "= 'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW", "current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them if needed", "[sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True,", "loop yet) max_length (int): Maximal words length passed in when reading the value", "orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15, 1)),", "(words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config( values, (sliders_range))", "be generated. 
Args: values(): min_length (int): Minimal words length passed in when reading", "that sliders values gets updated when needed Args: values (dict): Dictionary containing GUI", "SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" # Members self.files_table_idx =", "self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values,", "Dictionary containing GUI elements values new_range (tuple): New value range Returns: new_min_val, new_max_val", "= self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate updated data for UI elements if", "values, words_min_length, words_max_length) # handle letters set and generator scheme change if (event", "headings=[ name for name, _size, _visible in words_filtered_header], col_widths=[ size for _name, size,", "= int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val = current_min_val new_max_val", "FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-'", "is not yet updated (window hadling did not advanced to the next loop", "= [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0: sliders_range =", "cwgen import os import sys import PySimpleGUI as sg class CwGenUI: # General", "new_range new_min_val = current_min_val new_max_val = current_max_val # range min value may affect", "DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-' # GUI - input config FILE_PATH_INPUT_KEY = '-FILE", "length\", 15, True), (\"Count\", 15, True) ] words_to_gen_header = [ (\"Word length\", 15,", "be found nested_key (str): key 
in nested dictionary where lookup_value is Returns: result", "[] sliders_range = (0, 0) # get information related to already loaded data", "self.window is not yet updated (window hadling did not advanced to the next", "True) ] words_to_gen_header = [ (\"Word length\", 15, True), (\"Count\", 15, True) ]", "orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col =", "= current_max_val # range min value may affect sliders position if new_range_min >", "1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0),", "elements values Returns: None \"\"\" # on file selection cancel values[FILE_PATH_INPUT_KEY] is empty", "Remember index of selected table row if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0]", "generator scheme change if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return", "of selected table row if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add", "value from self.window is not yet updated (window hadling did not advanced to", "version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate", "size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download", "# GUI - header columns -> name, column size, visible? 
files_data_header = [", "CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion by passing its generated", "_id, data in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name", "START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY", "_size, _visible in words_filtered_header], col_widths=[ size for _name, size, _visible in words_filtered_header], num_rows=5,", "get information related to already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat()", "key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns", "to already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate updated", "= '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN", "os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path storage to properly handle CANCEL", "[sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW',", "def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part related to words length sliders change", "positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them if needed if", "letters_set, 'description'), 
self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat table (sorted by word length)", "in nested dictionary where lookup_value is Returns: result (str): key or None if", "sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0,", "to search for a key lookup_value (str): value for which key should be", "1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download /", "data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in self.letters_sets.items()), 1), readonly=True,", "size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY),", "sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col = [ [sg.Frame('Dictionaries', [files_operation,", "self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle letters set and generator", "[letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col = [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set", "MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX", "found ''' result = None for key, value in dictionary.items(): if nested_key is", "return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main loop where all events gets dispatched", "cancel values[FILE_PATH_INPUT_KEY] is empty 
if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path):", "None for key, value in dictionary.items(): if nested_key is not None: data =", "dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0: sliders_range = (words_info['min_length'], words_info['max_length']) # update", "value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return", "self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat", "event == self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to the list", "value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main loop", "'-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW", "# App layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and create the", "_size, _visible in files_data_header], col_widths=[size for _name, size, _visible in files_data_header], visible_column_map=[ visible", "min_length (int): Minimal words length passed in when reading the value from self.window", "RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE 
STOP-' # GUI - combo config", "[ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input',", "(event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI theming", "initialization\"\"\" # Members self.files_table_idx = -1 self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes", "col_widths=[ size for _name, size, _visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table", "handleGui(self): \"\"\"GUI main loop where all events gets dispatched for handling Args: None", "needed if event == self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update(", "Returns: None \"\"\" # get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY])", "words_min_length = min_length if max_length is not None: words_max_length = max_length # get", "size for _name, size, _visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI", "handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion by passing its generated UUID to cwgen. UI", "GUI - combo config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def", "situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion by passing its generated UUID", "passed in when reading the value from self.window is not yet updated (window", "- header columns -> name, column size, visible? 
files_data_header = [ (\"UUID\", 0,", "length passed in when reading the value from self.window is not yet updated", "Returns: result (str): key or None if lookup_value not found ''' result =", "window config WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE' # GUI -", "[ (\"Word length\", 15, True), (\"Count\", 15, True) ] # GUI - tables", "dictionary where lookup_value is Returns: result (str): key or None if lookup_value not", "Args: dictionary (dict): dictionary to search for a key lookup_value (str): value for", "visible_column_map=[ visible for _name, _size, visible in files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY", "size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1),", "[e2cw_version, e2cw_buttons])]] # App layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and", "parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout layout = [[sg.Column(left_col),", "set table index to negative to properly handle dictionary remove button click self.files_table_idx", "self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online", "if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to the", "[words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] #", "string value keeping insertion order, meaning if dictionary contain a number of 
keys", "column size, visible? files_data_header = [ (\"UUID\", 0, False), (\"File name\", 14, True),", "key or None if lookup_value not found ''' result = None for key,", "size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]),", "key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'],", "RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY", "MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO", "slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them if needed if event == self.LETTERS_MIN_KEY: if", "new_max_val (tuple): Updated words length sliders values \"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val", "output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout", "Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\",", "in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) >", "generated. 
Args: values(): min_length (int): Minimal words length passed in when reading the", "values, min_length=None, max_length=None): '''Updates words stat with filtered result which allow user to", "'-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" # Members self.files_table_idx = -1 self.cw_gen =", "letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description'])", "key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] #", "self.files_table_idx = -1 self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local", "self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main", "name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name in self.training_generator_schemes.items()), 1),", "change in dictionary set. 
Args: values (dict): Dictionary containing GUI elements values Returns:", "sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2,", "E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW", "file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path storage", "already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate updated data", "empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values)", "sliders position if new_range_min > current_range_min: if new_range_min > current_min_val: new_min_val = new_range_min", "generator by SP6HFE' # GUI - text config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-'", "== self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from the list if event == self.FILE_REMOVE_KEY:", "size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns", "_name, size, _visible in files_data_header], visible_column_map=[ visible for _name, _size, visible in files_data_header],", "words_min_length, words_max_length = self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle letters", "= [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), 
sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)]", "- text config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-'", "sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2,", "layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and create the window self.window", "exact same value first key (in insertion order) will be returned. Args: dictionary", "path storage to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values): \"\"\"Handle dictionary", "for name, _size, _visible in words_filtered_header], col_widths=[ size for _name, size, _visible in", "self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new dictionary addition by passing file path to", "False # Remember index of selected table row if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx", "filtered words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes,", "START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-' # GUI - combo config COMBO_LETTERS_SET_KEY", "= [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h',", "in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name for _id,", "num_rows=5, justification='left', 
auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name for name, _size, _visible", "event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length change if (event == self.LETTERS_MIN_KEY)", "files_data_table = [sg.Table(values=[], headings=[name for name, _size, _visible in files_data_header], col_widths=[size for _name,", "sliders position if new_range_max < current_range_max: if new_range_max < current_max_val: new_max_val = new_range_max", "see the data out of which training material could be generated. Args: values():", "was closed if event == sg.WINDOW_CLOSED: self.window.close() return False # Remember index of", "handle dictionary remove button click self.files_table_idx = -1 def handle_words_length_sliders(self, event, values): \"\"\"Handle", "size=(max(len(data['description']) for _id, data in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using", "\"\"\" event, values = self.window.read() # See if user wants to quit or", "values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self,", "sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch =", "justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in", "_id, name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)),", "= '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY 
= '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM", "# update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new dictionary addition by passing", "len\", 7, True), (\"Max len\", 7, True) ] words_filtered_header = [ (\"Word length\",", "ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI - header columns -> name, column size, visible?", "\"\"\"Handle dictionary deletion by passing its generated UUID to cwgen. UI gets updated.", "UI gets updated. Args: values (dict): Dictionary containing GUI elements values Returns: None", "[sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col = [", "value if data == lookup_value: result = key break return result def _update_ui_on_dictionary_set_change(self,", "# GUI - table config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-'", "= '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-' # GUI - input config FILE_PATH_INPUT_KEY", "insertion order, meaning if dictionary contain a number of keys with exact same", "int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them if needed if event == self.LETTERS_MIN_KEY:", "elements values Returns: None \"\"\" table_data = [] sliders_range = (0, 0) #", "- input config FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI - table config FILES_DATA_TABLE_KEY", "range assuring that sliders values gets updated when needed Args: values (dict): Dictionary", "dictionary to the list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from", "for _id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in self.letters_sets.items()),", "sg.FileBrowse(button_text=\"Add\", 
file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)]", "self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def handleGui(self):", "cwgen. UI gets updated. Args: values (dict): Dictionary containing GUI elements values Returns:", "e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),", "sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update", "letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),", "UI elements if len(dictionaries_info) > 0: for dictionary_data in dictionaries_info: row = [dictionary_data['uuid'],", "set:'), sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data", "'-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE", "to see the data out of which training material could be generated. Args:", "\"\"\"Handle new dictionary addition by passing file path to cwgen. 
UI gets updated.", "[sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True,", "LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS", "slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val:", "= slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI", "= [sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name)", "GUI - tables files_data_table = [sg.Table(values=[], headings=[name for name, _size, _visible in files_data_header],", "header columns -> name, column size, visible? 
files_data_header = [ (\"UUID\", 0, False),", "config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-'", "a key lookup_value (str): value for which key should be found nested_key (str):", "words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def", "words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is", "slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them if needed if event", "self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)),", "did not advanced to the next loop yet) Returns: None ''' words_min_length =", "new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max)", "== self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val,", "key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL", "size, _visible in words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation", 
"headings=[ name for name, _size, _visible in words_to_gen_header], col_widths=[ size for _name, size,", "CwGenUI() # Display and interact with the GUI using an Event Loop while", "# UI theming sg.theme('Default1') # Start the GUI ui = CwGenUI() # Display", "nested_key=None): '''Retrieves a key based on provided string value keeping insertion order, meaning", "max value may affect sliders position if new_range_max < current_range_max: if new_range_max <", "words_min_length, words_max_length) # handle letters set and generator scheme change if (event ==", "row if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to", "by word length) stat = [] if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append(", "def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements according to change in dictionary set.", "words_max_length = self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None,", "updated (window hadling did not advanced to the next loop yet) Returns: None", "sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a key based on provided", "ui = CwGenUI() # Display and interact with the GUI using an Event", "'''Updates words stat with filtered result which allow user to see the data", "words length sliders values \"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val", "self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle", 
"FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI - table config FILES_DATA_TABLE_KEY = '-FILES DATA-'", "max(len(name) for _id, name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\",", "letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not None: words_min_length", "values (dict): Dictionary containing GUI elements values Returns: None \"\"\" # get current", "for _id, name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6,", "15, True), (\"Count\", 15, True) ] # GUI - tables files_data_table = [sg.Table(values=[],", "if new_range_max < current_range_max: if new_range_max < current_max_val: new_max_val = new_range_max if new_max_val", "key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for _id, data", "== self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length change if (event == self.LETTERS_MIN_KEY) or", "affect sliders position if new_range_max < current_range_max: if new_range_max < current_max_val: new_max_val =", "justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\",", "'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat table (sorted by word length) stat", "New value range Returns: new_min_val, new_max_val (tuple): Updated words length sliders values \"\"\"", "True), (\"Count\", 15, True) ] words_to_gen_header = [ (\"Word length\", 15, True), (\"Count\",", 
"num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY),", "1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)]", "passing file path to cwgen. UI gets updated. Args: values (dict): Dictionary containing", "Args: values (dict): Dictionary containing GUI elements values new_range (tuple): New value range", "left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])],", "= '-E2CW GENERATE-' # GUI - input config FILE_PATH_INPUT_KEY = '-FILE PATH-' #", "elements according to change in dictionary set. Args: values (dict): Dictionary containing GUI", "Start the GUI ui = CwGenUI() # Display and interact with the GUI", "lookup_value not found ''' result = None for key, value in dictionary.items(): if", "VER LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-' # GUI - button config FILE_BROWSE_KEY", "updated when needed Args: values (dict): Dictionary containing GUI elements values new_range (tuple):", "sliders movement to not let their values become ridiculous. Args: event (str): GUI", "remove dictionary from the list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words", "path to cwgen. UI gets updated. 
Args: values (dict): Dictionary containing GUI elements", "name for name, _size, _visible in words_filtered_header], col_widths=[ size for _name, size, _visible", "key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1),", "[sg.Table(values=[], headings=[ name for name, _size, _visible in words_filtered_header], col_widths=[ size for _name,", "if slider_max_val < slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def", "result (str): key or None if lookup_value not found ''' result = None", "row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0: sliders_range", "handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion by passing its", "1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0),", "0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\",", "new_min_val = current_min_val new_max_val = current_max_val # range min value may affect sliders", "sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY", "new_min_val = new_range_min if new_min_val > current_max_val: new_max_val = new_min_val # range max", "button config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY = '-E2CW", "E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = 
'-E2CW GENERATE-' # GUI - input config", "# self.files_table_idx == -1 when no dictionary in the table is selected if", "if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from the list if event", "table index to negative to properly handle dictionary remove button click self.files_table_idx =", "in the table is selected if self.files_table_idx >= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid", "which key should be found nested_key (str): key in nested dictionary where lookup_value", "None \"\"\" table_data = [] sliders_range = (0, 0) # get information related", "name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\",", "= '-FILE PATH-' # GUI - table config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY", "update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part related to words length sliders change their", "15, True), (\"Count\", 15, True) ] words_to_gen_header = [ (\"Word length\", 15, True),", "value in dictionary.items(): if nested_key is not None: data = value[nested_key] else: data", "sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for _id, data in", "GUI elements values Returns: None \"\"\" table_data = [] sliders_range = (0, 0)", "slider_min_val > slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY: if", "new_range (tuple): New value range Returns: new_min_val, new_max_val (tuple): Updated words length sliders", "(dict): Dictionary containing GUI elements values Returns: None \"\"\" # self.files_table_idx == -1", "current_min_val new_max_val = current_max_val # range min value may affect sliders position if", 
"sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name", "in dictionary set. Args: values (dict): Dictionary containing GUI elements values Returns: None", "(\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min =", "key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1],", "= self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI - header columns -> name, column", "[sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY),", "words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme))", "not advanced to the next loop yet) max_length (int): Maximal words length passed", "tables files_data_table = [sg.Table(values=[], headings=[name for name, _size, _visible in files_data_header], col_widths=[size for", "LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY", "self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion by passing its generated UUID to", "yet updated (window hadling did not advanced to the next loop yet) 
max_length", "by passing file path to cwgen. UI gets updated. Args: values (dict): Dictionary", "lookup_value is Returns: result (str): key or None if lookup_value not found '''", "(0, 0) # get information related to already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info()", "gets updated when needed Args: values (dict): Dictionary containing GUI elements values new_range", "= [sg.Table(values=[], headings=[name for name, _size, _visible in files_data_header], col_widths=[size for _name, size,", "key should be found nested_key (str): key in nested dictionary where lookup_value is", "(dict): Dictionary containing GUI elements values Returns: None \"\"\" # get current positions", "new_max_val = current_max_val # range min value may affect sliders position if new_range_min", "new_range_min > current_min_val: new_min_val = new_range_min if new_min_val > current_max_val: new_max_val = new_min_val", "target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1),", "LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY", "GUI - input config FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI - table config", "= '-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY =", "stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) #", "_get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a key based on provided string value keeping", "words_min_length, words_max_length = self.update_words_length_sliders_config( values, (sliders_range)) 
self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values,", "int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length", "[sg.Frame('Training input', [words_filtered_table])]] right_col = [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])],", "words length change if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length", "update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new dictionary addition by passing file", "selected if self.files_table_idx >= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid):", "self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val: slider_min_val = slider_max_val", "Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme", "slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part", "if new_range_min > current_min_val: new_min_val = new_range_min if new_min_val > current_max_val: new_max_val =", "hadling did not advanced to the next loop yet) max_length (int): Maximal words", "self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = 
self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) def", "21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY", "key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update Ebook2CW',", "return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part related to words", "_id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in self.letters_sets.items()), 1),", "key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max", "new_max_val < current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val) self.window[self.LETTERS_MAX_KEY].update( range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update(", "current_range_max: if new_range_max < current_max_val: new_max_val = new_range_max if new_max_val < current_min_val: new_min_val", "= [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h',", "become ridiculous. 
Args: event (str): GUI event name values (dict): Dictionary containing GUI", "= self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not None: words_min_length =", "'-WORDS TO GEN-' # GUI - sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT =", "a key based on provided string value keeping insertion order, meaning if dictionary", "values): \"\"\"Updates relevant UI elements according to change in dictionary set. Args: values", "properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self, values): \"\"\"Handle dictionary deletion by passing", "1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters", "(\"Count\", 15, True) ] # GUI - tables files_data_table = [sg.Table(values=[], headings=[name for", "enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in", "GUI - window config WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE' #", "value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main loop where all", "affect sliders position if new_range_min > current_range_min: if new_range_min > current_min_val: new_min_val =", "key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch", "event, values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle letters set and generator scheme", "col_widths=[size for _name, size, _visible in files_data_header], visible_column_map=[ visible for _name, 
_size, visible", "data for UI elements if len(dictionaries_info) > 0: for dictionary_data in dictionaries_info: row", "for handling Args: None Returns: None \"\"\" event, values = self.window.read() # See", "key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\",", "in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for _id, data in self.letters_sets.items()), 1), readonly=True, enable_events=True,", "e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] #", "same value first key (in insertion order) will be returned. Args: dictionary (dict):", "current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val = current_min_val new_max_val = current_max_val", "value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val) def handleGui(self): \"\"\"GUI", "'-E2CW DOWNLOAD-' E2CW_GENERATE_KEY = '-E2CW GENERATE-' # GUI - input config FILE_PATH_INPUT_KEY =", "H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY =", "len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear", "GUI elements values new_range (tuple): New value range Returns: new_min_val, new_max_val (tuple): Updated", "Configure and create the window 
self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value,", "1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\",", "GUI - rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\", \"*.*\"),", "range=new_range, value=new_max_val) self.window[self.LETTERS_MIN_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update( value=new_range_max) self.window[self.LETTERS_MAX_RANGE_START_KEY].update( value=new_range_min) self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update( value=new_range_max) return (new_min_val, new_max_val)", "yet) Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get()", "the GUI using an Event Loop while ui.handleGui(): pass # Game over del", "words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat table (sorted", "on file selection cancel values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path =", "sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)]", "= new_range new_min_val = current_min_val new_max_val = current_max_val # range min value may", "if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event,", "RANGE START-' 
E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY", "values): \"\"\"Handle new dictionary addition by passing file path to cwgen. UI gets", "for _name, _size, visible in files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table", "len(dictionaries_info) > 0: for dictionary_data in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'],", "= [ (\"UUID\", 0, False), (\"File name\", 14, True), (\"Words\", 6, True), (\"Min", "RANGE START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE", "self._update_ui_on_dictionary_set_change(values) # clear file path storage to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def", "# See if user wants to quit or window was closed if event", "START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY =", "Returns: new_min_val, new_max_val (tuple): Updated words length sliders values \"\"\" current_range_min, current_range_max =", "= slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event == self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val: slider_min_val", "orientation='h', enable_events=True, key=self.E2CW_WPM_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)] e2cw_farns = [sg.Text(\"FARNS:\", size=(6, 1)), sg.Text(\"0\",", "int(values[self.LETTERS_MAX_KEY]) letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get() generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not None:", "None if lookup_value not found ''' result = None for key, value in", "Display and interact with the GUI using an Event Loop while ui.handleGui(): pass", 
"self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True,", "(\"File name\", 14, True), (\"Words\", 6, True), (\"Min len\", 7, True), (\"Max len\",", "[sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm,", "E2CW_GENERATE_KEY = '-E2CW GENERATE-' # GUI - input config FILE_PATH_INPUT_KEY = '-FILE PATH-'", "or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values, words_min_length,", "or None if lookup_value not found ''' result = None for key, value", "# GUI - rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\",", "words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat table", "= key break return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements according", "name, _size, _visible in words_filtered_header], col_widths=[ size for _name, size, _visible in words_filtered_header],", "training material generator by SP6HFE' # GUI - text config E2CW_VER_LOCAL_KEY = '-E2CW", "# GUI - sliders config H_SLIDER_WIDTH = 21 H_SLIDER_HEIGHT = 10 LETTERS_MIN_KEY =", "when reading the value from self.window is not yet updated (window hadling did", "value=slider_max_val) if event == self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update(", "yet) max_length 
(int): Maximal words length passed in when reading the value from", "# Members self.files_table_idx = -1 self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets() self.training_generator_schemes =", "words_filtered_table = [sg.Table(values=[], headings=[ name for name, _size, _visible in words_filtered_header], col_widths=[ size", "words length passed in when reading the value from self.window is not yet", "closed if event == sg.WINDOW_CLOSED: self.window.close() return False # Remember index of selected", "click self.files_table_idx = -1 def handle_words_length_sliders(self, event, values): \"\"\"Handle words length sliders movement", "len(words_info) > 0: sliders_range = (words_info['min_length'], words_info['max_length']) # update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length,", "] words_to_gen_header = [ (\"Word length\", 15, True), (\"Count\", 15, True) ] #", "name for name, _size, _visible in words_to_gen_header], col_widths=[ size for _name, size, _visible", "== self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val = slider_min_val self.window[self.LETTERS_MAX_KEY].update( value=slider_max_val) if event", "= min_length if max_length is not None: words_max_length = max_length # get filtered", "lookup_value, nested_key=None): '''Retrieves a key based on provided string value keeping insertion order,", "name, _size, _visible in files_data_header], col_widths=[size for _name, size, _visible in files_data_header], visible_column_map=[", "0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From", "meaning if dictionary contain a number of keys with exact same value first", "selection cancel values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = 
os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if", "LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE", "== self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values) return True # UI theming sg.theme('Default1')", "values Returns: None \"\"\" # get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val =", "self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1),", "values \"\"\" current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min,", "'-E2CW WPM RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE", "min_length=None, max_length=None): '''Updates words stat with filtered result which allow user to see", "advanced to the next loop yet) max_length (int): Maximal words length passed in", "int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val = current_min_val new_max_val = current_max_val # range", "if event == self.LETTERS_MAX_KEY: if slider_max_val < slider_min_val: slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val)", "nested_key (str): key in nested dictionary where lookup_value is Returns: result (str): key", "== -1 when no dictionary in the table is selected if self.files_table_idx >=", "sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online", "'-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-' # GUI -", "stat.append( 
[word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self, values): \"\"\"Handle new dictionary", "[e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App layout layout = [[sg.Column(left_col), sg.VSeparator(),", "self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path storage to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\")", "stat table (sorted by word length) stat = [] if words_stat_filtered: for word_length", "from self.window is not yet updated (window hadling did not advanced to the", "= '-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-' # GUI", "1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY),", "LOCAL-' E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-' # GUI - button config FILE_BROWSE_KEY =", "[[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]] # Configure and create the window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout)", "interact with the GUI using an Event Loop while ui.handleGui(): pass # Game", "word length) stat = [] if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length,", "words stat table (sorted by word length) stat = [] if words_stat_filtered: for", "(in insertion order) will be returned. 
Args: dictionary (dict): dictionary to search for", "for _name, size, _visible in files_data_header], visible_column_map=[ visible for _name, _size, visible in", "them if needed if event == self.LETTERS_MIN_KEY: if slider_min_val > slider_max_val: slider_max_val =", "WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-'", "values (dict): Dictionary containing GUI elements values new_range (tuple): New value range Returns:", "length sliders movement to not let their values become ridiculous. Args: event (str):", "set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version,", "generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns,", "key based on provided string value keeping insertion order, meaning if dictionary contain", "'-E2CW VER ONLINE-' # GUI - button config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY", "enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2,", "START-' LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-'", "selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0),", "STOP-' LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-' LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-'", "sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY),", "to change in dictionary set. 
Args: values (dict): Dictionary containing GUI elements values", "_update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements according to change in dictionary set. Args:", "new_max_val = new_min_val # range max value may affect sliders position if new_range_max", "not yet updated (window hadling did not advanced to the next loop yet)", "= new_range_min if new_min_val > current_max_val: new_max_val = new_min_val # range max value", "Add a dictionary to the list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove", "= value if data == lookup_value: result = key break return result def", "values gets updated when needed Args: values (dict): Dictionary containing GUI elements values", "7, True) ] words_filtered_header = [ (\"Word length\", 15, True), (\"Count\", 15, True)", "change if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders(", "import cwgen import os import sys import PySimpleGUI as sg class CwGenUI: #", "hadling did not advanced to the next loop yet) Returns: None ''' words_min_length", "[sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]), default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for", "words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),", "or window was closed if event == sg.WINDOW_CLOSED: self.window.close() return False # Remember", "None \"\"\" # get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) #", "[sg.Table(values=[], headings=[name for name, _size, _visible in files_data_header], col_widths=[size for _name, size, _visible", "True) ] words_filtered_header = [ (\"Word length\", 15, True), (\"Count\", 15, 
True) ]", "[sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True,", "nested dictionary where lookup_value is Returns: result (str): key or None if lookup_value", "to the list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from the", "length) stat = [] if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]])", "range min value may affect sliders position if new_range_min > current_range_min: if new_range_min", "training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0,", "- window config WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE' # GUI", "size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1),", "new dictionary addition by passing file path to cwgen. UI gets updated. Args:", "[dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0: sliders_range = (words_info['min_length'],", "# get information related to already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info =", "# Configure and create the window self.window = sg.Window(self.WINDOW_DESCRIPTION, layout) def _get_dictionary_key_by_value(self, dictionary,", "to not let their values become ridiculous. 
Args: event (str): GUI event name", "True), (\"Count\", 15, True) ] # GUI - tables files_data_table = [sg.Table(values=[], headings=[name", "\"\"\"Class initialization\"\"\" # Members self.files_table_idx = -1 self.cw_gen = cwgen.CwGen() self.letters_sets = self.cw_gen.get_letters_sets()", "number of keys with exact same value first key (in insertion order) will", "if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index to negative to properly handle dictionary", "for which key should be found nested_key (str): key in nested dictionary where", "not advanced to the next loop yet) Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY])", "words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble", "and generator scheme change if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY): self._update_ui_on_words_filtering_change(values)", "# get current positions slider_min_val = int(values[self.LETTERS_MIN_KEY]) slider_max_val = int(values[self.LETTERS_MAX_KEY]) # update them", "= [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY,", "0) # get information related to already loaded data dictionaries_info = self.cw_gen.get_dictionaries_info() words_info", "RANGE STOP-' E2CW_FARNS_KEY = '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY", "# Remember index of selected table row if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx =", "containing GUI elements values Returns: None \"\"\" # on file selection cancel 
values[FILE_PATH_INPUT_KEY]", "1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\",", "value keeping insertion order, meaning if dictionary contain a number of keys with", "= max_length # get filtered words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value(", "by SP6HFE' # GUI - text config E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-' E2CW_VER_ONLINE_KEY", "(new_min_val, new_max_val) def handleGui(self): \"\"\"GUI main loop where all events gets dispatched for", "the table is selected if self.files_table_idx >= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid =", "elements values new_range (tuple): New value range Returns: new_min_val, new_max_val (tuple): Updated words", "values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary to the list if event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values)", "from the list if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length change", "the next loop yet) Returns: None ''' words_min_length = int(values[self.LETTERS_MIN_KEY]) words_max_length = int(values[self.LETTERS_MAX_KEY])", "is selected if self.files_table_idx >= 0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if", "values Returns: None \"\"\" table_data = [] sliders_range = (0, 0) # get", "10 LETTERS_MIN_KEY = '-LETTERS MIN-' LETTERS_MAX_KEY = '-LETTERS MAX-' LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN", "\"\"\"Updates UI part related to words length sliders change their range assuring that", "event == self.FILE_PATH_INPUT_KEY: self.handle_dictionary_add(values) # remove dictionary from the list if event ==", "= '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = 
'-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH", "by passing its generated UUID to cwgen. UI gets updated. Args: values (dict):", "GENERATE-' # GUI - input config FILE_PATH_INPUT_KEY = '-FILE PATH-' # GUI -", "dictionary contain a number of keys with exact same value first key (in", "sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm = [sg.Text(\"WPM:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY),", "[sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY), sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)] e2cw_buttons", "input', [words_filtered_table])]] right_col = [ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training", "may affect sliders position if new_range_max < current_range_max: if new_range_max < current_max_val: new_max_val", "dictionaries_info = self.cw_gen.get_dictionaries_info() words_info = self.cw_gen.get_words_stat() # generate updated data for UI elements", "e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY), sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)] e2cw_wpm =", "Args: values (dict): Dictionary containing GUI elements values Returns: None \"\"\" # on", "to the next loop yet) max_length (int): Maximal words length passed in when", "def handle_words_length_sliders(self, event, values): \"\"\"Handle words length sliders movement to not let their", "0: table_data = self.window[self.FILES_DATA_TABLE_KEY].get() selected_dictionary_uuid = table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table", "data == lookup_value: result = key break return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates", "= [sg.Text('From set:'), 
sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]), default_value=list( self.letters_sets.items())[0][1]['description'], size=(max(len(data['description']) for", "is Returns: result (str): key or None if lookup_value not found ''' result", "sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY),", "= table_data[self.files_table_idx][0] if self.cw_gen.remove_dictionary(selected_dictionary_uuid): self._update_ui_on_dictionary_set_change(values) # set table index to negative to properly", "E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-' E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-' E2CW_PITCH_KEY", "TO TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE", "- columns left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length',", "self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), sg.Combo(values=([name for _id, name", "if event == self.FILE_REMOVE_KEY: self.handle_dictionary_delete(values) # handle words length change if (event ==", "layout) def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None): '''Retrieves a key based on provided string", "1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)]", "[ (\"Word length\", 15, True), (\"Count\", 15, True) ] words_to_gen_header = [ (\"Word", "(\"Words\", 6, True), (\"Min len\", 7, True), (\"Max len\", 7, True) ] words_filtered_header", "= '-E2CW FARNS-' E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-' 
E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS", "files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")),", "os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path): self._update_ui_on_dictionary_set_change(values) # clear file path storage to properly", "visible for _name, _size, visible in files_data_header], num_rows=5, justification='left', auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )]", "0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI -", "size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1),", "# Start the GUI ui = CwGenUI() # Display and interact with the", "values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) # handle letters set and generator scheme change", "RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-' E2CW_WPM_KEY = '-E2CW WPM-'", "columns left_col = [ [sg.Frame('Dictionaries', [files_operation, files_data_table])], [sg.Frame('Letters selection', [letters_set])], [sg.Frame('Words length', [letters_min,", "all events gets dispatched for handling Args: None Returns: None \"\"\" event, values", "size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_WPM_KEY),", "sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), 
sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max =", "for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update UI self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat) def handle_dictionary_add(self,", "# clear file path storage to properly handle CANCEL situation self.window[self.FILE_PATH_INPUT_KEY].update(value=\"\") def handle_dictionary_delete(self,", "readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_KEY), sg.Slider(range=(0,", "current_max_val: new_max_val = new_min_val # range max value may affect sliders position if", "PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-'", "event, values = self.window.read() # See if user wants to quit or window", "dictionary (dict): dictionary to search for a key lookup_value (str): value for which", "1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]), default_value=list(", "in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train = [sg.Text(\"SIZE:\", size=(6, 1)), sg.Text(\"0\", size=(2,", "UUID to cwgen. UI gets updated. 
Args: values (dict): Dictionary containing GUI elements", "ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI - header columns -> name,", "max_length=None): '''Updates words stat with filtered result which allow user to see the", "for dictionary_data in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if", "0: for dictionary_data in dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row)", "int(values[self.LETTERS_MAX_KEY]) # update them if needed if event == self.LETTERS_MIN_KEY: if slider_min_val >", "dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0: sliders_range = (words_info['min_length'], words_info['max_length'])", "True), (\"Max len\", 7, True) ] words_filtered_header = [ (\"Word length\", 15, True),", "in when reading the value from self.window is not yet updated (window hadling", "size=( max(len(name) for _id, name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)] words_to_train =", "values = self.window.read() # See if user wants to quit or window was", "TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-' WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN", "GUI - button config FILE_BROWSE_KEY = '-ADD FILE-' FILE_REMOVE_KEY = '-REMOVE FILE-' E2CW_DOWNLOAD_KEY", "- rows files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL 
Files\", \"*.*\"), (\"CWOPS", "= [sg.Table(values=[], headings=[ name for name, _size, _visible in words_to_gen_header], col_widths=[ size for", "selection', [letters_set])], [sg.Frame('Words length', [letters_min, letters_max])], [sg.Frame('Training input', [words_filtered_table])]] right_col = [ [sg.Frame('Training", "values Returns: None \"\"\" # on file selection cancel values[FILE_PATH_INPUT_KEY] is empty if", "PySimpleGUI as sg class CwGenUI: # General # GUI - window config WINDOW_DESCRIPTION", "new_max_val) def handleGui(self): \"\"\"GUI main loop where all events gets dispatched for handling", "Dictionary containing GUI elements values Returns: None \"\"\" # on file selection cancel", "if new_range_min > current_range_min: if new_range_min > current_min_val: new_min_val = new_range_min if new_min_val", "[sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])], [sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]] # App", "1), key=self.E2CW_FARNS_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)]", "def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words stat with filtered result which allow", "file selection cancel values[FILE_PATH_INPUT_KEY] is empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY])", "\"\"\"GUI main loop where all events gets dispatched for handling Args: None Returns:", "words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words stat with filtered result", "to properly handle dictionary remove button click self.files_table_idx = -1 def handle_words_length_sliders(self, event,", "to cwgen. UI gets updated. 
Args: values (dict): Dictionary containing GUI elements values", "self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY), sg.Text(\"0\", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)] e2cw_version = [sg.Text('Local version:', size=(15,", "slider_min_val = slider_max_val self.window[self.LETTERS_MIN_KEY].update( value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates", "STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-'", "a number of keys with exact same value first key (in insertion order)", "table (sorted by word length) stat = [] if words_stat_filtered: for word_length in", "self.training_generator_schemes = self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI - header", "visible? files_data_header = [ (\"UUID\", 0, False), (\"File name\", 14, True), (\"Words\", 6,", "enable_events=True, key=self.E2CW_PITCH_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)] # GUI - columns left_col = [", "= self.window[self.LETTERS_MIN_KEY].Range current_min_val = int(values[self.LETTERS_MIN_KEY]) current_max_val = int(values[self.LETTERS_MAX_KEY]) new_range_min, new_range_max = new_range new_min_val", "length change if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY): words_min_length, words_max_length =", "def handleGui(self): \"\"\"GUI main loop where all events gets dispatched for handling Args:", "length sliders change their range assuring that sliders values gets updated when needed", "'-LETTERS MAX RANGE STOP-' WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-' WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO", "= self.cw_gen.get_ebook2cw_version_online() # GUI - header columns -> name, column size, visible? 
files_data_header", "dictionaries_info: row = [dictionary_data['uuid'], dictionary_data['name'], dictionary_data['stat']['words_count'], dictionary_data['stat']['min_length'], dictionary_data['stat']['max_length']] table_data.append(row) if len(words_info) > 0:", "# GUI - tables files_data_table = [sg.Table(values=[], headings=[name for name, _size, _visible in", "enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'), sg.Combo(values=([data['description'] for _id,", "[] if words_stat_filtered: for word_length in sorted(words_stat_filtered['words_stat'].keys()): stat.append( [word_length, words_stat_filtered['words_stat'][word_length]]) # update UI", "orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\",", "words_to_gen_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)] # GUI - rows files_operation = [sg.Input(enable_events=True, visible=False,", "self._get_dictionary_key_by_value( self.letters_sets, letters_set, 'description'), self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme)) # assemble words stat table (sorted by", "for a key lookup_value (str): value for which key should be found nested_key", "orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY), sg.Text(\"0\", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)] e2cw_pitch = [sg.Text(\"PITCH:\", size=(6, 1)), sg.Text(\"0\",", "allow user to see the data out of which training material could be", "table row if event == self.FILES_DATA_TABLE_KEY: self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0] # Add a dictionary", "= -1 def handle_words_length_sliders(self, event, values): \"\"\"Handle words length sliders movement to not", "user wants to quit or window was closed if event == 
sg.WINDOW_CLOSED: self.window.close()", "'-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE", "STOP-' E2CW_PITCH_KEY = '-E2CW PITCH-' E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-' E2CW_PITCH_RANGE_STOP_KEY =", "\"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove selected\", key=self.FILE_REMOVE_KEY)] letters_min = [sg.Text(\"MIN:\", size=(4, 1)), sg.Text(\"0\", size=(2,", "event, values): \"\"\"Handle words length sliders movement to not let their values become", "size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set = [sg.Text('From set:'),", "stat with filtered result which allow user to see the data out of", "values, words_min_length, words_max_length) def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None): '''Updates words stat with filtered", "sg.theme('Default1') # Start the GUI ui = CwGenUI() # Display and interact with", "E2CW_WPM_KEY = '-E2CW WPM-' E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-' E2CW_WPM_RANGE_STOP_KEY = '-E2CW", "E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-' # GUI - button config FILE_BROWSE_KEY = '-ADD", "= '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class initialization\"\"\" # Members self.files_table_idx = -1 self.cw_gen", "= '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' #", "''' result = None for key, value in dictionary.items(): if nested_key is not", "generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get( ) if min_length is not None: words_min_length = min_length if", "new_max_val = new_range_max if new_max_val < current_min_val: new_min_val = new_max_val self.window[self.LETTERS_MIN_KEY].update( range=new_range, value=new_min_val)", "= 
self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI - header columns", "_name, size, _visible in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[", "== self.LETTERS_MAX_KEY): words_min_length, words_max_length = self.handle_words_length_sliders( event, values) self._update_ui_on_words_filtering_change( values, words_min_length, words_max_length) #", "PATH-' # GUI - table config FILES_DATA_TABLE_KEY = '-FILES DATA-' WORDS_FILTERED_TABLE_KEY = '-WORDS", "= '-WORDS FILTERED-' WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-' # GUI - sliders config", "training material could be generated. Args: values(): min_length (int): Minimal words length passed", "window was closed if event == sg.WINDOW_CLOSED: self.window.close() return False # Remember index", "# update UI self.window[self.FILES_DATA_TABLE_KEY].update( values=table_data) words_min_length, words_max_length = self.update_words_length_sliders_config( values, (sliders_range)) self._update_ui_on_words_filtering_change( values,", "sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)] letters_set =", "default_value=list( self.training_generator_schemes.items())[0][1], size=( max(len(name) for _id, name in self.training_generator_schemes.items()), 1), readonly=True, enable_events=True, key=self.COMBO_MATERIAL_GENERATION_KEY)]", "combo config COMBO_LETTERS_SET_KEY = '-LETTERS SET-' COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-' def __init__(self): \"\"\"Class", "data in self.letters_sets.items()), 1), readonly=True, enable_events=True, key=self.COMBO_LETTERS_SET_KEY)] generator_scheme = [sg.Text('Using scheme:'), 
sg.Combo(values=([name for", "break return result def _update_ui_on_dictionary_set_change(self, values): \"\"\"Updates relevant UI elements according to change", "visible=False, key=self.FILE_PATH_INPUT_KEY), sg.FileBrowse(button_text=\"Add\", file_types=( (\"ALL Files\", \"*.*\"), (\"CWOPS sessions\", \"*.cwo\")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY), sg.Button(button_text=\"Remove", "found nested_key (str): key in nested dictionary where lookup_value is Returns: result (str):", "generated UUID to cwgen. UI gets updated. Args: values (dict): Dictionary containing GUI", "self.cw_gen.get_training_generator_schemes() ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local() ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online() # GUI - header columns ->", "is empty if len(values[self.FILE_PATH_INPUT_KEY]) > 0: file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY]) if os.path.isfile(file_path): if self.cw_gen.add_dictionary(file_path):", "Args: values (dict): Dictionary containing GUI elements values Returns: None \"\"\" # self.files_table_idx", "in words_filtered_header], num_rows=5, justification='left', auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)] words_to_gen_table = [sg.Table(values=[], headings=[ name for name,", "for name, _size, _visible in words_to_gen_header], col_widths=[ size for _name, size, _visible in", "sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)] letters_max = [sg.Text(\"MAX:\", size=(4, 1)), sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY),", "class CwGenUI: # General # GUI - window config WINDOW_DESCRIPTION = 'CW training", "addition by passing file path to cwgen. UI gets updated. 
Args: values (dict):", "# get filtered words stat words_stat_filtered = self.cw_gen.get_words_stat_filtered( words_min_length, words_max_length, self._get_dictionary_key_by_value( self.letters_sets, letters_set,", "sg.Text(\"0\", size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY), sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT), orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY), sg.Text(\"0\", size=(2,", "(window hadling did not advanced to the next loop yet) max_length (int): Maximal", "[ [sg.Frame('Training generator', [generator_scheme])], [sg.Frame('Training set size', [words_to_train])], [sg.Frame('Training output', [words_to_gen_table])], [sg.Frame('Audible parameters',", "data = value[nested_key] else: data = value if data == lookup_value: result =", "not let their values become ridiculous. Args: event (str): GUI event name values", "value=slider_min_val) return (slider_min_val, slider_max_val) def update_words_length_sliders_config(self, values, new_range): \"\"\"Updates UI part related to", "== sg.WINDOW_CLOSED: self.window.close() return False # Remember index of selected table row if", "order) will be returned. Args: dictionary (dict): dictionary to search for a key", "size, visible? files_data_header = [ (\"UUID\", 0, False), (\"File name\", 14, True), (\"Words\",", "auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY )] words_filtered_table = [sg.Table(values=[], headings=[ name for name, _size, _visible" ]
[ "chunk_length = input[2] # print(\"data\", data) found = data.find(test) if found != -1:", "chunkit(str, chunk_length): # make chunks of strings the way we like chunks =", "chars return [x.strip(\" \\t\\n\\r\") for x in l] def compare(input): # main comparison", "they are rounding remainder orphans from the # chunking process if len(words) >=", "wordLists: if chunk != \"\": chunk = \" \".join(chunk) chunks.append(chunk) return chunks def", "i in trange(max, min, -1): # print('text, i',text, i) res = run(i, text,", "comparison function test = input[0] data = input[1] chunk_length = input[2] # print(\"data\",", "# don't return matched chunks shorter than the current chunk # length, even", "lines testChunks = list(filter(None, testChunks)) results = [] for testLine in testChunks: found", "run(i, text, dataset) if len(res) > 0: for r in res: if r", "\"\") matches = [] for i in trange(max, min, -1): # print('text, i',text,", "i in range(0, len(lst), n): yield lst[i : i + n] def chunkit(str,", "res = run(i, text, dataset) if len(res) > 0: for r in res:", "for x in l] def compare(input): # main comparison function test = input[0]", "of a dupe string # crosses the border between chunks import colorama from", "clean out some unneeded chars return [x.strip(\" \\t\\n\\r\") for x in l] def", "test = input[0] data = input[1] chunk_length = input[2] # print(\"data\", data) found", "def make_chunks(lst, n): # Yield successive n-sized chunks from lst. 
for i in", "of strings the way we like chunks = [] list = str.split() list", "# chunking process if len(words) >= chunk_length: return test def make_chunks(lst, n): #", "list(filter(None, testChunks)) results = [] for testLine in testChunks: found = compare([testLine, dataset,", "import Fore from tqdm import trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN = 5", "= [] list = str.split() list = striplist(list) wordLists = make_chunks(list, chunk_length) for", "= chunkit(text, chunk_length) # remove empty lines testChunks = list(filter(None, testChunks)) results =", "testLine in testChunks: found = compare([testLine, dataset, chunk_length]) if found != None: print(\"found\",", "dupe string # crosses the border between chunks import colorama from colorama import", "def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text != None assert dataset !=", "range(0, len(lst), n): yield lst[i : i + n] def chunkit(str, chunk_length): #", "the border between chunks import colorama from colorama import Fore from tqdm import", "if len(words) >= chunk_length: return test def make_chunks(lst, n): # Yield successive n-sized", "tqdm import trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX = 10", "print(\"found\", found) results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text", "len(lst), n): yield lst[i : i + n] def chunkit(str, chunk_length): # make", "main comparison function test = input[0] data = input[1] chunk_length = input[2] #", "n] def chunkit(str, chunk_length): # make chunks of strings the way we like", "lst. for i in range(0, len(lst), n): yield lst[i : i + n]", "return test def make_chunks(lst, n): # Yield successive n-sized chunks from lst. 
for", "chunk in wordLists: if chunk != \"\": chunk = \" \".join(chunk) chunks.append(chunk) return", "= dataset.replace(os.linesep, \" \") testChunks = chunkit(text, chunk_length) # remove empty lines testChunks", "testChunks = chunkit(text, chunk_length) # remove empty lines testChunks = list(filter(None, testChunks)) results", "\"\": chunk = \" \".join(chunk) chunks.append(chunk) return chunks def run(chunk_length, text, dataset): dataset", "import os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX = 10 def striplist(l): # clean", "dataset = dataset.replace(os.linesep, \" \") testChunks = chunkit(text, chunk_length) # remove empty lines", "!= None assert dataset != None text = text.replace(os.linesep, \"\") matches = []", "max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text != None assert dataset != None text", "return matched chunks shorter than the current chunk # length, even if they", "found != None: print(\"found\", found) results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None,", "in range(0, len(lst), n): yield lst[i : i + n] def chunkit(str, chunk_length):", "shorter than the current chunk # length, even if they are rounding remainder", "input[2] # print(\"data\", data) found = data.find(test) if found != -1: words =", "string # crosses the border between chunks import colorama from colorama import Fore", "os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX = 10 def striplist(l): # clean out", "text != None assert dataset != None text = text.replace(os.linesep, \"\") matches =", "the # chunking process if len(words) >= chunk_length: return test def make_chunks(lst, n):", "= 5 DEFAULT_MAX = 10 def striplist(l): # clean out some unneeded chars", "list = striplist(list) wordLists = make_chunks(list, chunk_length) for chunk in wordLists: if chunk", "[] for testLine in testChunks: found = compare([testLine, dataset, chunk_length]) if found !=", "make_chunks(list, 
chunk_length) for chunk in wordLists: if chunk != \"\": chunk = \"", "for testLine in testChunks: found = compare([testLine, dataset, chunk_length]) if found != None:", "dataset=None, verbose=False): assert text != None assert dataset != None text = text.replace(os.linesep,", "rounding remainder orphans from the # chunking process if len(words) >= chunk_length: return", "testChunks = list(filter(None, testChunks)) results = [] for testLine in testChunks: found =", "l] def compare(input): # main comparison function test = input[0] data = input[1]", "i',text, i) res = run(i, text, dataset) if len(res) > 0: for r", "results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text != None", "colorama import Fore from tqdm import trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN =", "found) results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text !=", "testChunks)) results = [] for testLine in testChunks: found = compare([testLine, dataset, chunk_length])", "= \" \".join(chunk) chunks.append(chunk) return chunks def run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep,", "windows instead of chunking. 
# the chunking method fails when part of a", "dataset != None text = text.replace(os.linesep, \"\") matches = [] for i in", "i) res = run(i, text, dataset) if len(res) > 0: for r in", "chunk # length, even if they are rounding remainder orphans from the #", "dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text != None assert dataset != None", "if found != -1: words = test.split() # don't return matched chunks shorter", "def striplist(l): # clean out some unneeded chars return [x.strip(\" \\t\\n\\r\") for x", "def chunkit(str, chunk_length): # make chunks of strings the way we like chunks", "when part of a dupe string # crosses the border between chunks import", "trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX = 10 def striplist(l):", "found = data.find(test) if found != -1: words = test.split() # don't return", "# length, even if they are rounding remainder orphans from the # chunking", "!= \"\": chunk = \" \".join(chunk) chunks.append(chunk) return chunks def run(chunk_length, text, dataset):", "for i in trange(max, min, -1): # print('text, i',text, i) res = run(i,", "\") testChunks = chunkit(text, chunk_length) # remove empty lines testChunks = list(filter(None, testChunks))", "if found != None: print(\"found\", found) results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None,", "10 def striplist(l): # clean out some unneeded chars return [x.strip(\" \\t\\n\\r\") for", "striplist(l): # clean out some unneeded chars return [x.strip(\" \\t\\n\\r\") for x in", "dataset.replace(os.linesep, \" \") testChunks = chunkit(text, chunk_length) # remove empty lines testChunks =", "= run(i, text, dataset) if len(res) > 0: for r in res: if", "chunks from lst. 
for i in range(0, len(lst), n): yield lst[i : i", "chunks import colorama from colorama import Fore from tqdm import trange, tqdm import", "testChunks: found = compare([testLine, dataset, chunk_length]) if found != None: print(\"found\", found) results.append(found)", "current chunk # length, even if they are rounding remainder orphans from the", "text=None, dataset=None, verbose=False): assert text != None assert dataset != None text =", "process if len(words) >= chunk_length: return test def make_chunks(lst, n): # Yield successive", "n): # Yield successive n-sized chunks from lst. for i in range(0, len(lst),", "= [] for testLine in testChunks: found = compare([testLine, dataset, chunk_length]) if found", "verbose=False): assert text != None assert dataset != None text = text.replace(os.linesep, \"\")", "found != -1: words = test.split() # don't return matched chunks shorter than", "\\t\\n\\r\") for x in l] def compare(input): # main comparison function test =", "-1: words = test.split() # don't return matched chunks shorter than the current", "return chunks def run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep, \" \") testChunks =", "results = [] for testLine in testChunks: found = compare([testLine, dataset, chunk_length]) if", "lst[i : i + n] def chunkit(str, chunk_length): # make chunks of strings", "orphans from the # chunking process if len(words) >= chunk_length: return test def", "a dupe string # crosses the border between chunks import colorama from colorama", "n): yield lst[i : i + n] def chunkit(str, chunk_length): # make chunks", "= text.replace(os.linesep, \"\") matches = [] for i in trange(max, min, -1): #", "successive n-sized chunks from lst. 
for i in range(0, len(lst), n): yield lst[i", "like chunks = [] list = str.split() list = striplist(list) wordLists = make_chunks(list,", "chunks shorter than the current chunk # length, even if they are rounding", "\" \") testChunks = chunkit(text, chunk_length) # remove empty lines testChunks = list(filter(None,", "chunks def run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep, \" \") testChunks = chunkit(text,", "# make chunks of strings the way we like chunks = [] list", "chunk != \"\": chunk = \" \".join(chunk) chunks.append(chunk) return chunks def run(chunk_length, text,", "for i in range(0, len(lst), n): yield lst[i : i + n] def", "= compare([testLine, dataset, chunk_length]) if found != None: print(\"found\", found) results.append(found) return results", "assert text != None assert dataset != None text = text.replace(os.linesep, \"\") matches", "Yield successive n-sized chunks from lst. for i in range(0, len(lst), n): yield", "strings the way we like chunks = [] list = str.split() list =", "tqdm import os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX = 10 def striplist(l): #", "list = str.split() list = striplist(list) wordLists = make_chunks(list, chunk_length) for chunk in", "chunkit(text, chunk_length) # remove empty lines testChunks = list(filter(None, testChunks)) results = []", "the current chunk # length, even if they are rounding remainder orphans from", "x in l] def compare(input): # main comparison function test = input[0] data", "chunk_length): # make chunks of strings the way we like chunks = []", "chunking process if len(words) >= chunk_length: return test def make_chunks(lst, n): # Yield", "return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text != None assert", "print('text, i',text, i) res = run(i, text, dataset) if len(res) > 0: for", "= data.find(test) if found != -1: words = test.split() # don't return matched", "wordLists = make_chunks(list, 
chunk_length) for chunk in wordLists: if chunk != \"\": chunk", "# print('text, i',text, i) res = run(i, text, dataset) if len(res) > 0:", "len(words) >= chunk_length: return test def make_chunks(lst, n): # Yield successive n-sized chunks", "text = text.replace(os.linesep, \"\") matches = [] for i in trange(max, min, -1):", "test def make_chunks(lst, n): # Yield successive n-sized chunks from lst. for i", "# print(\"data\", data) found = data.find(test) if found != -1: words = test.split()", "chunk_length: return test def make_chunks(lst, n): # Yield successive n-sized chunks from lst.", "min, -1): # print('text, i',text, i) res = run(i, text, dataset) if len(res)", "function test = input[0] data = input[1] chunk_length = input[2] # print(\"data\", data)", "= [] for i in trange(max, min, -1): # print('text, i',text, i) res", "None text = text.replace(os.linesep, \"\") matches = [] for i in trange(max, min,", "!= None text = text.replace(os.linesep, \"\") matches = [] for i in trange(max,", "data = input[1] chunk_length = input[2] # print(\"data\", data) found = data.find(test) if", "= make_chunks(list, chunk_length) for chunk in wordLists: if chunk != \"\": chunk =", "str.split() list = striplist(list) wordLists = make_chunks(list, chunk_length) for chunk in wordLists: if", "in trange(max, min, -1): # print('text, i',text, i) res = run(i, text, dataset)", ": i + n] def chunkit(str, chunk_length): # make chunks of strings the", "> 0: for r in res: if r not in matches: matches.append(r) return", "unneeded chars return [x.strip(\" \\t\\n\\r\") for x in l] def compare(input): # main", "return [x.strip(\" \\t\\n\\r\") for x in l] def compare(input): # main comparison function", "print(\"data\", data) found = data.find(test) if found != -1: words = test.split() #", "between chunks import colorama from colorama import Fore from tqdm import trange, tqdm", "DEFAULT_MAX = 10 def striplist(l): # clean out some unneeded chars return [x.strip(\"", "remove empty lines testChunks 
= list(filter(None, testChunks)) results = [] for testLine in", "use sliding windows instead of chunking. # the chunking method fails when part", "len(res) > 0: for r in res: if r not in matches: matches.append(r)", "[x.strip(\" \\t\\n\\r\") for x in l] def compare(input): # main comparison function test", "length, even if they are rounding remainder orphans from the # chunking process", "from the # chunking process if len(words) >= chunk_length: return test def make_chunks(lst,", "from tqdm import trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX =", "= striplist(list) wordLists = make_chunks(list, chunk_length) for chunk in wordLists: if chunk !=", "[] for i in trange(max, min, -1): # print('text, i',text, i) res =", "None assert dataset != None text = text.replace(os.linesep, \"\") matches = [] for", "None: print(\"found\", found) results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert", "text.replace(os.linesep, \"\") matches = [] for i in trange(max, min, -1): # print('text,", "from colorama import Fore from tqdm import trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN", ">= chunk_length: return test def make_chunks(lst, n): # Yield successive n-sized chunks from", "sliding windows instead of chunking. # the chunking method fails when part of", "found = compare([testLine, dataset, chunk_length]) if found != None: print(\"found\", found) results.append(found) return", "TODO: use sliding windows instead of chunking. 
# the chunking method fails when", "dataset, chunk_length]) if found != None: print(\"found\", found) results.append(found) return results def dupecheck(min=DEFAULT_MIN,", "we like chunks = [] list = str.split() list = striplist(list) wordLists =", "data.find(test) if found != -1: words = test.split() # don't return matched chunks", "matched chunks shorter than the current chunk # length, even if they are", "compare(input): # main comparison function test = input[0] data = input[1] chunk_length =", "than the current chunk # length, even if they are rounding remainder orphans", "# the chunking method fails when part of a dupe string # crosses", "Fore from tqdm import trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX", "test.split() # don't return matched chunks shorter than the current chunk # length,", "part of a dupe string # crosses the border between chunks import colorama", "data) found = data.find(test) if found != -1: words = test.split() # don't", "# Yield successive n-sized chunks from lst. 
for i in range(0, len(lst), n):", "way we like chunks = [] list = str.split() list = striplist(list) wordLists", "the chunking method fails when part of a dupe string # crosses the", "dataset): dataset = dataset.replace(os.linesep, \" \") testChunks = chunkit(text, chunk_length) # remove empty", "DEFAULT_MIN = 5 DEFAULT_MAX = 10 def striplist(l): # clean out some unneeded", "= test.split() # don't return matched chunks shorter than the current chunk #", "text, dataset): dataset = dataset.replace(os.linesep, \" \") testChunks = chunkit(text, chunk_length) # remove", "= input[0] data = input[1] chunk_length = input[2] # print(\"data\", data) found =", "in l] def compare(input): # main comparison function test = input[0] data =", "chunk_length) for chunk in wordLists: if chunk != \"\": chunk = \" \".join(chunk)", "results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False): assert text != None assert dataset", "if len(res) > 0: for r in res: if r not in matches:", "run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep, \" \") testChunks = chunkit(text, chunk_length) #", "# TODO: use sliding windows instead of chunking. # the chunking method fails", "import trange, tqdm import os colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX = 10 def", "don't return matched chunks shorter than the current chunk # length, even if", "yield lst[i : i + n] def chunkit(str, chunk_length): # make chunks of", "empty lines testChunks = list(filter(None, testChunks)) results = [] for testLine in testChunks:", "= list(filter(None, testChunks)) results = [] for testLine in testChunks: found = compare([testLine,", "are rounding remainder orphans from the # chunking process if len(words) >= chunk_length:", "matches = [] for i in trange(max, min, -1): # print('text, i',text, i)", "= input[2] # print(\"data\", data) found = data.find(test) if found != -1: words", "instead of chunking. 
# the chunking method fails when part of a dupe", "dataset) if len(res) > 0: for r in res: if r not in", "!= None: print(\"found\", found) results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False):", "chunks.append(chunk) return chunks def run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep, \" \") testChunks", "from lst. for i in range(0, len(lst), n): yield lst[i : i +", "-1): # print('text, i',text, i) res = run(i, text, dataset) if len(res) >", "chunking method fails when part of a dupe string # crosses the border", "the way we like chunks = [] list = str.split() list = striplist(list)", "# crosses the border between chunks import colorama from colorama import Fore from", "0: for r in res: if r not in matches: matches.append(r) return matches", "for chunk in wordLists: if chunk != \"\": chunk = \" \".join(chunk) chunks.append(chunk)", "input[0] data = input[1] chunk_length = input[2] # print(\"data\", data) found = data.find(test)", "remainder orphans from the # chunking process if len(words) >= chunk_length: return test", "of chunking. # the chunking method fails when part of a dupe string", "chunk_length) # remove empty lines testChunks = list(filter(None, testChunks)) results = [] for", "out some unneeded chars return [x.strip(\" \\t\\n\\r\") for x in l] def compare(input):", "chunk_length]) if found != None: print(\"found\", found) results.append(found) return results def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX,", "method fails when part of a dupe string # crosses the border between", "# clean out some unneeded chars return [x.strip(\" \\t\\n\\r\") for x in l]", "chunking. 
# the chunking method fails when part of a dupe string #", "if chunk != \"\": chunk = \" \".join(chunk) chunks.append(chunk) return chunks def run(chunk_length,", "# main comparison function test = input[0] data = input[1] chunk_length = input[2]", "text, dataset) if len(res) > 0: for r in res: if r not", "some unneeded chars return [x.strip(\" \\t\\n\\r\") for x in l] def compare(input): #", "if they are rounding remainder orphans from the # chunking process if len(words)", "i + n] def chunkit(str, chunk_length): # make chunks of strings the way", "def run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep, \" \") testChunks = chunkit(text, chunk_length)", "in testChunks: found = compare([testLine, dataset, chunk_length]) if found != None: print(\"found\", found)", "\".join(chunk) chunks.append(chunk) return chunks def run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep, \" \")", "5 DEFAULT_MAX = 10 def striplist(l): # clean out some unneeded chars return", "[] list = str.split() list = striplist(list) wordLists = make_chunks(list, chunk_length) for chunk", "in wordLists: if chunk != \"\": chunk = \" \".join(chunk) chunks.append(chunk) return chunks", "!= -1: words = test.split() # don't return matched chunks shorter than the", "chunks of strings the way we like chunks = [] list = str.split()", "# remove empty lines testChunks = list(filter(None, testChunks)) results = [] for testLine", "import colorama from colorama import Fore from tqdm import trange, tqdm import os", "= 10 def striplist(l): # clean out some unneeded chars return [x.strip(\" \\t\\n\\r\")", "+ n] def chunkit(str, chunk_length): # make chunks of strings the way we", "make_chunks(lst, n): # Yield successive n-sized chunks from lst. 
for i in range(0,", "even if they are rounding remainder orphans from the # chunking process if", "colorama.init(autoreset=True) DEFAULT_MIN = 5 DEFAULT_MAX = 10 def striplist(l): # clean out some", "\" \".join(chunk) chunks.append(chunk) return chunks def run(chunk_length, text, dataset): dataset = dataset.replace(os.linesep, \"", "colorama from colorama import Fore from tqdm import trange, tqdm import os colorama.init(autoreset=True)", "= input[1] chunk_length = input[2] # print(\"data\", data) found = data.find(test) if found", "words = test.split() # don't return matched chunks shorter than the current chunk", "chunk = \" \".join(chunk) chunks.append(chunk) return chunks def run(chunk_length, text, dataset): dataset =", "chunks = [] list = str.split() list = striplist(list) wordLists = make_chunks(list, chunk_length)", "trange(max, min, -1): # print('text, i',text, i) res = run(i, text, dataset) if", "assert dataset != None text = text.replace(os.linesep, \"\") matches = [] for i", "crosses the border between chunks import colorama from colorama import Fore from tqdm", "fails when part of a dupe string # crosses the border between chunks", "border between chunks import colorama from colorama import Fore from tqdm import trange,", "striplist(list) wordLists = make_chunks(list, chunk_length) for chunk in wordLists: if chunk != \"\":", "compare([testLine, dataset, chunk_length]) if found != None: print(\"found\", found) results.append(found) return results def", "input[1] chunk_length = input[2] # print(\"data\", data) found = data.find(test) if found !=", "def compare(input): # main comparison function test = input[0] data = input[1] chunk_length", "= str.split() list = striplist(list) wordLists = make_chunks(list, chunk_length) for chunk in wordLists:", "n-sized chunks from lst. for i in range(0, len(lst), n): yield lst[i :", "make chunks of strings the way we like chunks = [] list =" ]
[ "idx in range(args.number): run(split(cmd), log) log.close() return 0 if __name__ == '__main__': sys.exit(main())", "type=Path, required=True, help='Path of the output log file') return parser.parse_args() def run(cmd, logfile):", "300' for idx in range(args.number): run(split(cmd), log) log.close() return 0 if __name__ ==", "of the output log file') return parser.parse_args() def run(cmd, logfile): p = subprocess.Popen(cmd,", "if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+') cmd = f'python {ga_path}", "ga2path.keys(): raise Exception('Algorithm should select from [baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path", "parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True,", "of the input for mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path of the output", "help='Path of the output log file') return parser.parse_args() def run(cmd, logfile): p =", "def run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile) return p def main(): args =", "'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def parse_args(): parser = ArgumentParser(description='Running scirpt for Ga')", "repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from baseline, vns-ga, ipga') parser.add_argument('-i', '--input',", "help='Number of repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from baseline, vns-ga, ipga')", "= parse_args() if args.algorithm not in ga2path.keys(): raise Exception('Algorithm should select from [baseline,", "'--number', type=int, default=30, help='Number of repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from", "= open(log_path, 'a+') cmd = f'python {ga_path} -i {args.input} -t 300' for idx", "= ArgumentParser(description='Running scirpt for 
Ga') parser.add_argument('-n', '--number', type=int, default=30, help='Number of repeat runs')", "log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+') cmd = f'python {ga_path} -i {args.input}", "'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def parse_args(): parser", "for Ga') parser.add_argument('-n', '--number', type=int, default=30, help='Number of repeat runs') parser.add_argument('-a', '--algorithm', type=str,", "args = parse_args() if args.algorithm not in ga2path.keys(): raise Exception('Algorithm should select from", ": 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def parse_args(): parser =", "log file') return parser.parse_args() def run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile) return p", "[baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path = args.output if not log_path.exists(): log_path.parent.mkdir(parents=True,", "mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path of the output log file') return parser.parse_args()", "default=30, help='Number of repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from baseline, vns-ga,", "import split ga2path = { 'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' :", "parse_args() if args.algorithm not in ga2path.keys(): raise Exception('Algorithm should select from [baseline, vns-ga,", "-i {args.input} -t 300' for idx in range(args.number): run(split(cmd), log) log.close() return 0", "return p def main(): args = parse_args() if args.algorithm not in ga2path.keys(): raise", "for idx in range(args.number): run(split(cmd), log) log.close() return 0 if __name__ == '__main__':", "stdout=logfile) return p def main(): args = parse_args() if args.algorithm not in ga2path.keys():", "parser = ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n', 
'--number', type=int, default=30, help='Number of repeat", "= args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+') cmd =", "type=Path, required=True, help='Path of the input for mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path", "} def parse_args(): parser = ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n', '--number', type=int, default=30,", "ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path of the input for mTSP') parser.add_argument('-o', '--output',", "args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+') cmd = f'python", "the input for mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path of the output log", "output log file') return parser.parse_args() def run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile) return", "vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path = args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True)", "should select from [baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path = args.output if", "the output log file') return parser.parse_args() def run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile)", "Path from shlex import split ga2path = { 'baseline' : 'GA/baseline/main.py', 'vns-ga' :", "log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+') cmd = f'python {ga_path} -i {args.input} -t", "baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path of the input for mTSP')", "os import sys import subprocess from argparse import ArgumentParser from pathlib import Path", "import ArgumentParser from pathlib import Path from shlex import split ga2path = {", "def parse_args(): parser = ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n', 
'--number', type=int, default=30, help='Number", "input for mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path of the output log file')", "log_path = args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+') cmd", "parser.add_argument('-o', '--output', type=Path, required=True, help='Path of the output log file') return parser.parse_args() def", "in ga2path.keys(): raise Exception('Algorithm should select from [baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm]", "import Path from shlex import split ga2path = { 'baseline' : 'GA/baseline/main.py', 'vns-ga'", "sys import subprocess from argparse import ArgumentParser from pathlib import Path from shlex", "<gh_stars>1-10 import os import sys import subprocess from argparse import ArgumentParser from pathlib", "-t 300' for idx in range(args.number): run(split(cmd), log) log.close() return 0 if __name__", "type=int, default=30, help='Number of repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from baseline,", "vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path of the input for mTSP') parser.add_argument('-o',", "'--algorithm', type=str, required=True, help='GA from baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path", "'--output', type=Path, required=True, help='Path of the output log file') return parser.parse_args() def run(cmd,", "runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path,", "'a+') cmd = f'python {ga_path} -i {args.input} -t 300' for idx in range(args.number):", "shlex import split ga2path = { 'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga'", "{ga_path} -i {args.input} -t 300' for idx in range(args.number): run(split(cmd), log) log.close() return", "= 
f'python {ga_path} -i {args.input} -t 300' for idx in range(args.number): run(split(cmd), log)", "cmd = f'python {ga_path} -i {args.input} -t 300' for idx in range(args.number): run(split(cmd),", "ga_path = ga2path[args.algorithm] log_path = args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log =", "not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+') cmd = f'python {ga_path} -i", "help='GA from baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path of the input", "parse_args(): parser = ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n', '--number', type=int, default=30, help='Number of", "'--input', type=Path, required=True, help='Path of the input for mTSP') parser.add_argument('-o', '--output', type=Path, required=True,", "= ga2path[args.algorithm] log_path = args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path,", "run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile) return p def main(): args = parse_args()", "{ 'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def parse_args():", "main(): args = parse_args() if args.algorithm not in ga2path.keys(): raise Exception('Algorithm should select", "parser.add_argument('-n', '--number', type=int, default=30, help='Number of repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA", "argparse import ArgumentParser from pathlib import Path from shlex import split ga2path =", "p def main(): args = parse_args() if args.algorithm not in ga2path.keys(): raise Exception('Algorithm", "raise Exception('Algorithm should select from [baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path =", "ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n', '--number', type=int, default=30, help='Number of 
repeat runs') parser.add_argument('-a',", "pathlib import Path from shlex import split ga2path = { 'baseline' : 'GA/baseline/main.py',", "ArgumentParser from pathlib import Path from shlex import split ga2path = { 'baseline'", "subprocess from argparse import ArgumentParser from pathlib import Path from shlex import split", "required=True, help='Path of the input for mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path of", "def main(): args = parse_args() if args.algorithm not in ga2path.keys(): raise Exception('Algorithm should", "from shlex import split ga2path = { 'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py',", "help='Path of the input for mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path of the", "subprocess.Popen(cmd, stdout=logfile) return p def main(): args = parse_args() if args.algorithm not in", "of repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True, help='GA from baseline, vns-ga, ipga') parser.add_argument('-i',", "file') return parser.parse_args() def run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile) return p def", "import subprocess from argparse import ArgumentParser from pathlib import Path from shlex import", "'GA/IPGA/main.py' } def parse_args(): parser = ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n', '--number', type=int,", "for mTSP') parser.add_argument('-o', '--output', type=Path, required=True, help='Path of the output log file') return", "logfile): p = subprocess.Popen(cmd, stdout=logfile) return p def main(): args = parse_args() if", "from [baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path = args.output if not log_path.exists():", "'ipga' : 'GA/IPGA/main.py' } def parse_args(): parser = ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n',", "ga2path = { 'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' 
}", "= { 'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def", "{args.input} -t 300' for idx in range(args.number): run(split(cmd), log) log.close() return 0 if", "open(log_path, 'a+') cmd = f'python {ga_path} -i {args.input} -t 300' for idx in", "'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def parse_args(): parser = ArgumentParser(description='Running", "exist_ok=True) log = open(log_path, 'a+') cmd = f'python {ga_path} -i {args.input} -t 300'", "parser.add_argument('-i', '--input', type=Path, required=True, help='Path of the input for mTSP') parser.add_argument('-o', '--output', type=Path,", "Ga') parser.add_argument('-n', '--number', type=int, default=30, help='Number of repeat runs') parser.add_argument('-a', '--algorithm', type=str, required=True,", "parser.parse_args() def run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile) return p def main(): args", "ga2path[args.algorithm] log_path = args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log = open(log_path, 'a+')", "return parser.parse_args() def run(cmd, logfile): p = subprocess.Popen(cmd, stdout=logfile) return p def main():", "split ga2path = { 'baseline' : 'GA/baseline/main.py', 'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py'", "log = open(log_path, 'a+') cmd = f'python {ga_path} -i {args.input} -t 300' for", "from baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path of the input for", "import os import sys import subprocess from argparse import ArgumentParser from pathlib import", "args.algorithm not in ga2path.keys(): raise Exception('Algorithm should select from [baseline, vns-ga, ipga') ga_path", "f'python {ga_path} -i {args.input} -t 300' for idx in range(args.number): run(split(cmd), log) log.close()", ": 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def parse_args(): parser = 
ArgumentParser(description='Running scirpt for", "scirpt for Ga') parser.add_argument('-n', '--number', type=int, default=30, help='Number of repeat runs') parser.add_argument('-a', '--algorithm',", "from argparse import ArgumentParser from pathlib import Path from shlex import split ga2path", "= subprocess.Popen(cmd, stdout=logfile) return p def main(): args = parse_args() if args.algorithm not", "not in ga2path.keys(): raise Exception('Algorithm should select from [baseline, vns-ga, ipga') ga_path =", "'vns-ga' : 'GA/vns-ga/main.py', 'ipga' : 'GA/IPGA/main.py' } def parse_args(): parser = ArgumentParser(description='Running scirpt", "required=True, help='Path of the output log file') return parser.parse_args() def run(cmd, logfile): p", "required=True, help='GA from baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path of the", "Exception('Algorithm should select from [baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path = args.output", "from pathlib import Path from shlex import split ga2path = { 'baseline' :", "ipga') ga_path = ga2path[args.algorithm] log_path = args.output if not log_path.exists(): log_path.parent.mkdir(parents=True, exist_ok=True) log", "type=str, required=True, help='GA from baseline, vns-ga, ipga') parser.add_argument('-i', '--input', type=Path, required=True, help='Path of", "import sys import subprocess from argparse import ArgumentParser from pathlib import Path from", "if args.algorithm not in ga2path.keys(): raise Exception('Algorithm should select from [baseline, vns-ga, ipga')", "select from [baseline, vns-ga, ipga') ga_path = ga2path[args.algorithm] log_path = args.output if not", ": 'GA/IPGA/main.py' } def parse_args(): parser = ArgumentParser(description='Running scirpt for Ga') parser.add_argument('-n', '--number',", "p = subprocess.Popen(cmd, stdout=logfile) return p def main(): args = parse_args() if args.algorithm" ]
[ "empty // Contains checks or Substring Matching <Value> in <Selector> <Value> not in", "+ key, operator=operator, value=value) def as_expression(self) -> str: \"\"\" // Equality & Inequality", "\"\"\" // Equality & Inequality checks <Selector> == <Value> <Selector> != <Value> //", "= key self.value = value class Filter: \"\"\"Filter to provide simple search functionality", "Consul. \"\"\" def __init__(self, selector: str, operator: str, value: str): self.selector = selector", "OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class Fields:", "{}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{}", "or self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator ==", "operator=operator, value=value) def as_expression(self) -> str: \"\"\" // Equality & Inequality checks <Selector>", "operator=operator, value=value) @staticmethod def new_meta_filter(key: str, operator: str, value: str): return Filter(selector=Fields.FIELD_META +", "new_meta_filter(key: str, operator: str, value: str): return Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator,", "= \"not contains\" class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META =", "new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key: str, operator:", "OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not", "__init__(self, selector: str, operator: str, value: str): self.selector = selector self.operator = operator", "elif self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator,", "FIELD_TAGS = \"Tags\" FIELD_META = 
\"Meta\" class KeyValuePair: \"\"\"Simple representation of a key", "<Value> in <Selector> <Value> not in <Selector> <Selector> contains <Value> <Selector> not contains", "Emptiness checks <Selector> is empty <Selector> is not empty // Contains checks or", "OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not", "not contains <Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return", "str, value: str): self.key = key self.value = value class Filter: \"\"\"Filter to", "\"in\" OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class", "{}\".format(self.value, self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS: return \"{}", "= value @staticmethod def new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod", "return Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator, value=value) def as_expression(self) -> str: \"\"\"", "Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_IN or self.operator", "\"Meta\" class KeyValuePair: \"\"\"Simple representation of a key value pair. \"\"\" def __init__(self,", "pair. 
\"\"\" def __init__(self, key: str, value: str): self.key = key self.value =", "\"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY:", "value: str): self.key = key self.value = value class Filter: \"\"\"Filter to provide", "contains\" class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META = \"Meta\" class", "= selector self.operator = operator self.value = value @staticmethod def new_tag_filter(operator: str, value:", "= \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN", "is not empty // Contains checks or Substring Matching <Value> in <Selector> <Value>", "a key value pair. \"\"\" def __init__(self, key: str, value: str): self.key =", "== <Value> <Selector> != <Value> // Emptiness checks <Selector> is empty <Selector> is", "search functionality in Consul. \"\"\" def __init__(self, selector: str, operator: str, value: str):", "checks <Selector> is empty <Selector> is not empty // Contains checks or Substring", "\"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META = \"Meta\" class KeyValuePair: \"\"\"Simple representation", "Operators: \"\"\"Operator constants\"\"\" OPERATOR_EQUALITY = \"==\" OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY", "value=value) def as_expression(self) -> str: \"\"\" // Equality & Inequality checks <Selector> ==", "constants\"\"\" OPERATOR_EQUALITY = \"==\" OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not", "key value pair. \"\"\" def __init__(self, key: str, value: str): self.key = key", "str, operator: str, value: str): self.selector = selector self.operator = operator self.value =", "class KeyValuePair: \"\"\"Simple representation of a key value pair. 
\"\"\" def __init__(self, key:", "value @staticmethod def new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def", "<Selector> == <Value> <Selector> != <Value> // Emptiness checks <Selector> is empty <Selector>", "Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY or self.operator", "{}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return \"{}", "class Filter: \"\"\"Filter to provide simple search functionality in Consul. \"\"\" def __init__(self,", "class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META = \"Meta\" class KeyValuePair:", "self.selector = selector self.operator = operator self.value = value @staticmethod def new_tag_filter(operator: str,", "Matching <Value> in <Selector> <Value> not in <Selector> <Selector> contains <Value> <Selector> not", "value class Filter: \"\"\"Filter to provide simple search functionality in Consul. \"\"\" def", "\"\"\" if self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector,", "to provide simple search functionality in Consul. \"\"\" def __init__(self, selector: str, operator:", "of a key value pair. 
\"\"\" def __init__(self, key: str, value: str): self.key", "not in <Selector> <Selector> contains <Value> <Selector> not contains <Value> \"\"\" if self.operator", "Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator", "self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY", "\"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN =", "+ \".\" + key, operator=operator, value=value) def as_expression(self) -> str: \"\"\" // Equality", "FIELD_META = \"Meta\" class KeyValuePair: \"\"\"Simple representation of a key value pair. \"\"\"", "empty <Selector> is not empty // Contains checks or Substring Matching <Value> in", "KeyValuePair: \"\"\"Simple representation of a key value pair. \"\"\" def __init__(self, key: str,", "value: str): self.selector = selector self.operator = operator self.value = value @staticmethod def", "str, value: str): return Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator, value=value) def as_expression(self)", "Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key: str, operator: str, value: str): return Filter(selector=Fields.FIELD_META", "str): return Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator, value=value) def as_expression(self) -> str:", "name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META = \"Meta\" class KeyValuePair: \"\"\"Simple representation of", "<Selector> <Value> not in <Selector> <Selector> contains <Value> <Selector> not contains <Value> \"\"\"", "{} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return", "constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META = \"Meta\" 
class KeyValuePair: \"\"\"Simple representation of a", "or Substring Matching <Value> in <Selector> <Value> not in <Selector> <Selector> contains <Value>", "or self.operator == Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator, self.selector) elif self.operator ==", "return \"{} {} {}\".format(self.value, self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or self.operator ==", "not empty // Contains checks or Substring Matching <Value> in <Selector> <Value> not", "\"\"\"Simple representation of a key value pair. \"\"\" def __init__(self, key: str, value:", "Substring Matching <Value> in <Selector> <Value> not in <Selector> <Selector> contains <Value> <Selector>", "= \"Meta\" class KeyValuePair: \"\"\"Simple representation of a key value pair. \"\"\" def", "// Contains checks or Substring Matching <Value> in <Selector> <Value> not in <Selector>", "provide simple search functionality in Consul. \"\"\" def __init__(self, selector: str, operator: str,", "operator: str, value: str): self.selector = selector self.operator = operator self.value = value", "& Inequality checks <Selector> == <Value> <Selector> != <Value> // Emptiness checks <Selector>", "checks or Substring Matching <Value> in <Selector> <Value> not in <Selector> <Selector> contains", "empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS =", "\"\"\"Filter to provide simple search functionality in Consul. 
\"\"\" def __init__(self, selector: str,", "Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator", "== Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator, self.selector) elif", "OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN = \"in\"", "= operator self.value = value @staticmethod def new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS,", "is empty <Selector> is not empty // Contains checks or Substring Matching <Value>", "Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator, self.selector) elif self.operator", "<Selector> != <Value> // Emptiness checks <Selector> is empty <Selector> is not empty", "<Value> <Selector> != <Value> // Emptiness checks <Selector> is empty <Selector> is not", "\"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class Fields: \"\"\"Field name", "<Selector> is not empty // Contains checks or Substring Matching <Value> in <Selector>", "\"\"\" def __init__(self, selector: str, operator: str, value: str): self.selector = selector self.operator", "str): self.selector = selector self.operator = operator self.value = value @staticmethod def new_tag_filter(operator:", "= \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS =", "<Selector> is empty <Selector> is not empty // Contains checks or Substring Matching", "str: \"\"\" // Equality & Inequality checks <Selector> == <Value> <Selector> != <Value>", "\"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS", "\"not contains\" class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META = 
\"Meta\"", "class Operators: \"\"\"Operator constants\"\"\" OPERATOR_EQUALITY = \"==\" OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\"", "\"\"\" def __init__(self, key: str, value: str): self.key = key self.value = value", "<Selector> <Selector> contains <Value> <Selector> not contains <Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY", "<Selector> not contains <Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY:", "contains <Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return \"{}", "self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {}", "self.key = key self.value = value class Filter: \"\"\"Filter to provide simple search", "operator self.value = value @staticmethod def new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator,", "key, operator=operator, value=value) def as_expression(self) -> str: \"\"\" // Equality & Inequality checks", "def new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key: str,", "== Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or", "self.operator, self.value) elif self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return \"{} {}", "{} {}\".format(self.value, self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS: return", "self.value) elif self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector,", "OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not 
in\" OPERATOR_CONTAINS =", "Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator, value=value) def as_expression(self) -> str: \"\"\" //", "self.operator = operator self.value = value @staticmethod def new_tag_filter(operator: str, value: str): return", "self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS: return \"{} {} {}\".format(self.selector, self.operator, self.value)", "\"==\" OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN =", "str): self.key = key self.value = value class Filter: \"\"\"Filter to provide simple", "<Value> <Selector> not contains <Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY or self.operator ==", "self.value) elif self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value,", "in <Selector> <Selector> contains <Value> <Selector> not contains <Value> \"\"\" if self.operator ==", "Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META = \"Meta\" class KeyValuePair: \"\"\"Simple", "as_expression(self) -> str: \"\"\" // Equality & Inequality checks <Selector> == <Value> <Selector>", "elif self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator,", "selector self.operator = operator self.value = value @staticmethod def new_tag_filter(operator: str, value: str):", "str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key: str, operator: str, value: str):", "def __init__(self, key: str, value: str): self.key = key self.value = value class", "simple search functionality in Consul. 
\"\"\" def __init__(self, selector: str, operator: str, value:", "{} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return", "== Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY or", "Filter: \"\"\"Filter to provide simple search functionality in Consul. \"\"\" def __init__(self, selector:", "= \"==\" OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN", "-> str: \"\"\" // Equality & Inequality checks <Selector> == <Value> <Selector> !=", "self.value = value @staticmethod def new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value)", "return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_EMPTY or self.operator ==", "self.operator == Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS", "self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_IN", "elif self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS: return \"{} {} {}\".format(self.selector, self.operator,", "= \"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not in\"", "operator: str, value: str): return Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator, value=value) def", "in <Selector> <Value> not in <Selector> <Selector> contains <Value> <Selector> not contains <Value>", "self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS: return \"{} {} {}\".format(self.selector,", "== 
Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_IN or", "<Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {}", "value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key: str, operator: str, value:", "key self.value = value class Filter: \"\"\"Filter to provide simple search functionality in", "checks <Selector> == <Value> <Selector> != <Value> // Emptiness checks <Selector> is empty", "\"{} {} {}\".format(self.value, self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS:", "contains <Value> <Selector> not contains <Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY or self.operator", "in Consul. \"\"\" def __init__(self, selector: str, operator: str, value: str): self.selector =", "OPERATOR_EQUALITY = \"==\" OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY = \"not empty\"", "if self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator,", "value pair. 
\"\"\" def __init__(self, key: str, value: str): self.key = key self.value", "value=value) @staticmethod def new_meta_filter(key: str, operator: str, value: str): return Filter(selector=Fields.FIELD_META + \".\"", "== Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif", "= \"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS = \"contains\"", "= \"in\" OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\"", "@staticmethod def new_meta_filter(key: str, operator: str, value: str): return Filter(selector=Fields.FIELD_META + \".\" +", "Inequality checks <Selector> == <Value> <Selector> != <Value> // Emptiness checks <Selector> is", "\"Tags\" FIELD_META = \"Meta\" class KeyValuePair: \"\"\"Simple representation of a key value pair.", "Contains checks or Substring Matching <Value> in <Selector> <Value> not in <Selector> <Selector>", "\"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\"", "== Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif", "self.value = value class Filter: \"\"\"Filter to provide simple search functionality in Consul.", "self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or self.operator == Operators.OPERATOR_NOT_CONTAINS: return \"{} {}", "self.operator == Operators.OPERATOR_EMPTY or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator, self.value)", "\".\" + key, operator=operator, value=value) def as_expression(self) -> str: \"\"\" // Equality &", "self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator, self.selector)", "selector: str, operator: 
str, value: str): self.selector = selector self.operator = operator self.value", "return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_IN or self.operator ==", "OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS", "<Selector> contains <Value> <Selector> not contains <Value> \"\"\" if self.operator == Operators.OPERATOR_EQUALITY or", "representation of a key value pair. \"\"\" def __init__(self, key: str, value: str):", "def __init__(self, selector: str, operator: str, value: str): self.selector = selector self.operator =", "!= <Value> // Emptiness checks <Selector> is empty <Selector> is not empty //", "return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key: str, operator: str, value: str): return", "value: str): return Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator, value=value) def as_expression(self) ->", "str, operator: str, value: str): return Filter(selector=Fields.FIELD_META + \".\" + key, operator=operator, value=value)", "str, value: str): self.selector = selector self.operator = operator self.value = value @staticmethod", "<Value> not in <Selector> <Selector> contains <Value> <Selector> not contains <Value> \"\"\" if", "def new_meta_filter(key: str, operator: str, value: str): return Filter(selector=Fields.FIELD_META + \".\" + key,", "= value class Filter: \"\"\"Filter to provide simple search functionality in Consul. 
\"\"\"", "@staticmethod def new_tag_filter(operator: str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key:", "in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class Fields: \"\"\"Field name constants\"\"\"", "\"\"\"Operator constants\"\"\" OPERATOR_EQUALITY = \"==\" OPERATOR_INEQUALITY = \"!=\" OPERATOR_EMPTY = \"empty\" OPERATOR_NOT_EMPTY =", "<Value> // Emptiness checks <Selector> is empty <Selector> is not empty // Contains", "OPERATOR_NOT_CONTAINS = \"not contains\" class Fields: \"\"\"Field name constants\"\"\" FIELD_TAGS = \"Tags\" FIELD_META", "key: str, value: str): self.key = key self.value = value class Filter: \"\"\"Filter", "// Emptiness checks <Selector> is empty <Selector> is not empty // Contains checks", "= \"Tags\" FIELD_META = \"Meta\" class KeyValuePair: \"\"\"Simple representation of a key value", "// Equality & Inequality checks <Selector> == <Value> <Selector> != <Value> // Emptiness", "Equality & Inequality checks <Selector> == <Value> <Selector> != <Value> // Emptiness checks", "Operators.OPERATOR_NOT_IN: return \"{} {} {}\".format(self.value, self.operator, self.selector) elif self.operator == Operators.OPERATOR_CONTAINS or self.operator", "\"empty\" OPERATOR_NOT_EMPTY = \"not empty\" OPERATOR_IN = \"in\" OPERATOR_NOT_IN = \"not in\" OPERATOR_CONTAINS", "def as_expression(self) -> str: \"\"\" // Equality & Inequality checks <Selector> == <Value>", "self.operator == Operators.OPERATOR_EQUALITY or self.operator == Operators.OPERATOR_INEQUALITY: return \"{} {} {}\".format(self.selector, self.operator, self.value)", "= \"not in\" OPERATOR_CONTAINS = \"contains\" OPERATOR_NOT_CONTAINS = \"not contains\" class Fields: \"\"\"Field", "functionality in Consul. 
\"\"\" def __init__(self, selector: str, operator: str, value: str): self.selector", "or self.operator == Operators.OPERATOR_NOT_EMPTY: return \"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator ==", "str, value: str): return Filter(selector=Fields.FIELD_TAGS, operator=operator, value=value) @staticmethod def new_meta_filter(key: str, operator: str,", "__init__(self, key: str, value: str): self.key = key self.value = value class Filter:", "\"{} {} {}\".format(self.selector, self.operator, self.value) elif self.operator == Operators.OPERATOR_IN or self.operator == Operators.OPERATOR_NOT_IN:" ]
[ "import HTTPAdapter import requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\"", "url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\")", "def test_download_remote_sitemap(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url, session) assert", "pytest from wayback_machine_archiver.archiver import download_remote_sitemap from requests.adapters import HTTPAdapter import requests SITEMAP =", "session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url, session) assert returned_contents ==", "= requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session): url =", "session = requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session): url", "import pytest from wayback_machine_archiver.archiver import download_remote_sitemap from requests.adapters import HTTPAdapter import requests SITEMAP", "\"\"\" @pytest.fixture def session(): session = requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session", "http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> 
<loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture", "= \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url>", "<loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def session(): session", "from wayback_machine_archiver.archiver import download_remote_sitemap from requests.adapters import HTTPAdapter import requests SITEMAP = \"\"\"<?xml", "session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url,", "SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod>", "<url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def session():", "return session def test_download_remote_sitemap(requests_mock, 
session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url,", "= download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml'", "SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP, status_code=404) with pytest.raises(requests.exceptions.HTTPError): download_remote_sitemap(url,", "encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod>", "HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents =", "= 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def", "<gh_stars>10-100 import pytest from wayback_machine_archiver.archiver import download_remote_sitemap from requests.adapters import HTTPAdapter import requests", "HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP)", "session(): session = requests.Session() 
session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session):", "requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml'", "download_remote_sitemap from requests.adapters import HTTPAdapter import requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset", "<urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url>", "session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP,", "<loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def session(): session = requests.Session() session.mount(\"https://\", HTTPAdapter())", "<url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def session(): session = requests.Session() session.mount(\"https://\",", "@pytest.fixture def session(): session = requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def", "'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = 
download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock,", "xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\"", "download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url,", "from requests.adapters import HTTPAdapter import requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"", "assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP, status_code=404)", "def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP, status_code=404) with pytest.raises(requests.exceptions.HTTPError): download_remote_sitemap(url, session)", "import download_remote_sitemap from requests.adapters import HTTPAdapter import requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>", "\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> 
<loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url>", "HTTPAdapter import requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">", "def session(): session = requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock,", "requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session):", "returned_contents = download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url =", "</urlset> \"\"\" @pytest.fixture def session(): session = requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter()) return", "requests.adapters import HTTPAdapter import requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9", "requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc>", "session.mount(\"http://\", HTTPAdapter()) return session def test_download_remote_sitemap(requests_mock, session): url = 
'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents", "session def test_download_remote_sitemap(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url, session)", "returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP, status_code=404) with", "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset>", "</url> </urlset> \"\"\" @pytest.fixture def session(): session = requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\", HTTPAdapter())", "test_download_remote_sitemap(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP) returned_contents = download_remote_sitemap(url, session) assert returned_contents", "== SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url = 'https://www.radiokeysmusic.com/sitemap.xml' requests_mock.get(url, text=SITEMAP, status_code=404) with pytest.raises(requests.exceptions.HTTPError):", "text=SITEMAP) returned_contents = download_remote_sitemap(url, session) assert returned_contents == SITEMAP.encode(\"UTF-8\") def test_download_remote_sitemap_with_status_error(requests_mock, session): url", "version=\"1.0\" encoding=\"UTF-8\"?> <urlset 
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc>", "<lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def session(): session = requests.Session() session.mount(\"https://\", HTTPAdapter()) session.mount(\"http://\",", "xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url> <loc>https://alexgude.com/blog/double-checking-538/</loc> <lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def", "<lastmod>2016-04-28T00:00:00+00:00</lastmod> </url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def session(): session =", "wayback_machine_archiver.archiver import download_remote_sitemap from requests.adapters import HTTPAdapter import requests SITEMAP = \"\"\"<?xml version=\"1.0\"", "import requests SITEMAP = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> <urlset xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\" xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"> <url>", "</url> <url> <loc>https://alexgude.com/files/undergrad_thesis.pdf</loc> <lastmod>2019-05-09T16:19:45+00:00</lastmod> </url> </urlset> \"\"\" @pytest.fixture def session(): session = requests.Session()" ]
[ "context {context}\") assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection = await mgr.accept_response(context.message) target", "connection = await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) # send trust ping in", "mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) # send trust ping in response await responder.send_outbound(Ping(),", "RequestContext from ..messages.connection_response import ConnectionResponse from ..manager import ConnectionManager from ...trustping.messages.ping import Ping", "...base_handler import BaseHandler, BaseResponder, RequestContext from ..messages.connection_response import ConnectionResponse from ..manager import ConnectionManager", "BaseResponder): \"\"\" Handle connection response. Args: context: Request context responder: Responder callback \"\"\"", "from ..manager import ConnectionManager from ...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for", "called with context {context}\") assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection = await", "from ...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\" async def", "responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert isinstance(context.message, ConnectionResponse) mgr", "Args: context: Request context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\")", "Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\" async def handle(self, context: RequestContext,", "import ConnectionResponse from ..manager import ConnectionManager from ...trustping.messages.ping import Ping class 
ConnectionResponseHandler(BaseHandler): \"\"\"Handler", "connection response. Args: context: Request context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with", "import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\" async def handle(self, context:", "= await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) # send trust ping in response", "ConnectionManager(context) connection = await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) # send trust ping", "for connection responses.\"\"\" async def handle(self, context: RequestContext, responder: BaseResponder): \"\"\" Handle connection", "\"\"\"Connection response handler.\"\"\" from ...base_handler import BaseHandler, BaseResponder, RequestContext from ..messages.connection_response import ConnectionResponse", "context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert isinstance(context.message, ConnectionResponse)", "Handle connection response. Args: context: Request context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called", "BaseHandler, BaseResponder, RequestContext from ..messages.connection_response import ConnectionResponse from ..manager import ConnectionManager from ...trustping.messages.ping", "= ConnectionManager(context) connection = await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) # send trust", "response. 
Args: context: Request context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context", "\"\"\"Handler class for connection responses.\"\"\" async def handle(self, context: RequestContext, responder: BaseResponder): \"\"\"", "import BaseHandler, BaseResponder, RequestContext from ..messages.connection_response import ConnectionResponse from ..manager import ConnectionManager from", "import ConnectionManager from ...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\"", "mgr = ConnectionManager(context) connection = await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) # send", "assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection = await mgr.accept_response(context.message) target = await", "handler.\"\"\" from ...base_handler import BaseHandler, BaseResponder, RequestContext from ..messages.connection_response import ConnectionResponse from ..manager", "self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection =", "ConnectionResponse from ..manager import ConnectionManager from ...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class", "callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context)", "connection responses.\"\"\" async def handle(self, context: RequestContext, responder: BaseResponder): \"\"\" Handle connection response.", "from ...base_handler import BaseHandler, BaseResponder, RequestContext from ..messages.connection_response import ConnectionResponse from ..manager import", "BaseResponder, RequestContext from ..messages.connection_response 
import ConnectionResponse from ..manager import ConnectionManager from ...trustping.messages.ping import", "context: Request context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert", "context: RequestContext, responder: BaseResponder): \"\"\" Handle connection response. Args: context: Request context responder:", "RequestContext, responder: BaseResponder): \"\"\" Handle connection response. Args: context: Request context responder: Responder", "\"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection", "handle(self, context: RequestContext, responder: BaseResponder): \"\"\" Handle connection response. Args: context: Request context", "Request context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert isinstance(context.message,", "await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) # send trust ping in response await", "from ..messages.connection_response import ConnectionResponse from ..manager import ConnectionManager from ...trustping.messages.ping import Ping class", "class for connection responses.\"\"\" async def handle(self, context: RequestContext, responder: BaseResponder): \"\"\" Handle", "isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection = await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection)", "class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\" async def handle(self, context: RequestContext, responder:", "async def handle(self, context: RequestContext, responder: BaseResponder): \"\"\" Handle connection response. 
Args: context:", "def handle(self, context: RequestContext, responder: BaseResponder): \"\"\" Handle connection response. Args: context: Request", "responder: BaseResponder): \"\"\" Handle connection response. Args: context: Request context responder: Responder callback", "response handler.\"\"\" from ...base_handler import BaseHandler, BaseResponder, RequestContext from ..messages.connection_response import ConnectionResponse from", "responses.\"\"\" async def handle(self, context: RequestContext, responder: BaseResponder): \"\"\" Handle connection response. Args:", "\"\"\" Handle connection response. Args: context: Request context responder: Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler", "with context {context}\") assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection = await mgr.accept_response(context.message)", "ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\" async def handle(self, context: RequestContext, responder: BaseResponder):", "Responder callback \"\"\" self._logger.debug(f\"ConnectionResponseHandler called with context {context}\") assert isinstance(context.message, ConnectionResponse) mgr =", "{context}\") assert isinstance(context.message, ConnectionResponse) mgr = ConnectionManager(context) connection = await mgr.accept_response(context.message) target =", "..messages.connection_response import ConnectionResponse from ..manager import ConnectionManager from ...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler):", "ConnectionResponse) mgr = ConnectionManager(context) connection = await mgr.accept_response(context.message) target = await mgr.get_connection_target(connection) #", "...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\" async def handle(self,", "target = await mgr.get_connection_target(connection) # send trust 
ping in response await responder.send_outbound(Ping(), target)", "..manager import ConnectionManager from ...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection", "ConnectionManager from ...trustping.messages.ping import Ping class ConnectionResponseHandler(BaseHandler): \"\"\"Handler class for connection responses.\"\"\" async" ]
[ "x_avg = (line1_x_mp + line2_x_mp) / 2 y_avg = (line1_y_mp + line2_y_mp) /", "= (line2_start[1] + line2_end[1]) / 2 # Calculate the average between each midpoint", "class CartesianVals: def __init__(self, x, y): self.x = x self.y = y #", "do math with custom objects as opposed to arrays and tuples, so each", "opposed to arrays and tuples, so each necessary point will # be assigned", "midpoints of two lines # Basically a centroid, but without doing a bunch", "+ line2_end[1]) / 2 # Calculate the average between each midpoint x_avg =", "line2_y_mp) / 2 return CartesianVals(x_avg, y_avg) # Load two empty Cartesian coordinates for", "line2_x_mp) / 2 y_avg = (line1_y_mp + line2_y_mp) / 2 return CartesianVals(x_avg, y_avg)", "somewhere on the pixel array class CartesianVals: def __init__(self, x, y): self.x =", "line2_x_mp = (line2_start[0] + line2_end[0]) / 2 line2_y_mp = (line2_start[1] + line2_end[1]) /", "in each dimension for each line line1_x_mp = (line1_start[0] + line1_end[0]) / 2", "line2_end): # Calculate midpoints in each dimension for each line line1_x_mp = (line1_start[0]", "# Calculate midpoints in each dimension for each line line1_x_mp = (line1_start[0] +", "doing a bunch of weird linear algebra stuff with numpy def seg_avg(line1_start, line1_end,", "# It's easy (and fun) to do math with custom objects as opposed", "- other.y return CartesianVals(x, y) # Calculate the average between the midpoints of", "arrays and tuples, so each necessary point will # be assigned to a", "easy (and fun) to do math with custom objects as opposed to arrays", "/ 2 # Calculate the average between each midpoint x_avg = (line1_x_mp +", "against each other def __sub__(self, other): x = self.x - other.x y =", "a bunch of weird linear algebra stuff with numpy def seg_avg(line1_start, line1_end, line2_start,", "x = self.x - other.x y = self.y - other.y return CartesianVals(x, y)", "# Load two empty Cartesian coordinates for now, to be used in main.py", "line line1_x_mp = 
(line1_start[0] + line1_end[0]) / 2 line1_y_mp = (line1_start[1] + line1_end[1])", "on the pixel array class CartesianVals: def __init__(self, x, y): self.x = x", "stuff with numpy def seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate midpoints in each", "a centroid, but without doing a bunch of weird linear algebra stuff with", "self.y - other.y return CartesianVals(x, y) # Calculate the average between the midpoints", "each line line1_x_mp = (line1_start[0] + line1_end[0]) / 2 line1_y_mp = (line1_start[1] +", "algebra stuff with numpy def seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate midpoints in", "necessary point will # be assigned to a Cartesian coordinate that fits somewhere", "/ 2 line1_y_mp = (line1_start[1] + line1_end[1]) / 2 line2_x_mp = (line2_start[0] +", "line1_y_mp = (line1_start[1] + line1_end[1]) / 2 line2_x_mp = (line2_start[0] + line2_end[0]) /", "with custom objects as opposed to arrays and tuples, so each necessary point", "tuples, so each necessary point will # be assigned to a Cartesian coordinate", "objects as opposed to arrays and tuples, so each necessary point will #", "linear algebra stuff with numpy def seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate midpoints", "with numpy def seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate midpoints in each dimension", "each midpoint x_avg = (line1_x_mp + line2_x_mp) / 2 y_avg = (line1_y_mp +", "return CartesianVals(x_avg, y_avg) # Load two empty Cartesian coordinates for now, to be", "coordinate that fits somewhere on the pixel array class CartesianVals: def __init__(self, x,", "midpoints in each dimension for each line line1_x_mp = (line1_start[0] + line1_end[0]) /", "At some point, we subtract two Cartesian coordinates against each other def __sub__(self,", "two lines # Basically a centroid, but without doing a bunch of weird", "CartesianVals: def __init__(self, x, y): self.x = x self.y = y # At", "# At some point, 
we subtract two Cartesian coordinates against each other def", "each other def __sub__(self, other): x = self.x - other.x y = self.y", "<filename>ascended/imath.py # It's easy (and fun) to do math with custom objects as", "pixel array class CartesianVals: def __init__(self, x, y): self.x = x self.y =", "return CartesianVals(x, y) # Calculate the average between the midpoints of two lines", "self.y = y # At some point, we subtract two Cartesian coordinates against", "Calculate the average between each midpoint x_avg = (line1_x_mp + line2_x_mp) / 2", "other def __sub__(self, other): x = self.x - other.x y = self.y -", "line1_x_mp = (line1_start[0] + line1_end[0]) / 2 line1_y_mp = (line1_start[1] + line1_end[1]) /", "Basically a centroid, but without doing a bunch of weird linear algebra stuff", "numpy def seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate midpoints in each dimension for", "for each line line1_x_mp = (line1_start[0] + line1_end[0]) / 2 line1_y_mp = (line1_start[1]", "= (line1_y_mp + line2_y_mp) / 2 return CartesianVals(x_avg, y_avg) # Load two empty", "x, y): self.x = x self.y = y # At some point, we", "we subtract two Cartesian coordinates against each other def __sub__(self, other): x =", "of weird linear algebra stuff with numpy def seg_avg(line1_start, line1_end, line2_start, line2_end): #", "now, to be used in main.py l_flare_loc = CartesianVals(0, 0) r_flare_loc = CartesianVals(0,", "centroid, but without doing a bunch of weird linear algebra stuff with numpy", "y # At some point, we subtract two Cartesian coordinates against each other", "and tuples, so each necessary point will # be assigned to a Cartesian", "2 line2_y_mp = (line2_start[1] + line2_end[1]) / 2 # Calculate the average between", "+ line2_end[0]) / 2 line2_y_mp = (line2_start[1] + line2_end[1]) / 2 # Calculate", "(line1_x_mp + line2_x_mp) / 2 y_avg = (line1_y_mp + line2_y_mp) / 2 return", "CartesianVals(x_avg, y_avg) # Load two empty Cartesian coordinates for 
now, to be used", "point will # be assigned to a Cartesian coordinate that fits somewhere on", "y_avg = (line1_y_mp + line2_y_mp) / 2 return CartesianVals(x_avg, y_avg) # Load two", "of two lines # Basically a centroid, but without doing a bunch of", "line1_end, line2_start, line2_end): # Calculate midpoints in each dimension for each line line1_x_mp", "but without doing a bunch of weird linear algebra stuff with numpy def", "/ 2 y_avg = (line1_y_mp + line2_y_mp) / 2 return CartesianVals(x_avg, y_avg) #", "= y # At some point, we subtract two Cartesian coordinates against each", "line2_end[0]) / 2 line2_y_mp = (line2_start[1] + line2_end[1]) / 2 # Calculate the", "lines # Basically a centroid, but without doing a bunch of weird linear", "the pixel array class CartesianVals: def __init__(self, x, y): self.x = x self.y", "fits somewhere on the pixel array class CartesianVals: def __init__(self, x, y): self.x", "as opposed to arrays and tuples, so each necessary point will # be", "(and fun) to do math with custom objects as opposed to arrays and", "- other.x y = self.y - other.y return CartesianVals(x, y) # Calculate the", "each necessary point will # be assigned to a Cartesian coordinate that fits", "2 return CartesianVals(x_avg, y_avg) # Load two empty Cartesian coordinates for now, to", "the average between the midpoints of two lines # Basically a centroid, but", "It's easy (and fun) to do math with custom objects as opposed to", "2 line2_x_mp = (line2_start[0] + line2_end[0]) / 2 line2_y_mp = (line2_start[1] + line2_end[1])", "y_avg) # Load two empty Cartesian coordinates for now, to be used in", "Cartesian coordinate that fits somewhere on the pixel array class CartesianVals: def __init__(self,", "(line1_start[0] + line1_end[0]) / 2 line1_y_mp = (line1_start[1] + line1_end[1]) / 2 line2_x_mp", "Cartesian coordinates against each other def __sub__(self, other): x = self.x - other.x", "= x self.y = y # At some point, we subtract two Cartesian", "some point, we 
subtract two Cartesian coordinates against each other def __sub__(self, other):", "custom objects as opposed to arrays and tuples, so each necessary point will", "average between the midpoints of two lines # Basically a centroid, but without", "/ 2 line2_y_mp = (line2_start[1] + line2_end[1]) / 2 # Calculate the average", "line2_end[1]) / 2 # Calculate the average between each midpoint x_avg = (line1_x_mp", "/ 2 return CartesianVals(x_avg, y_avg) # Load two empty Cartesian coordinates for now,", "Calculate midpoints in each dimension for each line line1_x_mp = (line1_start[0] + line1_end[0])", "= self.x - other.x y = self.y - other.y return CartesianVals(x, y) #", "2 line1_y_mp = (line1_start[1] + line1_end[1]) / 2 line2_x_mp = (line2_start[0] + line2_end[0])", "each dimension for each line line1_x_mp = (line1_start[0] + line1_end[0]) / 2 line1_y_mp", "dimension for each line line1_x_mp = (line1_start[0] + line1_end[0]) / 2 line1_y_mp =", "= (line1_start[0] + line1_end[0]) / 2 line1_y_mp = (line1_start[1] + line1_end[1]) / 2", "def seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate midpoints in each dimension for each", "line1_end[1]) / 2 line2_x_mp = (line2_start[0] + line2_end[0]) / 2 line2_y_mp = (line2_start[1]", "/ 2 line2_x_mp = (line2_start[0] + line2_end[0]) / 2 line2_y_mp = (line2_start[1] +", "to arrays and tuples, so each necessary point will # be assigned to", "self.x = x self.y = y # At some point, we subtract two", "y = self.y - other.y return CartesianVals(x, y) # Calculate the average between", "x self.y = y # At some point, we subtract two Cartesian coordinates", "between each midpoint x_avg = (line1_x_mp + line2_x_mp) / 2 y_avg = (line1_y_mp", "coordinates against each other def __sub__(self, other): x = self.x - other.x y", "so each necessary point will # be assigned to a Cartesian coordinate that", "__sub__(self, other): x = self.x - other.x y = self.y - other.y return", "y): self.x = x self.y = y # At some point, we subtract", 
"(line1_y_mp + line2_y_mp) / 2 return CartesianVals(x_avg, y_avg) # Load two empty Cartesian", "assigned to a Cartesian coordinate that fits somewhere on the pixel array class", "bunch of weird linear algebra stuff with numpy def seg_avg(line1_start, line1_end, line2_start, line2_end):", "+ line1_end[1]) / 2 line2_x_mp = (line2_start[0] + line2_end[0]) / 2 line2_y_mp =", "Cartesian coordinates for now, to be used in main.py l_flare_loc = CartesianVals(0, 0)", "midpoint x_avg = (line1_x_mp + line2_x_mp) / 2 y_avg = (line1_y_mp + line2_y_mp)", "y) # Calculate the average between the midpoints of two lines # Basically", "without doing a bunch of weird linear algebra stuff with numpy def seg_avg(line1_start,", "CartesianVals(x, y) # Calculate the average between the midpoints of two lines #", "= self.y - other.y return CartesianVals(x, y) # Calculate the average between the", "other.x y = self.y - other.y return CartesianVals(x, y) # Calculate the average", "point, we subtract two Cartesian coordinates against each other def __sub__(self, other): x", "to be used in main.py l_flare_loc = CartesianVals(0, 0) r_flare_loc = CartesianVals(0, 0)", "average between each midpoint x_avg = (line1_x_mp + line2_x_mp) / 2 y_avg =", "2 # Calculate the average between each midpoint x_avg = (line1_x_mp + line2_x_mp)", "# be assigned to a Cartesian coordinate that fits somewhere on the pixel", "def __sub__(self, other): x = self.x - other.x y = self.y - other.y", "other.y return CartesianVals(x, y) # Calculate the average between the midpoints of two", "for now, to be used in main.py l_flare_loc = CartesianVals(0, 0) r_flare_loc =", "line1_end[0]) / 2 line1_y_mp = (line1_start[1] + line1_end[1]) / 2 line2_x_mp = (line2_start[0]", "__init__(self, x, y): self.x = x self.y = y # At some point,", "+ line2_y_mp) / 2 return CartesianVals(x_avg, y_avg) # Load two empty Cartesian coordinates", "be assigned to a Cartesian coordinate that fits somewhere on the pixel array", "other): x = self.x - 
other.x y = self.y - other.y return CartesianVals(x,", "+ line1_end[0]) / 2 line1_y_mp = (line1_start[1] + line1_end[1]) / 2 line2_x_mp =", "def __init__(self, x, y): self.x = x self.y = y # At some", "to a Cartesian coordinate that fits somewhere on the pixel array class CartesianVals:", "array class CartesianVals: def __init__(self, x, y): self.x = x self.y = y", "fun) to do math with custom objects as opposed to arrays and tuples,", "the average between each midpoint x_avg = (line1_x_mp + line2_x_mp) / 2 y_avg", "will # be assigned to a Cartesian coordinate that fits somewhere on the", "math with custom objects as opposed to arrays and tuples, so each necessary", "between the midpoints of two lines # Basically a centroid, but without doing", "coordinates for now, to be used in main.py l_flare_loc = CartesianVals(0, 0) r_flare_loc", "# Calculate the average between each midpoint x_avg = (line1_x_mp + line2_x_mp) /", "the midpoints of two lines # Basically a centroid, but without doing a", "# Basically a centroid, but without doing a bunch of weird linear algebra", "= (line2_start[0] + line2_end[0]) / 2 line2_y_mp = (line2_start[1] + line2_end[1]) / 2", "2 y_avg = (line1_y_mp + line2_y_mp) / 2 return CartesianVals(x_avg, y_avg) # Load", "a Cartesian coordinate that fits somewhere on the pixel array class CartesianVals: def", "two Cartesian coordinates against each other def __sub__(self, other): x = self.x -", "to do math with custom objects as opposed to arrays and tuples, so", "# Calculate the average between the midpoints of two lines # Basically a", "line2_y_mp = (line2_start[1] + line2_end[1]) / 2 # Calculate the average between each", "seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate midpoints in each dimension for each line", "Load two empty Cartesian coordinates for now, to be used in main.py l_flare_loc", "= (line1_x_mp + line2_x_mp) / 2 y_avg = (line1_y_mp + line2_y_mp) / 2", "(line2_start[0] + line2_end[0]) / 2 line2_y_mp = 
(line2_start[1] + line2_end[1]) / 2 #", "empty Cartesian coordinates for now, to be used in main.py l_flare_loc = CartesianVals(0,", "line2_start, line2_end): # Calculate midpoints in each dimension for each line line1_x_mp =", "that fits somewhere on the pixel array class CartesianVals: def __init__(self, x, y):", "two empty Cartesian coordinates for now, to be used in main.py l_flare_loc =", "weird linear algebra stuff with numpy def seg_avg(line1_start, line1_end, line2_start, line2_end): # Calculate", "self.x - other.x y = self.y - other.y return CartesianVals(x, y) # Calculate", "subtract two Cartesian coordinates against each other def __sub__(self, other): x = self.x", "(line2_start[1] + line2_end[1]) / 2 # Calculate the average between each midpoint x_avg", "+ line2_x_mp) / 2 y_avg = (line1_y_mp + line2_y_mp) / 2 return CartesianVals(x_avg,", "Calculate the average between the midpoints of two lines # Basically a centroid,", "(line1_start[1] + line1_end[1]) / 2 line2_x_mp = (line2_start[0] + line2_end[0]) / 2 line2_y_mp", "= (line1_start[1] + line1_end[1]) / 2 line2_x_mp = (line2_start[0] + line2_end[0]) / 2" ]
[ "bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m): if isinstance(m, nn.Linear):", "from fastai.vision.all import * from ..augmentations import * from ..layers import * from", "loss does not decrease. Smaller temperature means more sharpening. tps: Student temperature. freeze_last_layer:", "= self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting last layer to", "self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def", "= F.softmax((yb - self.model.C) / self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb,", "cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO", "loss @torch.no_grad() def show(self, n=1): xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False)", "in range(n_preds): if ti != pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss", "in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1) if", "Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. 
tmom: Teacher update momentum.", "(targs,) self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()):", "* from ..layers import * from ..models.vision_transformer import * # Cell class DINOHead(nn.Module):", "F.softmax((yb - self.model.C) / self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred", "cmom: Center update momentum. tmom: Teacher update momentum. Set larger, e.g. 0.9995, for", "n == 'weight_v' : p.requires_grad = True def lf(self, pred, *yb): \"Multi crop", "SchedExp tpt: Teacher temperature after warm up. Decrease if training loss does not", "at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3,", "= [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs for xb, aug in zip(xbs, self.augs)]", "tpt_sched: Warm up scheduler, e.g. 
SchedLin, SchedCos, SchedExp tpt: Teacher temperature after warm", "bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1) if nlayers == 1: self.mlp = nn.Linear(in_dim,", "def before_fit(self): \"Create teacher model as a copy of student\" self.learn.loss_func = self.lf", "use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim,", "[aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs for xb, aug in zip(xbs, self.augs)] return", "forward(self, x): x = self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x)", "min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell class", "self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU())", "layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))", "https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__()", "n == 'weight_v' : p.requires_grad = False def before_batch(self): \"Augment multi crop views\"", "otherwise specified). __all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from fastai.vision.all import", "(256+). 
tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage of training for warmup tpt_sched:", "temperature. freeze_last_layer: How many epochs to freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs", "RuntimeError: Only Tensors created explicitly by the user (graph leaves) support the deepcopy", "1) if nlayers == 1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim,", "by the user (graph leaves) support the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html", "in range(n_targs): for pi in range(n_preds): if ti != pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean()", "([aug(self.x) for aug in self.augs],) x_large = [self.learn.xb[0][i] for i in self.large_crop_ids] #", "for warmup tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos, SchedExp tpt: Teacher temperature", ": p.requires_grad = True def lf(self, pred, *yb): \"Multi crop cross entropy loss:", "in idxs for xb, aug in zip(xbs, self.augs)] return show_batch(images[0], None, images, max_n=len(images),", "entropy loss: -qlog(p)\" yb = yb[0] pred = F.log_softmax(pred / self.tps, dim=-1) yb", "self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' :", "self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in", "nbs/15 - dino.ipynb (unless otherwise specified). __all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] #", "\"A module for loading and saving all training params together\" self.student,self.teacher = student,teacher", "\"Create teacher model as a copy of student\" self.learn.loss_func = self.lf self.tpt =", "self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss,", "Warm up scheduler, e.g. 
SchedLin, SchedCos, SchedExp tpt: Teacher temperature after warm up.", "self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x) return x # Cell", "- self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center and", "zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines #", "Center update momentum. tmom: Teacher update momentum. Set larger, e.g. 0.9995, for small", "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise", "..layers import * from ..models.vision_transformer import * # Cell class DINOHead(nn.Module): ''' copy.deepcopy:", "Teacher update momentum. Set larger, e.g. 0.9995, for small batches or 0.996 for", "idxs for xb, aug in zip(xbs, self.augs)] return show_batch(images[0], None, images, max_n=len(images), nrows=n)", "print_augs=False): \"\"\" DINO teacher student training with distillation. 
Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41", "tmom_sched(tmom_start, tmom_end) if print_augs: for aug in self.augs: print(aug) def before_fit(self): \"Create teacher", "@torch.no_grad() def show(self, n=1): xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False) images", "TODO: Do we need to put the teacher in eval(), not it original", "and m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.mlp(x)", "for ti in range(n_targs): for pi in range(n_preds): if ti != pi: loss", "__init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1)", "Cell from fastai.vision.all import * from ..augmentations import * from ..layers import *", "nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0)", "self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting last layer", "batches or 0.996 for large batches (256+). tpt_warmup: Warm up starting temperature tpt_warmup_pct:", "i in idxs for xb, aug in zip(xbs, self.augs)] return show_batch(images[0], None, images,", "= self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n ==", "import * from ..models.vision_transformer import * # Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError:", "def lf(self, pred, *yb): \"Multi crop cross entropy loss: -qlog(p)\" yb = yb[0]", "batches (256+). 
tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage of training for warmup", "self.student(x) # Cell class DINO(Callback): order,run_valid = 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9,", "!= pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad() def show(self, n=1):", "**kwargs) return aug_pipelines # Cell class DINOModel(Module): def __init__(self, student, teacher): \"A module", "*yb): \"Multi crop cross entropy loss: -qlog(p)\" yb = yb[0] pred = F.log_softmax(pred", "it original repo? with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb = (targs,) self.cb =", "* from ..models.vision_transformer import * # Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only", "self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom + param_s.data * (1. - self.tmom) def", "put the teacher in eval(), not it original repo? with torch.no_grad(): targs =", "out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1) if nlayers", "= self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting last layer to trainable\") for n,p", "= yb[0] pred = F.log_softmax(pred / self.tps, dim=-1) yb = F.softmax((yb - self.model.C)", "self.tps, dim=-1) yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1) n_targs, n_preds =", "= False def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear)", "e.g. SchedLin, SchedCos, SchedExp tpt: Teacher temperature after warm up. 
Decrease if training", "self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight,", "epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting last", "'weight_v' : p.requires_grad = True def lf(self, pred, *yb): \"Multi crop cross entropy", "we need to put the teacher in eval(), not it original repo? with", "explicitly by the user (graph leaves) support the deepcopy protocol at the moment", "self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs: for aug in", "to freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct],", "def _momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom", "at the end of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if", "for pi in range(n_preds): if ti != pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs", "training params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad =", "in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom + param_s.data * (1. 
-", "= 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0.,", "Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines =", "teacher model as a copy of student\" self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.)", "**kwargs): aug_pipelines = [] for nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales,", "freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)])", "ti in range(n_targs): for pi in range(n_preds): if ti != pi: loss +=", "self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v'", "larger, e.g. 0.9995, for small batches or 0.996 for large batches (256+). 
tpt_warmup:", "+= get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell class DINOModel(Module): def __init__(self,", "def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1,", "= self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96),", "all training params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad", "= targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data =", "n=1): xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i]", "(1. - self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center", "size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs),", "DINO teacher student training with distillation. 
Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center", "= self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def", "moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):", "range(n_preds): if ti != pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad()", "freeze_last_layer: How many epochs to freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs =", "cross entropy loss: -qlog(p)\" yb = yb[0] pred = F.log_softmax(pred / self.tps, dim=-1)", "of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer:", "crop views\" self.bs = self.x.size(0) self.learn.xb = ([aug(self.x) for aug in self.augs],) x_large", "update momentum. tmom: Teacher update momentum. Set larger, e.g. 0.9995, for small batches", "self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad", "not decrease. Smaller temperature means more sharpening. tps: Student temperature. freeze_last_layer: How many", "Student temperature. freeze_last_layer: How many epochs to freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps')", "import * from ..layers import * from ..models.vision_transformer import * # Cell class", "momentum. Set larger, e.g. 
0.9995, for small batches or 0.996 for large batches", "tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher", "images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs for xb, aug in zip(xbs,", "decrease. Smaller temperature means more sharpening. tps: Student temperature. freeze_last_layer: How many epochs", "param_s.data * (1. - self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def", "std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self,", "many epochs to freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler", "pred = F.log_softmax(pred / self.tps, dim=-1) yb = F.softmax((yb - self.model.C) / self.tpt,", "DINO(Callback): order,run_valid = 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04,", "= self.x.size(0) self.learn.xb = ([aug(self.x) for aug in self.augs],) x_large = [self.learn.xb[0][i] for", "'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for nc,", "for i in idxs for xb, aug in zip(xbs, self.augs)] return show_batch(images[0], None,", "return self.student(x) # Cell class DINO(Callback): order,run_valid = 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1],", "if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02)", "[] for nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines +=", "x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def 
get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs):", "(-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad() def show(self, n=1): xbs = self.learn.xb[0] idxs", "e.g. 0.9995, for small batches or 0.996 for large batches (256+). tpt_warmup: Warm", "warm up. Decrease if training loss does not decrease. Smaller temperature means more", "+ self.cb*(1-self.cmom) def after_step(self): \"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update", "Cell class DINOModel(Module): def __init__(self, student, teacher): \"A module for loading and saving", "max(nlayers, 1) if nlayers == 1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers =", "= [] for nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines", "self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs: for aug in self.augs: print(aug) def before_fit(self):", "_init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is", "hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer =", "1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim))", "= self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i", "teacher student training with distillation. 
Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update", "self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self):", "/ npairs return loss @torch.no_grad() def show(self, n=1): xbs = self.learn.xb[0] idxs =", "student\" self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for n,p", "9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin,", "== 'weight_v' : p.requires_grad = False def before_batch(self): \"Augment multi crop views\" self.bs", "torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb = (targs,) self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self):", "the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim,", "in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines", "not it original repo? 
with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb = (targs,) self.cb", "= max(nlayers, 1) if nlayers == 1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers", "trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) def", "= False def before_batch(self): \"Augment multi crop views\" self.bs = self.x.size(0) self.learn.xb =", "targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data", "m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not", "after_step(self): \"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt at the", "after_epoch(self): \"Update tpt at the end of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom", "print(\"Setting last layer to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v'", "\"Multi crop cross entropy loss: -qlog(p)\" yb = yb[0] pred = F.log_softmax(pred /", "return loss @torch.no_grad() def show(self, n=1): xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs), n,", "keepdim=True) def _momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data *", "is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.mlp(x) x =", "module for loading and saving all training params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict())", "SchedLin, SchedCos, SchedExp tpt: Teacher temperature after warm up. Decrease if training loss", "self.lf self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) 
self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if", "nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x =", "for loading and saving all training params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for", "to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. tmom: Teacher update momentum. Set", "'DINOModel', 'DINO'] # Cell from fastai.vision.all import * from ..augmentations import * from", "False def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and", "nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _", "nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m): if", "p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) # Cell class DINO(Callback):", "targs = self.model.teacher(x_large) self.learn.yb = (targs,) self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for", "None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.mlp(x) x = nn.functional.normalize(x, dim=-1,", "for _ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim,", "Decrease if training loss does not decrease. Smaller temperature means more sharpening. 
tps:", "self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting last layer to trainable\")", "store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if", "aug in self.augs],) x_large = [self.learn.xb[0][i] for i in self.large_crop_ids] # TODO: Do", "trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = True", "False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) # Cell class DINO(Callback): order,run_valid =", "import * from ..augmentations import * from ..layers import * from ..models.vision_transformer import", "if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None:", "last layer to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' :", ": p.requires_grad = False def before_batch(self): \"Augment multi crop views\" self.bs = self.x.size(0)", "self._momentum_update_center() def after_epoch(self): \"Update tpt at the end of each epoch\" self.tpt =", "= nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m):", "original repo? 
with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb = (targs,) self.cb = targs.mean(0,", "forward(self,x): return self.student(x) # Cell class DINO(Callback): order,run_valid = 9,True def __init__(self, aug_pipelines,", "npairs return loss @torch.no_grad() def show(self, n=1): xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs),", "if print_augs: for aug in self.augs: print(aug) def before_fit(self): \"Create teacher model as", "== 1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)] if use_bn:", "for aug in self.augs: print(aug) def before_fit(self): \"Create teacher model as a copy", "xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for", "''' def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers =", "'DINO'] # Cell from fastai.vision.all import * from ..augmentations import * from ..layers", "param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom + param_s.data * (1.", "class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors created explicitly by the user (graph", "= self.model.teacher(x_large) self.learn.yb = (targs,) self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s,", "/ self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds)", "== 'weight_v' : p.requires_grad = True def lf(self, pred, *yb): \"Multi crop cross", "distillation. Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. 
tmom: Teacher update", "crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell", "crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for nc, size, mins, maxs in", "self.learn.yb = (targs,) self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s, param_t in", "copy.deepcopy: RuntimeError: Only Tensors created explicitly by the user (graph leaves) support the", "in self.augs],) x_large = [self.learn.xb[0][i] for i in self.large_crop_ids] # TODO: Do we", "self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self,", "def forward(self,x): return self.student(x) # Cell class DINO(Callback): order,run_valid = 9,True def __init__(self,", "+= (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad() def show(self, n=1): xbs = self.learn.xb[0]", "training with distillation. Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. tmom:", "tmom: Teacher update momentum. Set larger, e.g. 0.9995, for small batches or 0.996", "need to put the teacher in eval(), not it original repo? with torch.no_grad():", "param_t.data = param_t.data * self.tmom + param_s.data * (1. 
- self.tmom) def _momentum_update_center(self):", "super().__init__() nlayers = max(nlayers, 1) if nlayers == 1: self.mlp = nn.Linear(in_dim, bottleneck_dim)", "hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim))", "i in self.large_crop_ids] # TODO: Do we need to put the teacher in", "p.requires_grad = True def lf(self, pred, *yb): \"Multi crop cross entropy loss: -qlog(p)\"", "Smaller temperature means more sharpening. tps: Student temperature. freeze_last_layer: How many epochs to", "= np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs for", "p.requires_grad = False def before_batch(self): \"Augment multi crop views\" self.bs = self.x.size(0) self.learn.xb", "lf(self, pred, *yb): \"Multi crop cross entropy loss: -qlog(p)\" yb = yb[0] pred", "AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise specified).", "nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size,", "= ([aug(self.x) for aug in self.augs],) x_large = [self.learn.xb[0][i] for i in self.large_crop_ids]", "to edit: nbs/15 - dino.ipynb (unless otherwise specified). 
__all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel',", "pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1) for ti in range(n_targs):", "starting temperature tpt_warmup_pct: Percentage of training for warmup tpt_sched: Warm up scheduler, e.g.", "__all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from fastai.vision.all import * from", "def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for nc, size, mins,", "2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights)", "'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from fastai.vision.all import * from ..augmentations import *", "nlayers == 1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)] if", "use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn:", "DINOModel(Module): def __init__(self, student, teacher): \"A module for loading and saving all training", "max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for nc, size, mins, maxs in zip(num_crops, crop_sizes,", "idxs = np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs", "p in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) #", "size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell class DINOModel(Module): def __init__(self, student, teacher):", "tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage of training for warmup tpt_sched: Warm", 
"tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher student training", "_momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom +", "if self.epoch == self.freeze_last_layer: print(\"Setting last layer to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters():", "True def lf(self, pred, *yb): \"Multi crop cross entropy loss: -qlog(p)\" yb =", "= student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def", "layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if", "n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = False def before_batch(self):", "nn.init.constant_(m.bias, 0) def forward(self, x): x = self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2)", "0, n_targs*(n_preds-1) for ti in range(n_targs): for pi in range(n_preds): if ti !=", "isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x", "= (targs,) self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(),", "ti != pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad() def show(self,", "How many epochs to freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines", "layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines 
self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start,", "up. Decrease if training loss does not decrease. Smaller temperature means more sharpening.", "min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for nc, size, mins, maxs in zip(num_crops,", "teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt at the end of each", "= tmom_sched(tmom_start, tmom_end) if print_augs: for aug in self.augs: print(aug) def before_fit(self): \"Create", "maxs in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return", "if n == 'weight_v' : p.requires_grad = True def lf(self, pred, *yb): \"Multi", "dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs", "dim=-1, p=2) x = self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale'])", "for p in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x)", "# Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors created explicitly by the", "self.freeze_last_layer: print(\"Setting last layer to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n ==", "# Cell class DINOModel(Module): def __init__(self, student, teacher): \"A module for loading and", "..models.vision_transformer import * # Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors created", "up starting temperature tpt_warmup_pct: Percentage of training for warmup tpt_sched: Warm up scheduler,", "Set larger, e.g. 
0.9995, for small batches or 0.996 for large batches (256+).", "0) def forward(self, x): x = self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2) x", "yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1) for ti in", "for large batches (256+). tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage of training", "tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher student", "nlayers = max(nlayers, 1) if nlayers == 1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else:", "means more sharpening. tps: Student temperature. freeze_last_layer: How many epochs to freeze the", "from ..layers import * from ..models.vision_transformer import * # Cell class DINOHead(nn.Module): '''", "= combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs: for aug in self.augs:", "tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher student training with distillation.", "use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1) if nlayers ==", "zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom + param_s.data * (1. 
- self.tmom)", "aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False):", "self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data", "for aug in self.augs],) x_large = [self.learn.xb[0][i] for i in self.large_crop_ids] # TODO:", "pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad() def show(self, n=1): xbs", "\"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt at the end", "= param_t.data * self.tmom + param_s.data * (1. - self.tmom) def _momentum_update_center(self): self.model.C", "warmup tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos, SchedExp tpt: Teacher temperature after", "leaves) support the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self,", "teacher in eval(), not it original repo? 
with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb", "_momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center and teacher updates\" self._momentum_update_teacher();", "loss, npairs = 0, n_targs*(n_preds-1) for ti in range(n_targs): for pi in range(n_preds):", "self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting last layer to trainable\") for", "get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell class DINOModel(Module): def __init__(self, student,", "if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim,", "with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb = (targs,) self.cb = targs.mean(0, keepdim=True) def", "x_large = [self.learn.xb[0][i] for i in self.large_crop_ids] # TODO: Do we need to", "teacher): \"A module for loading and saving all training params together\" self.student,self.teacher =", "def __init__(self, student, teacher): \"A module for loading and saving all training params", "self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05),", "student, teacher): \"A module for loading and saving all training params together\" self.student,self.teacher", "print(aug) def before_fit(self): \"Create teacher model as a copy of student\" self.learn.loss_func =", "= yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1) for ti in range(n_targs): for", "repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. 
tmom: Teacher update momentum. Set larger, e.g.", "param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom + param_s.data *", "large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\"", "tpt at the end of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train)", "norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if", "before_batch(self): \"Augment multi crop views\" self.bs = self.x.size(0) self.learn.xb = ([aug(self.x) for aug", "['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from fastai.vision.all import * from ..augmentations import", "small batches or 0.996 for large batches (256+). 
tpt_warmup: Warm up starting temperature", "dim=-1) yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs,", "tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher student training with", "order,run_valid = 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04,", "protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True,", "student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x):", "and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt at the end of", "views\" self.bs = self.x.size(0) self.learn.xb = ([aug(self.x) for aug in self.augs],) x_large =", "end of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch ==", "self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = True def lf(self, pred, *yb):", "* (1. 
- self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self):", "self.model.teacher(x_large) self.learn.yb = (targs,) self.cb = targs.mean(0, keepdim=True) def _momentum_update_teacher(self): for param_s, param_t", "layers = [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers -", "[self.learn.xb[0][i] for i in self.large_crop_ids] # TODO: Do we need to put the", "pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1) for ti in range(n_targs): for pi in", "return x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4),", "[tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs: for aug in self.augs: print(aug) def", "isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias,", "= nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines, but=['n',", "n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs =", "NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise specified). __all__ =", "<gh_stars>100-1000 # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless", "in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp", "0.9995, for small batches or 0.996 for large batches (256+). 
tpt_warmup: Warm up", "p=2) x = self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def", "in self.augs: print(aug) def before_fit(self): \"Create teacher model as a copy of student\"", "self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for n,p in", "deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim, use_bn=False,", "from ..augmentations import * from ..layers import * from ..models.vision_transformer import * #", "tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher student training with distillation. Refer to", "if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x):", "the teacher in eval(), not it original repo? with torch.no_grad(): targs = self.model.teacher(x_large)", "for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()): param_t.data = param_t.data * self.tmom + param_s.data", "- dino.ipynb (unless otherwise specified). 
__all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell", "(graph leaves) support the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def", "self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt at the end of each epoch\" self.tpt", "self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center()", "torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) # Cell class DINO(Callback): order,run_valid = 9,True def", "in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = False def before_batch(self): \"Augment", "False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs for xb, aug in", "show(self, n=1): xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0,", "loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad() def show(self, n=1): xbs =", "fastai.vision.all import * from ..augmentations import * from ..layers import * from ..models.vision_transformer", "= nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad =", "pi in range(n_preds): if ti != pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return", "yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1) for ti in range(n_targs): for pi", "self.cb*(1-self.cmom) def after_step(self): \"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt", 
"\"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end)", "sharpening. tps: Student temperature. freeze_last_layer: How many epochs to freeze the last layer", "-qlog(p)\" yb = yb[0] pred = F.log_softmax(pred / self.tps, dim=-1) yb = F.softmax((yb", "SchedCos, SchedExp tpt: Teacher temperature after warm up. Decrease if training loss does", "resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell class DINOModel(Module): def __init__(self, student, teacher): \"A", "the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler", "tps: Student temperature. freeze_last_layer: How many epochs to freeze the last layer \"\"\"", "1)[i] for i in idxs for xb, aug in zip(xbs, self.augs)] return show_batch(images[0],", "Warm up starting temperature tpt_warmup_pct: Percentage of training for warmup tpt_sched: Warm up", "- self.model.C) / self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred =", "last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler =", "self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher)))", "nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) 
self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False", "get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for nc, size, mins, maxs", "tmom_end) if print_augs: for aug in self.augs: print(aug) def before_fit(self): \"Create teacher model", "File to edit: nbs/15 - dino.ipynb (unless otherwise specified). __all__ = ['DINOHead', 'get_dino_aug_pipelines',", "self.learn.xb = ([aug(self.x) for aug in self.augs],) x_large = [self.learn.xb[0][i] for i in", "self.last_layer.weight_g.requires_grad = False def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m,", "not None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.mlp(x) x = nn.functional.normalize(x,", "self.x.size(0) self.learn.xb = ([aug(self.x) for aug in self.augs],) x_large = [self.learn.xb[0][i] for i", "crop cross entropy loss: -qlog(p)\" yb = yb[0] pred = F.log_softmax(pred / self.tps,", "np.random.choice(range(self.bs), n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs for xb,", "loss: -qlog(p)\" yb = yb[0] pred = F.log_softmax(pred / self.tps, dim=-1) yb =", "saving all training params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters():", "Cell class DINO(Callback): order,run_valid = 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1.,", "mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs)", "each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting", "range(n_targs): for pi in range(n_preds): if ti != pi: loss += 
(-yb[ti]*pred[pi]).sum(-1).mean() /", "= aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs: for", "(unless otherwise specified). __all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from fastai.vision.all", "of student\" self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for", "param_t.data * self.tmom + param_s.data * (1. - self.tmom) def _momentum_update_center(self): self.model.C =", "if n == 'weight_v' : p.requires_grad = False def before_batch(self): \"Augment multi crop", "npairs = 0, n_targs*(n_preds-1) for ti in range(n_targs): for pi in range(n_preds): if", "Only Tensors created explicitly by the user (graph leaves) support the deepcopy protocol", "aug_pipelines = [] for nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales):", "together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C',", "https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers", "''' copy.deepcopy: RuntimeError: Only Tensors created explicitly by the user (graph leaves) support", "the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048,", "if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if", "freeze_last_layer=1, print_augs=False): 
\"\"\" DINO teacher student training with distillation. Refer to original repo:", "== self.freeze_last_layer: print(\"Setting last layer to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n", "pred, *yb): \"Multi crop cross entropy loss: -qlog(p)\" yb = yb[0] pred =", "self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = False", "# Cell from fastai.vision.all import * from ..augmentations import * from ..layers import", "range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp =", "does not decrease. Smaller temperature means more sharpening. tps: Student temperature. freeze_last_layer: How", "= self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x) return x #", "tpt_warmup_pct: Percentage of training for warmup tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos,", "if training loss does not decrease. Smaller temperature means more sharpening. tps: Student", "n, False) images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i] for i in idxs for xb, aug", "self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center and teacher", "def forward(self, x): x = self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2) x =", "m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.mlp(x) x", "user (graph leaves) support the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html '''", "original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. tmom: Teacher update momentum. 
Set larger,", "in self.large_crop_ids] # TODO: Do we need to put the teacher in eval(),", "_ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim))", "\"Augment multi crop views\" self.bs = self.x.size(0) self.learn.xb = ([aug(self.x) for aug in", "created explicitly by the user (graph leaves) support the deepcopy protocol at the", "the user (graph leaves) support the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html", "= [self.learn.xb[0][i] for i in self.large_crop_ids] # TODO: Do we need to put", "bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer:", "of training for warmup tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos, SchedExp tpt:", "x = self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4),", "x = self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x) return x", "with distillation. Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. tmom: Teacher", "def after_step(self): \"Center and teacher updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt at", "0.996 for large batches (256+). tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage of", "Teacher temperature after warm up. Decrease if training loss does not decrease. 
Smaller", "def _momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom) def after_step(self): \"Center and teacher updates\"", "temperature after warm up. Decrease if training loss does not decrease. Smaller temperature", "model as a copy of student\" self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.) self.tmom", "norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1) if nlayers == 1:", "tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher student training with distillation. Refer to original", "class DINOModel(Module): def __init__(self, student, teacher): \"A module for loading and saving all", "self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) # Cell class DINO(Callback): order,run_valid = 9,True", "[nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim,", "DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors created explicitly by the user (graph leaves)", "bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in", "'weight_v' : p.requires_grad = False def before_batch(self): \"Augment multi crop views\" self.bs =", "support the deepcopy protocol at the moment https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html https://pytorch.org/docs/stable/generated/torch.nn.GELU.html ''' def __init__(self, in_dim,", "print_augs: for aug in self.augs: print(aug) def before_fit(self): \"Create teacher model as a", "= self.tmom_scheduler(0.) 
self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad", "combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs: for aug in self.augs: print(aug)", "layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1)", "# Cell class DINO(Callback): order,run_valid = 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996,", "update momentum. Set larger, e.g. 0.9995, for small batches or 0.996 for large", "params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad = False", "for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = True def", "layer to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad", "and saving all training params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p in", "= [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers - 2):", "loading and saving all training params together\" self.student,self.teacher = student,teacher self.teacher.load_state_dict(student.state_dict()) for p", "in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) # Cell", "return aug_pipelines # Cell class DINOModel(Module): def __init__(self, student, teacher): \"A module 
for", "to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad =", "n_targs*(n_preds-1) for ti in range(n_targs): for pi in range(n_preds): if ti != pi:", "yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1) for", "def before_batch(self): \"Augment multi crop views\" self.bs = self.x.size(0) self.learn.xb = ([aug(self.x) for", "- 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers)", "copy of student\" self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval()", "= False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) # Cell class DINO(Callback): order,run_valid", "for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = False def", "for small batches or 0.996 for large batches (256+). tpt_warmup: Warm up starting", "class DINO(Callback): order,run_valid = 9,True def __init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos,", "= yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1)", "= True def lf(self, pred, *yb): \"Multi crop cross entropy loss: -qlog(p)\" yb", "as a copy of student\" self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.) self.tmom =", "scheduler, e.g. SchedLin, SchedCos, SchedExp tpt: Teacher temperature after warm up. Decrease if", "student training with distillation. 
Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum.", "* # Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors created explicitly by", "aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs: for aug", "nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines, but=['n', 'size',", "repo? with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb = (targs,) self.cb = targs.mean(0, keepdim=True)", "yb[0] pred = F.log_softmax(pred / self.tps, dim=-1) yb = F.softmax((yb - self.model.C) /", "* self.tmom + param_s.data * (1. - self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom", "multi crop views\" self.bs = self.x.size(0) self.learn.xb = ([aug(self.x) for aug in self.augs],)", "= self.lf self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters():", "/ self.tps, dim=-1) yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1) n_targs, n_preds", "\"Update tpt at the end of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom =", "Tensors created explicitly by the user (graph leaves) support the deepcopy protocol at", "self.teacher.load_state_dict(student.state_dict()) for p in self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return", "def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers,", "temperature means more sharpening. tps: Student temperature. 
freeze_last_layer: How many epochs to freeze", "self.large_crop_ids] # TODO: Do we need to put the teacher in eval(), not", "temperature tpt_warmup_pct: Percentage of training for warmup tpt_sched: Warm up scheduler, e.g. SchedLin,", "self.epoch == self.freeze_last_layer: print(\"Setting last layer to trainable\") for n,p in self.learn.model.student[1].last_layer.named_parameters(): if", "F.log_softmax(pred / self.tps, dim=-1) yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1) n_targs,", "self.bs = self.x.size(0) self.learn.xb = ([aug(self.x) for aug in self.augs],) x_large = [self.learn.xb[0][i]", "self.teacher.parameters(): p.requires_grad = False self.register_buffer('C', torch.zeros(1,num_features_model(teacher))) def forward(self,x): return self.student(x) # Cell class", "hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1) if nlayers == 1: self.mlp =", "import * # Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors created explicitly", "Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors created explicitly by the user", "momentum. tmom: Teacher update momentum. Set larger, e.g. 0.9995, for small batches or", "Do we need to put the teacher in eval(), not it original repo?", "False def before_batch(self): \"Augment multi crop views\" self.bs = self.x.size(0) self.learn.xb = ([aug(self.x)", "but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for", "a copy of student\" self.learn.loss_func = self.lf self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.)", "up scheduler, e.g. SchedLin, SchedCos, SchedExp tpt: Teacher temperature after warm up. Decrease", "https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom: Center update momentum. tmom: Teacher update momentum. Set larger, e.g. 
0.9995,", "self.augs: print(aug) def before_fit(self): \"Create teacher model as a copy of student\" self.learn.loss_func", "for nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc,", "'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = [] for nc, size,", "= 0, n_targs*(n_preds-1) for ti in range(n_targs): for pi in range(n_preds): if ti", "def after_epoch(self): \"Update tpt at the end of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train)", "dino.ipynb (unless otherwise specified). __all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from", "= F.log_softmax(pred / self.tps, dim=-1) yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1)", "nlayers=3, hidden_dim=2048, bottleneck_dim=256): super().__init__() nlayers = max(nlayers, 1) if nlayers == 1: self.mlp", "n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0,", "yb = yb[0] pred = F.log_softmax(pred / self.tps, dim=-1) yb = F.softmax((yb -", "def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias", "after warm up. Decrease if training loss does not decrease. Smaller temperature means", "* from ..augmentations import * from ..layers import * from ..models.vision_transformer import *", "from ..models.vision_transformer import * # Cell class DINOHead(nn.Module): ''' copy.deepcopy: RuntimeError: Only Tensors", "or 0.996 for large batches (256+). tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage", "more sharpening. tps: Student temperature. 
freeze_last_layer: How many epochs to freeze the last", "epochs to freeze the last layer \"\"\" store_attr('large_crop_ids,cmom,freeze_last_layer,tps') self.augs = aug_pipelines self.tpt_scheduler =", "self.augs = aug_pipelines self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct], [tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)]) self.tmom_scheduler = tmom_sched(tmom_start, tmom_end) if print_augs:", "edit: nbs/15 - dino.ipynb (unless otherwise specified). __all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO']", "n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = True def lf(self,", "tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1, print_augs=False): \"\"\" DINO teacher student training with distillation. Refer", "large batches (256+). tpt_warmup: Warm up starting temperature tpt_warmup_pct: Percentage of training for", "to put the teacher in eval(), not it original repo? with torch.no_grad(): targs", "yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs", "# Cell @delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines", "\"\"\" DINO teacher student training with distillation. Refer to original repo: https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41 cmom:", "x = nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x) return x # Cell @delegates(get_multi_aug_pipelines,", "in eval(), not it original repo? with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb =", "EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise specified). 
__all__ = ['DINOHead',", "@delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale']) def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs): aug_pipelines = []", "# TODO: Do we need to put the teacher in eval(), not it", "self.tmom + param_s.data * (1. - self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom +", "= ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from fastai.vision.all import * from ..augmentations", "tpt: Teacher temperature after warm up. Decrease if training loss does not decrease.", "__init__(self, student, teacher): \"A module for loading and saving all training params together\"", "eval(), not it original repo? with torch.no_grad(): targs = self.model.teacher(x_large) self.learn.yb = (targs,)", "in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = True def lf(self, pred,", "out_dim, bias=False)) self.last_layer.weight_g.data.fill_(1) if norm_last_layer: self.last_layer.weight_g.requires_grad = False def _init_weights(self, m): if isinstance(m,", "before_fit(self): \"Create teacher model as a copy of student\" self.learn.loss_func = self.lf self.tpt", "training for warmup tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos, SchedExp tpt: Teacher", "the end of each epoch\" self.tpt = self.tpt_scheduler(self.pct_train) self.tmom = self.tmom_scheduler(self.pct_train) if self.epoch", "max_scales): aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell class DINOModel(Module):", "aug in self.augs: print(aug) def before_fit(self): \"Create teacher model as a copy of", "DO NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise specified). 
__all__", "self.model.C) / self.tpt, dim=-1) n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs yb, pred = yb.chunk(n_targs),", "else: layers = [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers", "if ti != pi: loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs return loss @torch.no_grad() def", "aug_pipelines # Cell class DINOModel(Module): def __init__(self, student, teacher): \"A module for loading", "..augmentations import * from ..layers import * from ..models.vision_transformer import * # Cell", "layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for _ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim))", "self.tpt = self.tpt_scheduler(0.) self.tmom = self.tmom_scheduler(0.) self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n", "training loss does not decrease. Smaller temperature means more sharpening. tps: Student temperature.", "pred.size(0)//self.bs yb, pred = yb.chunk(n_targs), pred.chunk(n_preds) loss, npairs = 0, n_targs*(n_preds-1) for ti", "self.tmom_scheduler(0.) self.model.teacher.eval() for n,p in self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad =", "specified). 
__all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO'] # Cell from fastai.vision.all import *", "__init__(self, aug_pipelines, large_crop_ids=[0,1], cmom=0.9, tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos, tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin, tps=0.1, freeze_last_layer=1,", "layers.append(nn.GELU()) for _ in range(nlayers - 2): layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU())", "aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs) return aug_pipelines # Cell class DINOModel(Module): def", "if nlayers == 1: self.mlp = nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)]", "x): x = self.mlp(x) x = nn.functional.normalize(x, dim=-1, p=2) x = self.last_layer(x) return", "layers.append(nn.Linear(hidden_dim, hidden_dim)) if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) layers.append(nn.Linear(hidden_dim, bottleneck_dim)) self.mlp = nn.Sequential(*layers) self.apply(self._init_weights) self.last_layer", "= nn.Linear(in_dim, bottleneck_dim) else: layers = [nn.Linear(in_dim, hidden_dim)] if use_bn: layers.append(nn.BatchNorm1d(hidden_dim)) layers.append(nn.GELU()) for", "self.augs],) x_large = [self.learn.xb[0][i] for i in self.large_crop_ids] # TODO: Do we need", "updates\" self._momentum_update_teacher(); self._momentum_update_center() def after_epoch(self): \"Update tpt at the end of each epoch\"", "Percentage of training for warmup tpt_sched: Warm up scheduler, e.g. SchedLin, SchedCos, SchedExp", "for i in self.large_crop_ids] # TODO: Do we need to put the teacher", "+ param_s.data * (1. 
- self.tmom) def _momentum_update_center(self): self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom)", "def show(self, n=1): xbs = self.learn.xb[0] idxs = np.random.choice(range(self.bs), n, False) images =", "self.learn.model.student[1].last_layer.named_parameters(): if n == 'weight_v' : p.requires_grad = False def before_batch(self): \"Augment multi", "self.tmom_scheduler(self.pct_train) if self.epoch == self.freeze_last_layer: print(\"Setting last layer to trainable\") for n,p in" ]
[ "the rectangle : param rect2: the second rectangle. : Return: returns intersection ratio,", "inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <= 0: return 0 inter=inter_w * inter_h union=w1*h1+w2*h2-inter", "by X, y, W, h, where x, y are the coordinates of the", "def iou(rect1,rect2): ''' Calculate the intersection ratio of two rectangles : param rect1:", "x, y are the coordinates of the upper right corner of the rectangle", "''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <= 0: return 0", "rectangles : param rect1: the first rectangle. Denoted by X, y, W, h,", "h, where x, y are the coordinates of the upper right corner of", "ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <= 0:", "rectangle. : Return: returns intersection ratio, that is, intersection ratio Union ''' x1,y1,w1,h1=rect1", "that is, intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or", "the first rectangle. Denoted by X, y, W, h, where x, y are", "X, y, W, h, where x, y are the coordinates of the upper", "iou(rect1,rect2): ''' Calculate the intersection ratio of two rectangles : param rect1: the", "second rectangle. : Return: returns intersection ratio, that is, intersection ratio Union '''", "ratio of two rectangles : param rect1: the first rectangle. Denoted by X,", "returns intersection ratio, that is, intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2))", "y, W, h, where x, y are the coordinates of the upper right", "W, h, where x, y are the coordinates of the upper right corner", "param rect2: the second rectangle. 
: Return: returns intersection ratio, that is, intersection", "the upper right corner of the rectangle : param rect2: the second rectangle.", "coordinates of the upper right corner of the rectangle : param rect2: the", "of two rectangles : param rect1: the first rectangle. Denoted by X, y,", "the second rectangle. : Return: returns intersection ratio, that is, intersection ratio Union", "upper right corner of the rectangle : param rect2: the second rectangle. :", "inter_h<=0 or inter_w <= 0: return 0 inter=inter_w * inter_h union=w1*h1+w2*h2-inter return inter/union", "rectangle. Denoted by X, y, W, h, where x, y are the coordinates", "ratio, that is, intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0", "x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <= 0: return 0 inter=inter_w *", "first rectangle. Denoted by X, y, W, h, where x, y are the", "inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <= 0: return 0 inter=inter_w * inter_h", "Denoted by X, y, W, h, where x, y are the coordinates of", "of the upper right corner of the rectangle : param rect2: the second", ": param rect2: the second rectangle. : Return: returns intersection ratio, that is,", "intersection ratio of two rectangles : param rect1: the first rectangle. Denoted by", "y are the coordinates of the upper right corner of the rectangle :", "of the rectangle : param rect2: the second rectangle. : Return: returns intersection", "rect1: the first rectangle. Denoted by X, y, W, h, where x, y", ": param rect1: the first rectangle. 
Denoted by X, y, W, h, where", "Return: returns intersection ratio, that is, intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2))", "intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <=", "Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <= 0: return", "are the coordinates of the upper right corner of the rectangle : param", "if inter_h<=0 or inter_w <= 0: return 0 inter=inter_w * inter_h union=w1*h1+w2*h2-inter return", "the coordinates of the upper right corner of the rectangle : param rect2:", ": Return: returns intersection ratio, that is, intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2", "Calculate the intersection ratio of two rectangles : param rect1: the first rectangle.", "rectangle : param rect2: the second rectangle. : Return: returns intersection ratio, that", "is, intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w", "two rectangles : param rect1: the first rectangle. Denoted by X, y, W,", "right corner of the rectangle : param rect2: the second rectangle. : Return:", "param rect1: the first rectangle. Denoted by X, y, W, h, where x,", "''' Calculate the intersection ratio of two rectangles : param rect1: the first", "corner of the rectangle : param rect2: the second rectangle. : Return: returns", "rect2: the second rectangle. : Return: returns intersection ratio, that is, intersection ratio", "the intersection ratio of two rectangles : param rect1: the first rectangle. 
Denoted", "x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if inter_h<=0 or inter_w <= 0: return 0 inter=inter_w", "where x, y are the coordinates of the upper right corner of the", "intersection ratio, that is, intersection ratio Union ''' x1,y1,w1,h1=rect1 x2,y2,w2,h2=rect2 inter_w=(w1+w2)-(max(x1+w1,x2+w2)-min(x1,x2)) inter_h=(h1+h2)-(max(y1+h1,y2+h2)-min(y1,y2)) if" ]
[ "<reponame>kkltcjk/face import os CONFIG_FILE = '/etc/face/face.conf' LOG_DIR = '/var/log/face' LOG_FILE = os.path.join(LOG_DIR, 'face.log')" ]
[ "#print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount def func_a(*a): len_a = len(a)", "len_a = len(a) def func_b(*b): return func(*(a+b)) return func_b return func_a a1 =", "return func(*(a+b)) return func_b return func_a a1 = curry(add)(1,2) print(a1(4)) a2 = curry(add)(2)", "fun(1,2,3,4,5) def add(a,b,c): return a+b+c p = (1,2,3) q = (5,6,7) print(p+q) #print(type(*p))", "len(a) def func_b(*b): return func(*(a+b)) return func_b return func_a a1 = curry(add)(1,2) print(a1(4))", "= len(a) def func_b(*b): return func(*(a+b)) return func_b return func_a a1 = curry(add)(1,2)", "func_b(*b): return func(*(a+b)) return func_b return func_a a1 = curry(add)(1,2) print(a1(4)) a2 =", "func.__code__.co_argcount def func_a(*a): len_a = len(a) def func_b(*b): return func(*(a+b)) return func_b return", "= func.__code__.co_argcount def func_a(*a): len_a = len(a) def func_b(*b): return func(*(a+b)) return func_b", "\"\"\"test run \"\"\" def fun(*args): print(args) fun(1,2,3,4,5) def add(a,b,c): return a+b+c p =", "def add(a,b,c): return a+b+c p = (1,2,3) q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p))", "(1,2,3) q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount", "print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount def func_a(*a): len_a = len(a) def", "return a+b+c p = (1,2,3) q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def", "def func_a(*a): len_a = len(a) def func_b(*b): return func(*(a+b)) return func_b return func_a", "q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount def", "a+b+c p = (1,2,3) q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func):", "(5,6,7) print(p+q) #print(type(*p)) 
print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount def func_a(*a): len_a", "= (1,2,3) q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func =", "func(*(a+b)) return func_b return func_a a1 = curry(add)(1,2) print(a1(4)) a2 = curry(add)(2) print(a2(4,6))", "def curry(func): len_func = func.__code__.co_argcount def func_a(*a): len_a = len(a) def func_b(*b): return", "curry(func): len_func = func.__code__.co_argcount def func_a(*a): len_a = len(a) def func_b(*b): return func(*(a+b))", "p = (1,2,3) q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func", "print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount def func_a(*a): len_a =", "run \"\"\" def fun(*args): print(args) fun(1,2,3,4,5) def add(a,b,c): return a+b+c p = (1,2,3)", "def fun(*args): print(args) fun(1,2,3,4,5) def add(a,b,c): return a+b+c p = (1,2,3) q =", "print(args) fun(1,2,3,4,5) def add(a,b,c): return a+b+c p = (1,2,3) q = (5,6,7) print(p+q)", "def func_b(*b): return func(*(a+b)) return func_b return func_a a1 = curry(add)(1,2) print(a1(4)) a2", "\"\"\" def fun(*args): print(args) fun(1,2,3,4,5) def add(a,b,c): return a+b+c p = (1,2,3) q", "func_a(*a): len_a = len(a) def func_b(*b): return func(*(a+b)) return func_b return func_a a1", "print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount def func_a(*a): len_a = len(a) def func_b(*b):", "len_func = func.__code__.co_argcount def func_a(*a): len_a = len(a) def func_b(*b): return func(*(a+b)) return", "= (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount) def curry(func): len_func = func.__code__.co_argcount def func_a(*a):", "fun(*args): print(args) fun(1,2,3,4,5) def add(a,b,c): return a+b+c p = (1,2,3) q = (5,6,7)", "add(a,b,c): return a+b+c p = 
(1,2,3) q = (5,6,7) print(p+q) #print(type(*p)) print(add(*p)) print(add.__code__.co_argcount)" ]
[ "from django.conf.urls import url, include from django.views.generic import TemplateView urlpatterns = [ url(r'^$',", "Ltd. from django.conf.urls import url, include from django.views.generic import TemplateView urlpatterns = [", "django.views.generic import TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$',", "), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ), url(r'^ignore/$', TemplateView.as_view(template_name=\"help/ignore.html\") ),", "import url, include from django.views.generic import TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ),", "Copyright (c) 2012-2016 Seafile Ltd. from django.conf.urls import url, include from django.views.generic import", "Seafile Ltd. from django.conf.urls import url, include from django.views.generic import TemplateView urlpatterns =", "), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ),", "# Copyright (c) 2012-2016 Seafile Ltd. 
from django.conf.urls import url, include from django.views.generic", "url, include from django.views.generic import TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$',", "), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ), url(r'^ignore/$', TemplateView.as_view(template_name=\"help/ignore.html\") ), url(r'^encrypted_libraries/$', TemplateView.as_view(template_name=\"help/encrypted_libraries.html\") ),", "), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ),", "2012-2016 Seafile Ltd. from django.conf.urls import url, include from django.views.generic import TemplateView urlpatterns", "(c) 2012-2016 Seafile Ltd. 
from django.conf.urls import url, include from django.views.generic import TemplateView", "django.conf.urls import url, include from django.views.generic import TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\")", "urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$',", "), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ),", "TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ),", "TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ), url(r'^ignore/$', TemplateView.as_view(template_name=\"help/ignore.html\")", "TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ), url(r'^ignore/$', TemplateView.as_view(template_name=\"help/ignore.html\") ), url(r'^encrypted_libraries/$', 
TemplateView.as_view(template_name=\"help/encrypted_libraries.html\")", "TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\")", "include from django.views.generic import TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\")", "TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\")", "url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$',", "TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\")", "url(r'^unsync_resync/$', 
TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ), url(r'^ignore/$',", "), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ),", "TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\")", "from django.views.generic import TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ),", "url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ), url(r'^ignore/$', TemplateView.as_view(template_name=\"help/ignore.html\") ), url(r'^encrypted_libraries/$', TemplateView.as_view(template_name=\"help/encrypted_libraries.html\") ), ]", "= [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), 
url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\")", "[ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ),", "url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$', TemplateView.as_view(template_name=\"help/conflicts.html\") ), url(r'^ignore/$', TemplateView.as_view(template_name=\"help/ignore.html\") ), url(r'^encrypted_libraries/$',", "url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$', TemplateView.as_view(template_name=\"help/desktop_proxy.html\") ), url(r'^conflicts/$',", "url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$',", "import TemplateView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^install/$', TemplateView.as_view(template_name=\"help/install.html\") ), url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\")", "url(r'^sync_existing/$', TemplateView.as_view(template_name=\"help/sync_existing.html\") ), url(r'^selective_sync/$', 
TemplateView.as_view(template_name=\"help/selective_sync.html\") ), url(r'^unsync_resync/$', TemplateView.as_view(template_name=\"help/unsync_resync.html\") ), url(r'^sync_interval/$', TemplateView.as_view(template_name=\"help/sync_interval.html\") ), url(r'^desktop_proxy/$',", "<filename>seahub/help/urls.py # Copyright (c) 2012-2016 Seafile Ltd. from django.conf.urls import url, include from" ]
[ "logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" +", "is None: b['Location'] = \"us-east-1\" else: b['Location'] = response['LocationConstraint'] except ClientError as e:", "S3 Buckets and various details about them ''' bucket_list = [] # Not", "e)) return() except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise def", "# This API call doesn't paganate. Go fig... bucket_list += response['Buckets'] for b", "e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if", "DDB (based on the the presense of PublicIp in the Association) s3_client =", "response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response except ClientError as e: b['errors']['RequestPayer']", "s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response except ClientError as e: if e.response['Error']['Code'] !=", "b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] =", "ClientError as e: b['errors']['Grants'] = e try: response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in", "b['Logging'] = response['LoggingEnabled'] except ClientError as e: b['errors']['Logging'] = e try: response =", "serializer for objects not serializable by default json code\"\"\" if isinstance(obj, (datetime, date)):", "= account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go", "response except ClientError as e: b['errors']['RequestPayer'] = e try: response = 
s3_client.get_bucket_website(Bucket=bucket_name) del", "import tz from lib.account import * from lib.common import * import logging logger", "b['CORSRules'] = response['CORSRules'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] =", "try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet'] = response['TagSet'] except ClientError", "{}\".format(e, message, vars(context))) raise def discover_buckets(account): ''' Gathers all the S3 Buckets and", "response['ResponseMetadata'] b['Website'] = response except ClientError as e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website']", "Not all Public IPs are attached to instances. So we use ec2 describe_network_interfaces()", "the account info b['account_id'] = account.account_id b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen']", "account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go through", "!= 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response:", "all Public IPs are attached to instances. So we use ec2 describe_network_interfaces() #", "''' Gathers all the S3 Buckets and various details about them ''' bucket_list", "'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as e: if e.response['Error']['Code'] !=", "on the the presense of PublicIp in the Association) s3_client = account.get_client('s3') response", "to instances. 
So we use ec2 describe_network_interfaces() # All results are saved to", "try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError", "logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context):", "del response['ResponseMetadata'] b['Website'] = response except ClientError as e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration':", "to S3. Public IPs and metadata go to DDB (based on the the", "RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True)) message", "b['errors']['ServerSideEncryptionConfiguration'] = e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants'] =", "logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event: \" +", "AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name,", "'LocationConstraint' in response: if response['LocationConstraint'] is None: b['Location'] = \"us-east-1\" else: b['Location'] =", "ClientError import json import os import time import datetime from dateutil import tz", "= response except ClientError as e: b['errors']['RequestPayer'] = e try: response = s3_client.get_bucket_website(Bucket=bucket_name)", "return() except ClientError as e: logger.error(\"AWS Error getting info for {}: {}\".format(target_account.account_name, e))", "except ClientError as e: if e.response['Error']['Code'] != 
'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name,", "b['account_id'] = account.account_id b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors']", "= json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account)", "raise def discover_buckets(account): ''' Gathers all the S3 Buckets and various details about", "b['Location'] = response['LocationConstraint'] except ClientError as e: b['errors']['Location'] = e try: response =", "e: logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as", "try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError", "bucket_name, b) def json_serial(obj): \"\"\"JSON serializer for objects not serializable by default json", "e: b['errors']['Logging'] = e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: b['CORSRules']", "response['ResponseMetadata'] b['Versioning'] = response except ClientError as e: b['errors']['Versioning'] = e try: response", "e try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response except ClientError as", "s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: b['CORSRules'] = response['CORSRules'] except ClientError as e: if", "# Not all Public IPs are attached to instances. 
So we use ec2", "b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go through a bunch of API", "about them ''' bucket_list = [] # Not all Public IPs are attached", "e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in", "except AssumeRoleError as e: logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name, target_account.account_id)) return()", "sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message, sort_keys=True)) try: target_account =", "if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet'", "response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet'] = response['TagSet'] except ClientError as", "= response except ClientError as e: b['errors']['Versioning'] = e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name)", "response['Grants'] except ClientError as e: b['errors']['Grants'] = e try: response = s3_client.get_bucket_location(Bucket=bucket_name) if", "import datetime from dateutil import tz from lib.account import * from lib.common import", "e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning']", "of API calls to get details on this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name)", "bucket_list += response['Buckets'] for b in bucket_list: bucket_name = b['Name'] # Decorate with", "str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go through a bunch of API calls to", "= AWSAccount(message['account_id']) 
discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable to assume role into account", "Gathers all the S3 Buckets and various details about them ''' bucket_list =", "'Grants' in response: b['Grants'] = response['Grants'] except ClientError as e: b['errors']['Grants'] = e", "fig... bucket_list += response['Buckets'] for b in bucket_list: bucket_name = b['Name'] # Decorate", "objects not serializable by default json code\"\"\" if isinstance(obj, (datetime, date)): return obj.isoformat()", "\" + json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError as e:", "json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message, sort_keys=True)) try: target_account", "various details about them ''' bucket_list = [] # Not all Public IPs", "ClientError as e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b)", "as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response = s3_client.get_bucket_acl(Bucket=bucket_name)", "= s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants'] = response['Grants'] except ClientError as e:", "account.account_id b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {}", "botocore.exceptions import ClientError import json import os import time import datetime from dateutil", "response['ResponseMetadata'] b['RequestPayer'] = response except ClientError as e: b['errors']['RequestPayer'] = e try: response", "on this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in 
response: b['ServerSideEncryptionConfiguration'] =", "except ClientError as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response", "logger.info(\"Received message: \" + json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError", "del response['ResponseMetadata'] b['RequestPayer'] = response except ClientError as e: b['errors']['RequestPayer'] = e try:", "return() except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise def discover_buckets(account):", "b['errors']['Location'] = e try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy'] =", "{}\\nContext: {}\".format(e, message, vars(context))) raise def discover_buckets(account): ''' Gathers all the S3 Buckets", "= e try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response except ClientError", "response['CORSRules'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH,", "e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response except ClientError as", "Buckets and various details about them ''' bucket_list = [] # Not all", "# All results are saved to S3. 
Public IPs and metadata go to", "response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if response['LocationConstraint'] is None: b['Location'] =", "{}\".format(target_account.account_name, e)) return() except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise", "details about them ''' bucket_list = [] # Not all Public IPs are", "not serializable by default json code\"\"\" if isinstance(obj, (datetime, date)): return obj.isoformat() raise", "logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event: \" + json.dumps(event,", "in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError':", "def json_serial(obj): \"\"\"JSON serializer for objects not serializable by default json code\"\"\" if", "'NoSuchTagSet': b['errors']['TagSet'] = e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response", "= response['LocationConstraint'] except ClientError as e: b['errors']['Location'] = e try: response = s3_client.get_bucket_policy(Bucket=bucket_name)", "use ec2 describe_network_interfaces() # All results are saved to S3. Public IPs and", "e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del", "= str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go through a bunch of API calls", "ClientError as e: b['errors']['Logging'] = e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in", "results are saved to S3. 
Public IPs and metadata go to DDB (based", "except ClientError as e: b['errors']['Versioning'] = e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata']", "are saved to S3. Public IPs and metadata go to DDB (based on", "response = s3_client.list_buckets() # This API call doesn't paganate. Go fig... bucket_list +=", "as e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def", "logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received", "This API call doesn't paganate. Go fig... bucket_list += response['Buckets'] for b in", "response['Buckets'] for b in bucket_list: bucket_name = b['Name'] # Decorate with the account", "= response['Grants'] except ClientError as e: b['errors']['Grants'] = e try: response = s3_client.get_bucket_location(Bucket=bucket_name)", "message: \" + json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError as", "for {}: {}\".format(target_account.account_name, e)) return() except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message,", "'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as e: if e.response['Error']['Code'] !=", "assume role into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as e: logger.error(\"AWS Error", "e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if", "= e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: 
b['CORSRules'] = response['CORSRules']", "s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response except ClientError as e: b['errors']['Versioning'] = e", "message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id'])", "# Go through a bunch of API calls to get details on this", "s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as e: if", "tz from lib.account import * from lib.common import * import logging logger =", "except ClientError as e: logger.error(\"AWS Error getting info for {}: {}\".format(target_account.account_name, e)) return()", "calls to get details on this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration'", "event: \" + json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message,", "logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True))", "= \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go through a bunch", "{}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as e: logger.error(\"AWS Error getting info for {}:", "datetime from dateutil import tz from lib.account import * from lib.common import *", "by default json code\"\"\" if isinstance(obj, (datetime, date)): return obj.isoformat() raise TypeError (\"Type", "in response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy':", "all the S3 Buckets and various 
details about them ''' bucket_list = []", "the Association) s3_client = account.get_client('s3') response = s3_client.list_buckets() # This API call doesn't", "go to DDB (based on the the presense of PublicIp in the Association)", "if 'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as e: if e.response['Error']['Code']", "= account.get_client('s3') response = s3_client.list_buckets() # This API call doesn't paganate. Go fig...", "e: logger.error(\"AWS Error getting info for {}: {}\".format(target_account.account_name, e)) return() except Exception as", "response['TagSet'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e try:", "Error getting info for {}: {}\".format(target_account.account_name, e)) return() except Exception as e: logger.error(\"{}\\nMessage:", "and metadata go to DDB (based on the the presense of PublicIp in", "account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as e: logger.error(\"AWS Error getting info for", "'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled'] except ClientError as e: b['errors']['Logging'] = e", "dateutil import tz from lib.account import * from lib.common import * import logging", "b) def json_serial(obj): \"\"\"JSON serializer for objects not serializable by default json code\"\"\"", "b['errors']['BucketPolicy'] = e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet'] =", "import ClientError import json import os import time import datetime from dateutil import", "AssumeRoleError as e: logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except", "e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled'] except", "describe_network_interfaces() # All 
results are saved to S3. Public IPs and metadata go", "= s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as e:", "ec2 describe_network_interfaces() # All results are saved to S3. Public IPs and metadata", "!= 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON serializer for", "attached to instances. So we use ec2 describe_network_interfaces() # All results are saved", "ClientError as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response =", "'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants']", "response: b['Logging'] = response['LoggingEnabled'] except ClientError as e: b['errors']['Logging'] = e try: response", "logger.error(\"AWS Error getting info for {}: {}\".format(target_account.account_name, e)) return() except Exception as e:", "details on this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration']", "save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON serializer for objects not serializable by default", "\"us-east-1\" else: b['Location'] = response['LocationConstraint'] except ClientError as e: b['errors']['Location'] = e try:", "s3_client = account.get_client('s3') response = s3_client.list_buckets() # This API call doesn't paganate. 
Go", "= response['ServerSideEncryptionConfiguration'] except ClientError as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e", "= account.account_id b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] =", "os import time import datetime from dateutil import tz from lib.account import *", "'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet']", "paganate. Go fig... bucket_list += response['Buckets'] for b in bucket_list: bucket_name = b['Name']", "the the presense of PublicIp in the Association) s3_client = account.get_client('s3') response =", "= response['LoggingEnabled'] except ClientError as e: b['errors']['Logging'] = e try: response = s3_client.get_bucket_cors(Bucket=bucket_name)", "Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise def discover_buckets(account): ''' Gathers", "= s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled'] except ClientError as e:", "= response except ClientError as e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e", "logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event: \"", "= e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled']", "doesn't paganate. Go fig... 
bucket_list += response['Buckets'] for b in bucket_list: bucket_name =", "metadata go to DDB (based on the the presense of PublicIp in the", "except ClientError as e: b['errors']['Logging'] = e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules'", "{} # Go through a bunch of API calls to get details on", "= e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants'] = response['Grants']", "in bucket_list: bucket_name = b['Name'] # Decorate with the account info b['account_id'] =", "e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet'] = response['TagSet'] except", "if 'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled'] except ClientError as e: b['errors']['Logging'] =", "None: b['Location'] = \"us-east-1\" else: b['Location'] = response['LocationConstraint'] except ClientError as e: b['errors']['Location']", "All results are saved to S3. Public IPs and metadata go to DDB", "response except ClientError as e: b['errors']['Versioning'] = e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del", "as e: b['errors']['Versioning'] = e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] =", "S3. Public IPs and metadata go to DDB (based on the the presense", "json.loads(response['Policy']) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try:", "# Decorate with the account info b['account_id'] = account.account_id b['account_name'] = account.account_name b['resource_type']", "try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response except ClientError as e:", "account.get_client('s3') response = s3_client.list_buckets() # This API call doesn't paganate. Go fig... 
bucket_list", "= [] # Not all Public IPs are attached to instances. So we", "lib.account import * from lib.common import * import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG)", "if 'LocationConstraint' in response: if response['LocationConstraint'] is None: b['Location'] = \"us-east-1\" else: b['Location']", "as e: b['errors']['Location'] = e try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response:", "as e: b['errors']['RequestPayer'] = e try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] =", "message, vars(context))) raise def discover_buckets(account): ''' Gathers all the S3 Buckets and various", "\"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go through a bunch of", "them ''' bucket_list = [] # Not all Public IPs are attached to", "from lib.common import * import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH", "discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name, target_account.account_id))", "as e: b['errors']['Logging'] = e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response:", "account info b['account_id'] = account.account_id b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] =", "e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise def discover_buckets(account): ''' Gathers all the", "response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': 
b['errors']['ServerSideEncryptionConfiguration']", "saved to S3. Public IPs and metadata go to DDB (based on the", "try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response except ClientError as e:", "bucket_list = [] # Not all Public IPs are attached to instances. So", "instances. So we use ec2 describe_network_interfaces() # All results are saved to S3.", "del response['ResponseMetadata'] b['Versioning'] = response except ClientError as e: b['errors']['Versioning'] = e try:", "= {} # Go through a bunch of API calls to get details", "b in bucket_list: bucket_name = b['Name'] # Decorate with the account info b['account_id']", "and various details about them ''' bucket_list = [] # Not all Public", "import os import time import datetime from dateutil import tz from lib.account import", "s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants'] = response['Grants'] except ClientError as e: b['errors']['Grants']", "except ClientError as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response", "So we use ec2 describe_network_interfaces() # All results are saved to S3. 
Public", "IPs and metadata go to DDB (based on the the presense of PublicIp", "if response['LocationConstraint'] is None: b['Location'] = \"us-east-1\" else: b['Location'] = response['LocationConstraint'] except ClientError", "through a bunch of API calls to get details on this bucket try:", "e: b['errors']['Location'] = e try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy']", "= b['Name'] # Decorate with the account info b['account_id'] = account.account_id b['account_name'] =", "= \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True)) message =", "lib.common import * import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH =", "response: b['CORSRules'] = response['CORSRules'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules']", "response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as", "sort_keys=True)) try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable to assume", "b['TagSet'] = response['TagSet'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] =", "= e try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy'])", "= json.loads(response['Policy']) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e", "the S3 Buckets and various details about them ''' bucket_list = [] #", "b['Website'] = response except ClientError as e: if e.response['Error']['Code'] != 
'NoSuchWebsiteConfiguration': b['errors']['Website'] =", "presense of PublicIp in the Association) s3_client = account.get_client('s3') response = s3_client.list_buckets() #", "\"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message'])", "as e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name)", "import time import datetime from dateutil import tz from lib.account import * from", "if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled'", "as e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response = s3_client.get_bucket_logging(Bucket=bucket_name)", "call doesn't paganate. Go fig... bucket_list += response['Buckets'] for b in bucket_list: bucket_name", "def discover_buckets(account): ''' Gathers all the S3 Buckets and various details about them", "except ClientError as e: b['errors']['Grants'] = e try: response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint'", "[] # Not all Public IPs are attached to instances. 
So we use", "= s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response except ClientError as e: b['errors']['RequestPayer'] =", "= logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event, context): logger.debug(\"Received event:", "lambda_handler(event, context): logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message:", "time import datetime from dateutil import tz from lib.account import * from lib.common", "try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response except ClientError as e:", "in response: b['Grants'] = response['Grants'] except ClientError as e: b['errors']['Grants'] = e try:", "bunch of API calls to get details on this bucket try: response =", "to assume role into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as e: logger.error(\"AWS", "= s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as e:", "bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except", "\"\"\"JSON serializer for objects not serializable by default json code\"\"\" if isinstance(obj, (datetime,", "e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj):", "json code\"\"\" if isinstance(obj, (datetime, date)): return obj.isoformat() raise TypeError (\"Type %s 
not", "b['Versioning'] = response except ClientError as e: b['errors']['Versioning'] = e try: response =", "= e try: response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if response['LocationConstraint'] is", "as e: b['errors']['Grants'] = e try: response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response:", "else: b['Location'] = response['LocationConstraint'] except ClientError as e: b['errors']['Location'] = e try: response", "a bunch of API calls to get details on this bucket try: response", "as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name)", "= \"us-east-1\" else: b['Location'] = response['LocationConstraint'] except ClientError as e: b['errors']['Location'] = e", "b['Grants'] = response['Grants'] except ClientError as e: b['errors']['Grants'] = e try: response =", "logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise def discover_buckets(account): ''' Gathers all the S3", "(based on the the presense of PublicIp in the Association) s3_client = account.get_client('s3')", "Public IPs and metadata go to DDB (based on the the presense of", "e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in", "= s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response except ClientError as e: if e.response['Error']['Code']", "the presense of PublicIp in the Association) s3_client = account.get_client('s3') response = s3_client.list_buckets()", "to get details on this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in", "json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id']) 
discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable to", "if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as e: if e.response['Error']['Code']", "except ClientError as e: b['errors']['Location'] = e try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy'", "e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in", "s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if response['LocationConstraint'] is None: b['Location'] = \"us-east-1\" else:", "bucket_list: bucket_name = b['Name'] # Decorate with the account info b['account_id'] = account.account_id", "as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise def discover_buckets(account): ''' Gathers all", "b['RequestPayer'] = response except ClientError as e: b['errors']['RequestPayer'] = e try: response =", "b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} #", "for b in bucket_list: bucket_name = b['Name'] # Decorate with the account info", "for objects not serializable by default json code\"\"\" if isinstance(obj, (datetime, date)): return", "s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as e: if", "e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: b['CORSRules'] = response['CORSRules'] except", "ClientError as e: logger.error(\"AWS Error getting info for {}: {}\".format(target_account.account_name, e)) return() except", "in response: b['Logging'] = 
response['LoggingEnabled'] except ClientError as e: b['errors']['Logging'] = e try:", "isinstance(obj, (datetime, date)): return obj.isoformat() raise TypeError (\"Type %s not serializable\" % type(obj))", "import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def lambda_handler(event,", "info b['account_id'] = account.account_id b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern')))", "= response['TagSet'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e", "'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON serializer for objects", "* from lib.common import * import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING)", "ClientError as e: b['errors']['RequestPayer'] = e try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website']", "!= 'NoSuchTagSet': b['errors']['TagSet'] = e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] =", "= response['CORSRules'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e", "= s3_client.list_buckets() # This API call doesn't paganate. Go fig... 
bucket_list += response['Buckets']", "target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable to assume role into", "with the account info b['account_id'] = account.account_id b['account_name'] = account.account_name b['resource_type'] = \"s3-bucket\"", "{}: {}\".format(target_account.account_name, e)) return() except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context)))", "import * import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\"", "boto3 from botocore.exceptions import ClientError import json import os import time import datetime", "json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except", "e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response except ClientError as", "if 'TagSet' in response: b['TagSet'] = response['TagSet'] except ClientError as e: if e.response['Error']['Code']", "in response: b['TagSet'] = response['TagSet'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchTagSet':", "context): logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \"", "are attached to instances. 
So we use ec2 describe_network_interfaces() # All results are", "= e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response except ClientError", "'CORSRules' in response: b['CORSRules'] = response['CORSRules'] except ClientError as e: if e.response['Error']['Code'] !=", "ClientError as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response =", "b['errors']['Versioning'] = e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response except", "try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants'] = response['Grants'] except ClientError", "except ClientError as e: b['errors']['RequestPayer'] = e try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata']", "e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON serializer for objects not serializable by", "to DDB (based on the the presense of PublicIp in the Association) s3_client", "''' bucket_list = [] # Not all Public IPs are attached to instances.", "= s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: b['CORSRules'] = response['CORSRules'] except ClientError as e:", "info for {}: {}\".format(target_account.account_name, e)) return() except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e,", "s3_client.list_buckets() # This API call doesn't paganate. Go fig... 
bucket_list += response['Buckets'] for", "discover_buckets(account): ''' Gathers all the S3 Buckets and various details about them '''", "response: b['BucketPolicy'] = json.loads(response['Policy']) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy']", "e: b['errors']['Versioning'] = e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response", "if e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON", "'TagSet' in response: b['TagSet'] = response['TagSet'] except ClientError as e: if e.response['Error']['Code'] !=", "response: b['TagSet'] = response['TagSet'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet']", "getting info for {}: {}\".format(target_account.account_name, e)) return() except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext:", "code\"\"\" if isinstance(obj, (datetime, date)): return obj.isoformat() raise TypeError (\"Type %s not serializable\"", "ClientError as e: b['errors']['Versioning'] = e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer']", "from botocore.exceptions import ClientError import json import os import time import datetime from", "try: response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if response['LocationConstraint'] is None: b['Location']", "from dateutil import tz from lib.account import * from lib.common import * import", "json_serial(obj): \"\"\"JSON serializer for objects not serializable by default json code\"\"\" if isinstance(obj,", "response: b['Grants'] = response['Grants'] except ClientError as e: b['errors']['Grants'] = e try: response", "response['LoggingEnabled'] except ClientError as e: 
b['errors']['Logging'] = e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if", "'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging']", "b['resource_type'] = \"s3-bucket\" b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern'))) b['errors'] = {} # Go through a", "json import os import time import datetime from dateutil import tz from lib.account", "PublicIp in the Association) s3_client = account.get_client('s3') response = s3_client.list_buckets() # This API", "b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] =", "= e try: response = s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response except ClientError", "in response: if response['LocationConstraint'] is None: b['Location'] = \"us-east-1\" else: b['Location'] = response['LocationConstraint']", "logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as e:", "s3_client.get_bucket_request_payment(Bucket=bucket_name) del response['ResponseMetadata'] b['RequestPayer'] = response except ClientError as e: b['errors']['RequestPayer'] = e", "e: if e.response['Error']['Code'] != 'NoSuchBucketPolicy': b['errors']['BucketPolicy'] = e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if", "response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response except ClientError as e: if", "get details on this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response:", "e try: response = 
s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if response['LocationConstraint'] is None:", "if 'CORSRules' in response: b['CORSRules'] = response['CORSRules'] except ClientError as e: if e.response['Error']['Code']", "IPs are attached to instances. So we use ec2 describe_network_interfaces() # All results", "e: b['errors']['RequestPayer'] = e try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response", "API calls to get details on this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if", "as e: logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError", "\" + json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message, sort_keys=True))", "in the Association) s3_client = account.get_client('s3') response = s3_client.list_buckets() # This API call", "import json import os import time import datetime from dateutil import tz from", "= e try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet'] = response['TagSet']", "s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled'] except ClientError as e: b['errors']['Logging']", "ClientError as e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response =", "target_account.account_id)) return() except ClientError as e: logger.error(\"AWS Error getting info for {}: {}\".format(target_account.account_name,", "we use ec2 describe_network_interfaces() # All results are saved to S3. 
Public IPs", "except Exception as e: logger.error(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context))) raise def discover_buckets(account): '''", "!= 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response:", "b['errors']['RequestPayer'] = e try: response = s3_client.get_bucket_website(Bucket=bucket_name) del response['ResponseMetadata'] b['Website'] = response except", "s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet'] = response['TagSet'] except ClientError as e: if", "response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response except ClientError as e: b['errors']['Versioning']", "of PublicIp in the Association) s3_client = account.get_client('s3') response = s3_client.list_buckets() # This", "role into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as e: logger.error(\"AWS Error getting", "from lib.account import * from lib.common import * import logging logger = logging.getLogger()", "vars(context))) raise def discover_buckets(account): ''' Gathers all the S3 Buckets and various details", "b['errors']['TagSet'] = e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response except", "response['LocationConstraint'] except ClientError as e: b['errors']['Location'] = e try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if", "response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration'] except ClientError as", "response: if response['LocationConstraint'] is None: b['Location'] = \"us-east-1\" else: b['Location'] = response['LocationConstraint'] except", "import * from 
lib.common import * import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING)", "if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants'", "= s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata'] b['Versioning'] = response except ClientError as e: b['errors']['Versioning'] =", "b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON serializer for objects not", "b['errors'] = {} # Go through a bunch of API calls to get", "b['Name'] # Decorate with the account info b['account_id'] = account.account_id b['account_name'] = account.account_name", "Decorate with the account info b['account_id'] = account.account_id b['account_name'] = account.account_name b['resource_type'] =", "b['errors']['Grants'] = e try: response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if response['LocationConstraint']", "except ClientError as e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e try: response", "Association) s3_client = account.get_client('s3') response = s3_client.list_buckets() # This API call doesn't paganate.", "except ClientError as e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try: response", "= e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON serializer for objects not serializable", "this bucket try: response = s3_client.get_bucket_encryption(Bucket=bucket_name) if 'ServerSideEncryptionConfiguration' in response: b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration']", "ClientError as e: b['errors']['Location'] = e try: response = 
s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in", "response['ServerSideEncryptionConfiguration'] except ClientError as e: if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try:", "e.response['Error']['Code'] != 'NoSuchCORSConfiguration': b['errors']['CORSRules'] = e save_resource_to_s3(RESOURCE_PATH, bucket_name, b) def json_serial(obj): \"\"\"JSON serializer", "response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled'] except ClientError as", "e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants'] = response['Grants'] except", "b['errors']['Logging'] = e try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: b['CORSRules'] =", "default json code\"\"\" if isinstance(obj, (datetime, date)): return obj.isoformat() raise TypeError (\"Type %s", "Go fig... 
bucket_list += response['Buckets'] for b in bucket_list: bucket_name = b['Name'] #", "= s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if response['LocationConstraint'] is None: b['Location'] = \"us-east-1\"", "e try: response = s3_client.get_bucket_policy(Bucket=bucket_name) if 'Policy' in response: b['BucketPolicy'] = json.loads(response['Policy']) except", "try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging'] = response['LoggingEnabled'] except ClientError", "bucket_name = b['Name'] # Decorate with the account info b['account_id'] = account.account_id b['account_name']", "+= response['Buckets'] for b in bucket_list: bucket_name = b['Name'] # Decorate with the", "* import logging logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) RESOURCE_PATH = \"s3/bucket\" def", "+ json.dumps(message, sort_keys=True)) try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable", "e: b['errors']['Grants'] = e try: response = s3_client.get_bucket_location(Bucket=bucket_name) if 'LocationConstraint' in response: if", "if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e try: response = s3_client.get_bucket_versioning(Bucket=bucket_name) del response['ResponseMetadata']", "response except ClientError as e: if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration': b['errors']['Website'] = e try:", "= s3_client.get_bucket_tagging(Bucket=bucket_name) if 'TagSet' in response: b['TagSet'] = response['TagSet'] except ClientError as e:", "response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response: b['Grants'] = response['Grants'] except ClientError as", "response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: 
b['CORSRules'] = response['CORSRules'] except ClientError as", "b['errors']['Website'] = e try: response = s3_client.get_bucket_logging(Bucket=bucket_name) if 'LoggingEnabled' in response: b['Logging'] =", "API call doesn't paganate. Go fig... bucket_list += response['Buckets'] for b in bucket_list:", "in response: b['CORSRules'] = response['CORSRules'] except ClientError as e: if e.response['Error']['Code'] != 'NoSuchCORSConfiguration':", "if isinstance(obj, (datetime, date)): return obj.isoformat() raise TypeError (\"Type %s not serializable\" %", "def lambda_handler(event, context): logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received", "+ json.dumps(event, sort_keys=True)) message = json.loads(event['Records'][0]['Sns']['Message']) logger.info(\"Received message: \" + json.dumps(message, sort_keys=True)) try:", "b['Location'] = \"us-east-1\" else: b['Location'] = response['LocationConstraint'] except ClientError as e: b['errors']['Location'] =", "Public IPs are attached to instances. 
So we use ec2 describe_network_interfaces() # All", "!= 'ServerSideEncryptionConfigurationNotFoundError': b['errors']['ServerSideEncryptionConfiguration'] = e try: response = s3_client.get_bucket_acl(Bucket=bucket_name) if 'Grants' in response:", "Go through a bunch of API calls to get details on this bucket", "serializable by default json code\"\"\" if isinstance(obj, (datetime, date)): return obj.isoformat() raise TypeError", "ClientError as e: if e.response['Error']['Code'] != 'NoSuchTagSet': b['errors']['TagSet'] = e try: response =", "response['LocationConstraint'] is None: b['Location'] = \"us-east-1\" else: b['Location'] = response['LocationConstraint'] except ClientError as", "if 'Grants' in response: b['Grants'] = response['Grants'] except ClientError as e: b['errors']['Grants'] =", "try: response = s3_client.get_bucket_cors(Bucket=bucket_name) if 'CORSRules' in response: b['CORSRules'] = response['CORSRules'] except ClientError", "try: target_account = AWSAccount(message['account_id']) discover_buckets(target_account) except AssumeRoleError as e: logger.error(\"Unable to assume role", "as e: logger.error(\"AWS Error getting info for {}: {}\".format(target_account.account_name, e)) return() except Exception", "import boto3 from botocore.exceptions import ClientError import json import os import time import", "into account {}({})\".format(target_account.account_name, target_account.account_id)) return() except ClientError as e: logger.error(\"AWS Error getting info" ]
[]
[ "import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart that shows the distribution of capital", "self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\" Highlight selected", "== \"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) #", "Set Chart Title self.total = sum([i[1] for i in data]) self.setDefaultTitle() # Add", "_slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title as", "accounts - strategies : distribution between strategies \"\"\" # Series self.chart.removeAllSeries() # Remove", "strategies : distribution between strategies \"\"\" # Series self.chart.removeAllSeries() # Remove any previous", "else: font.setBold(False) if _slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\"", "elif mode == \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": data =", "= sum([i[1] for i in data]) self.setDefaultTitle() # Add to series for d", "mode def setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated displaying the new data. 
Modes:", "for slc in self.series.slices(): if slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals", "5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title as total balance", "mode == \"all\": data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\":", "# Hide little slices' labels self.series.setLabelsVisible(True) for slc in self.series.slices(): if slc.angleSpan() <", "# Sort # Set Chart Title self.total = sum([i[1] for i in data])", "confighandler from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont", "of capital according to several criteria \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "portfolio.utils import confighandler from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts", "according to several criteria \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Chart", ")), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort # Set Chart Title self.total", "self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries()", "mode == \"accounts\": data = balances.get_all_accounts() elif mode == \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat()", "labels self.series.setLabelsVisible(True) for slc in self.series.slices(): if slc.angleSpan() < 5: 
slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series)", "cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart that shows the", "slice \"\"\" font = ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\")", "setDefaultTitle(self): \"\"\" Sets title as total balance from all pie slices \"\"\" self.chart.setTitle(", "self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to all mode def", "displaying the new data. Modes: - all : distribution between all accounts -", "crypto accounts - strategies : distribution between strategies \"\"\" # Series self.chart.removeAllSeries() #", "PyQt5.QtChart import QChartView, QChart, QPieSeries from portfolio.utils import confighandler from portfolio.db.fdbhandler import balances", "_slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if _slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False)", "distribution of capital according to several criteria \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args,", "cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort # Set Chart Title self.total = sum([i[1]", "Sort # Set Chart Title self.total = sum([i[1] for i in data]) self.setDefaultTitle()", "title as total balance from all pie slices \"\"\" self.chart.setTitle( f\"{int(self.total)} {confighandler.get_fiat_currency().upper()}\") font", "balance from all pie slices \"\"\" self.chart.setTitle( f\"{int(self.total)} {confighandler.get_fiat_currency().upper()}\") font = 
ChartTitleFont(fontsize=20) self.chart.setTitleFont(font)", "between strategies \"\"\" # Series self.chart.removeAllSeries() # Remove any previous series self.series =", "DistributionPieChart(QChartView): \"\"\" Pie chart that shows the distribution of capital according to several", "QChartView, QChart, QPieSeries from portfolio.utils import confighandler from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler", "= [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort # Set", "portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart that", "\"\"\" Pie chart that shows the distribution of capital according to several criteria", "+ cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data = balances.get_all_accounts() elif mode == \"crypto\":", "{round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if _slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self):", "all accounts - accs : distribution between portfolio accounts - cryptoaccs : distribution", "# Add to series for d in data: self.series.append(d[0], d[1]) # Hide little", "self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing)", "d[1]) # Hide little slices' labels self.series.setLabelsVisible(True) for slc in self.series.slices(): if slc.angleSpan()", "*args, **kwargs): super().__init__(*args, **kwargs) # Chart self.chart = QChart() 
self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations)", "in data: self.series.append(d[0], d[1]) # Hide little slices' labels self.series.setLabelsVisible(True) for slc in", "data]) self.setDefaultTitle() # Add to series for d in data: self.series.append(d[0], d[1]) #", "series self.series = QPieSeries() # Get data if mode == \"all\": data =", "= QPieSeries() # Get data if mode == \"all\": data = balances.get_all_accounts( )", "QPieSeries() # Get data if mode == \"all\": data = balances.get_all_accounts( ) +", "\"\"\" font = ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else:", "self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to all mode def setupSeries(self,", "\"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )),", "data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data = balances.get_all_accounts()", ": distribution between crypto accounts - strategies : distribution between strategies \"\"\" #", "d in data: self.series.append(d[0], d[1]) # Hide little slices' labels self.series.setLabelsVisible(True) for slc", "data: self.series.append(d[0], d[1]) # Hide little slices' labels self.series.setLabelsVisible(True) for slc in self.series.slices():", "elif mode == \"accounts\": data = balances.get_all_accounts() elif mode == \"crypto\": data =", "selectSlice(self, _slice, state): \"\"\" Highlight selected slice \"\"\" font = ChartTitleFont() if state:", "criteria \"\"\" def __init__(self, *args, **kwargs): 
super().__init__(*args, **kwargs) # Chart self.chart = QChart()", "Modes: - all : distribution between all accounts - accs : distribution between", "= balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data = balances.get_all_accounts() elif", "between all accounts - accs : distribution between portfolio accounts - cryptoaccs :", "\"\"\" Highlight selected slice \"\"\" font = ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle(", "self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to all mode def setupSeries(self, mode=\"all\"):", "self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\" Highlight selected slice \"\"\" font = ChartTitleFont()", "# Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\" Highlight selected slice", "all : distribution between all accounts - accs : distribution between portfolio accounts", "mode=\"all\"): \"\"\" Chart gets updated displaying the new data. 
Modes: - all :", "between portfolio accounts - cryptoaccs : distribution between crypto accounts - strategies :", "ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart that shows the distribution of capital according", "slc in self.series.slices(): if slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and", "distribution between portfolio accounts - cryptoaccs : distribution between crypto accounts - strategies", "self.setupSeries() # Initialize to all mode def setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated", "- strategies : distribution between strategies \"\"\" # Series self.chart.removeAllSeries() # Remove any", "state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if _slice.angleSpan() < 5:", "- cryptoaccs : distribution between crypto accounts - strategies : distribution between strategies", "_slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title as total balance from all pie slices", "= QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border:", "\"\"\" Sets title as total balance from all pie slices \"\"\" self.chart.setTitle( f\"{int(self.total)}", "super().__init__(*args, **kwargs) # Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\"", "to several criteria \"\"\" def 
__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Chart self.chart", "Title self.total = sum([i[1] for i in data]) self.setDefaultTitle() # Add to series", "balances from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie", "QPieSeries from portfolio.utils import confighandler from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import cbalances", "i in data]) self.setDefaultTitle() # Add to series for d in data: self.series.append(d[0],", "f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if _slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font)", "self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color:", "self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title as total balance from all pie", "to series for d in data: self.series.append(d[0], d[1]) # Hide little slices' labels", "PyQt5.QtGui import QBrush, QColor, QPainter from PyQt5.QtChart import QChartView, QChart, QPieSeries from portfolio.utils", "- all : distribution between all accounts - accs : distribution between portfolio", "import QBrush, QColor, QPainter from PyQt5.QtChart import QChartView, QChart, QPieSeries from portfolio.utils import", "_slice, state): \"\"\" Highlight selected slice \"\"\" font = ChartTitleFont() if state: font.setPointSize(20)", "state): \"\"\" Highlight selected slice \"\"\" font = ChartTitleFont() if state: 
font.setPointSize(20) _slice.setLabelVisible(True)", "if mode == \"all\": data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode ==", ": distribution between portfolio accounts - cryptoaccs : distribution between crypto accounts -", "- accs : distribution between portfolio accounts - cryptoaccs : distribution between crypto", "accounts - cryptoaccs : distribution between crypto accounts - strategies : distribution between", "in data]) self.setDefaultTitle() # Add to series for d in data: self.series.append(d[0], d[1])", "slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self,", "self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() #", "the distribution of capital according to several criteria \"\"\" def __init__(self, *args, **kwargs):", "several criteria \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Chart self.chart =", "cryptoaccs : distribution between crypto accounts - strategies : distribution between strategies \"\"\"", "= cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())]", "x: x[1]) # Sort # Set Chart Title self.total = sum([i[1] for i", "ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if 
_slice.angleSpan()", "gets updated displaying the new data. Modes: - all : distribution between all", "for i in data]) self.setDefaultTitle() # Add to series for d in data:", "accs : distribution between portfolio accounts - cryptoaccs : distribution between crypto accounts", "if _slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title", "= ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if", "from portfolio.utils import confighandler from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import cbalances from", "QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px;", "new data. 
Modes: - all : distribution between all accounts - accs :", "# Remove any previous series self.series = QPieSeries() # Get data if mode", "class DistributionPieChart(QChartView): \"\"\" Pie chart that shows the distribution of capital according to", "# Initialize to all mode def setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated displaying", "Pie chart that shows the distribution of capital according to several criteria \"\"\"", "== \"accounts\": data = balances.get_all_accounts() elif mode == \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() elif", "slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\"", "\"\"\" Chart gets updated displaying the new data. Modes: - all : distribution", "selected slice \"\"\" font = ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()}", "self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to all mode", "balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data = balances.get_all_accounts() elif mode", "QPainter from PyQt5.QtChart import QChartView, QChart, QPieSeries from portfolio.utils import confighandler from portfolio.db.fdbhandler", "font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if _slice.angleSpan() < 5: _slice.setLabelVisible(False)", "self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) 
self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize", "\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark)", "_slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title as total balance from", "between crypto accounts - strategies : distribution between strategies \"\"\" # Series self.chart.removeAllSeries()", "to all mode def setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated displaying the new", "#!/usr/bin/python3 from PyQt5.QtGui import QBrush, QColor, QPainter from PyQt5.QtChart import QChartView, QChart, QPieSeries", "capital according to several criteria \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) #", "QBrush, QColor, QPainter from PyQt5.QtChart import QChartView, QChart, QPieSeries from portfolio.utils import confighandler", "portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart that shows the distribution of", "QChart, QPieSeries from portfolio.utils import confighandler from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import", "portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView):", "\"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort", "accounts - accs : distribution between portfolio accounts - cryptoaccs : distribution between", "data = balances.get_all_accounts() elif mode == \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() 
elif mode ==", "portfolio accounts - cryptoaccs : distribution between crypto accounts - strategies : distribution", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide()", "\"accounts\": data = balances.get_all_accounts() elif mode == \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() elif mode", "rgba(0,0,0,0)\") self.setupSeries() # Initialize to all mode def setupSeries(self, mode=\"all\"): \"\"\" Chart gets", "{confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if _slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def", "def setDefaultTitle(self): \"\"\" Sets title as total balance from all pie slices \"\"\"", "Series self.chart.removeAllSeries() # Remove any previous series self.series = QPieSeries() # Get data", "self.total = sum([i[1] for i in data]) self.setDefaultTitle() # Add to series for", "the new data. 
Modes: - all : distribution between all accounts - accs", "cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data = balances.get_all_accounts() elif mode == \"crypto\": data", "elif mode == \"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x:", "mode == \"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1])", "series for d in data: self.series.append(d[0], d[1]) # Hide little slices' labels self.series.setLabelsVisible(True)", "strategies \"\"\" # Series self.chart.removeAllSeries() # Remove any previous series self.series = QPieSeries()", "# Set Chart Title self.total = sum([i[1] for i in data]) self.setDefaultTitle() #", "self.series = QPieSeries() # Get data if mode == \"all\": data = balances.get_all_accounts(", "Initialize to all mode def setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated displaying the", ": distribution between all accounts - accs : distribution between portfolio accounts -", "_slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title as total balance from all", ") + cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data = balances.get_all_accounts() elif mode ==", "previous series self.series = QPieSeries() # Get data if mode == \"all\": data", "slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\" Highlight", "= balances.get_all_accounts() elif mode == \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\":", "font.setBold(False) if _slice.angleSpan() < 5: 
_slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets", "from PyQt5.QtGui import QBrush, QColor, QPainter from PyQt5.QtChart import QChartView, QChart, QPieSeries from", "self.series.setLabelsVisible(True) for slc in self.series.slices(): if slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) #", "updated displaying the new data. Modes: - all : distribution between all accounts", "self.series.append(d[0], d[1]) # Hide little slices' labels self.series.setLabelsVisible(True) for slc in self.series.slices(): if", "**kwargs): super().__init__(*args, **kwargs) # Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False)", "as total balance from all pie slices \"\"\" self.chart.setTitle( f\"{int(self.total)} {confighandler.get_fiat_currency().upper()}\") font =", "# Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white')))", "data if mode == \"all\": data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode", "self.setDefaultTitle() # Add to series for d in data: self.series.append(d[0], d[1]) # Hide", "chart that shows the distribution of capital according to several criteria \"\"\" def", "\"\"\" # Series self.chart.removeAllSeries() # Remove any previous series self.series = QPieSeries() #", "background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to all mode def setupSeries(self, mode=\"all\"): \"\"\" Chart", "balances.get_total_balance_all_accounts( )), (\"BTC\", 
cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort # Set Chart Title", "# Series self.chart.removeAllSeries() # Remove any previous series self.series = QPieSeries() # Get", "Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\" Highlight selected slice \"\"\"", "**kwargs) # Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \")", "balances.get_all_accounts() elif mode == \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": data", "< 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice,", "from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart that shows the distribution", "self.series.slices(): if slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice)", "QColor, QPainter from PyQt5.QtChart import QChartView, QChart, QPieSeries from portfolio.utils import confighandler from", "all mode def setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated displaying the new data.", "distribution between all accounts - accs : distribution between portfolio accounts - cryptoaccs", "distribution between crypto accounts - strategies : distribution between strategies \"\"\" # Series", "# Get data if mode == \"all\": data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat()", "[(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", 
cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort # Set Chart", "\") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to all", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes()", "== \"all\": data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data", "Hide little slices' labels self.series.setLabelsVisible(True) for slc in self.series.slices(): if slc.angleSpan() < 5:", "data. Modes: - all : distribution between all accounts - accs : distribution", "< 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle() _slice.setLabelFont(font) def setDefaultTitle(self): \"\"\" Sets title as total", "import confighandler from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import", "any previous series self.series = QPieSeries() # Get data if mode == \"all\":", "self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\")", "cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda", "== \"crypto\": data = cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": 
data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts(", "Chart gets updated displaying the new data. Modes: - all : distribution between", "def setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated displaying the new data. Modes: -", "font = ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False)", "Get data if mode == \"all\": data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif", "in self.series.slices(): if slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality", "setupSeries(self, mode=\"all\"): \"\"\" Chart gets updated displaying the new data. Modes: - all", ": distribution between strategies \"\"\" # Series self.chart.removeAllSeries() # Remove any previous series", "def selectSlice(self, _slice, state): \"\"\" Highlight selected slice \"\"\" font = ChartTitleFont() if", "data.sort(key=lambda x: x[1]) # Sort # Set Chart Title self.total = sum([i[1] for", "Highlight selected slice \"\"\" font = ChartTitleFont() if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())}", "distribution between strategies \"\"\" # Series self.chart.removeAllSeries() # Remove any previous series self.series", "if slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice) def", "x[1]) # Sort # Set Chart Title self.total = sum([i[1] for i in", "Sets title as total balance from all pie slices \"\"\" self.chart.setTitle( f\"{int(self.total)} {confighandler.get_fiat_currency().upper()}\")", "shows the distribution of capital according to several criteria \"\"\" def 
__init__(self, *args,", "from PyQt5.QtChart import QChartView, QChart, QPieSeries from portfolio.utils import confighandler from portfolio.db.fdbhandler import", "and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\" Highlight selected slice \"\"\" font", "little slices' labels self.series.setLabelsVisible(True) for slc in self.series.slices(): if slc.angleSpan() < 5: slc.setLabelVisible(False)", "import QChartView, QChart, QPieSeries from portfolio.utils import confighandler from portfolio.db.fdbhandler import balances from", "(\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort # Set Chart Title self.total =", "data = cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\",", "Chart Title self.total = sum([i[1] for i in data]) self.setDefaultTitle() # Add to", "self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart) self.setRenderHint(QPainter.Antialiasing) self.setStyleSheet(\"border: 0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to", "from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart", "data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts( )), (\"BTC\", cbalances.get_total_balance_all_accounts_fiat())] data.sort(key=lambda x: x[1]) # Sort #", "total balance from all pie slices \"\"\" self.chart.setTitle( f\"{int(self.total)} {confighandler.get_fiat_currency().upper()}\") font = ChartTitleFont(fontsize=20)", "sum([i[1] for i in data]) self.setDefaultTitle() # Add to series for d in", "self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: 
font.setBold(False) if _slice.angleSpan() < 5: _slice.setLabelVisible(False) _slice.setExploded(False) self.setDefaultTitle()", "Chart self.chart = QChart() self.chart.setTheme(QChart.ChartThemeDark) self.chart.legend().hide() self.chart.createDefaultAxes() self.chart.setAnimationOptions(QChart.SeriesAnimations) self.chart.setBackgroundVisible(False) self.chart.setTitle(\" \") self.chart.setTitleBrush(QBrush(QColor('white'))) self.setChart(self.chart)", "import balances from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\"", "0px; background-color: rgba(0,0,0,0)\") self.setupSeries() # Initialize to all mode def setupSeries(self, mode=\"all\"): \"\"\"", "for d in data: self.series.append(d[0], d[1]) # Hide little slices' labels self.series.setLabelsVisible(True) for", "import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class DistributionPieChart(QChartView): \"\"\" Pie chart that shows", "Remove any previous series self.series = QPieSeries() # Get data if mode ==", "slices' labels self.series.setLabelsVisible(True) for slc in self.series.slices(): if slc.angleSpan() < 5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05)", "5: slc.setLabelVisible(False) slc.setLabelArmLengthFactor(0.05) self.chart.addSeries(self.series) # Signals and functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state):", "if state: font.setPointSize(20) _slice.setLabelVisible(True) self.chart.setTitle( f\"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%\") else: font.setBold(False) if _slice.angleSpan() <", "self.chart.removeAllSeries() # Remove any previous series self.series = QPieSeries() # Get data if", "that shows the distribution of capital according to several criteria \"\"\" def __init__(self,", "mode == \"crypto\": data = 
cbalances.get_all_accounts_with_amount_fiat() elif mode == \"currency\": data = [(confighandler.get_fiat_currency().upper(),", "Add to series for d in data: self.series.append(d[0], d[1]) # Hide little slices'", "from portfolio.db.fdbhandler import balances from portfolio.db.cdbhandler import cbalances from portfolio.gui.ui_components.fonts import ChartTitleFont class", "functionality self.series.hovered.connect(self.selectSlice) def selectSlice(self, _slice, state): \"\"\" Highlight selected slice \"\"\" font =", "\"all\": data = balances.get_all_accounts( ) + cbalances.get_all_accounts_with_amount_fiat() elif mode == \"accounts\": data =" ]
[ "def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])): itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName'])", "群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) #", "print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])): itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName']) if(re.match(u'^搜', msg['Text'])): itchat.send_msg(u'电影名xxx',msg[u'FromUserName']) itchat.auto_login(hotReload=True,enableCmdQR=True) itchat.run(debug=True)", "说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg): room_name =", "''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True)", "# 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text']))", "itchat from itchat.content import * ''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' #", "from itchat.content import * ''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控", "#coding=utf8 import re import itchat from itchat.content import * ''' 0.0.1版本 功能: 1.匹配群聊关键字", "import itchat from itchat.content import * ''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 '''", "itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])): itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName']) if(re.match(u'^搜', msg['Text'])): 
itchat.send_msg(u'电影名xxx',msg[u'FromUserName']) itchat.auto_login(hotReload=True,enableCmdQR=True)", "room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])): itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName']) if(re.match(u'^搜', msg['Text'])):", "功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg):", "True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])): itchat.send_msg(msg['Text'].replace(u'说',", "= itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])): itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName']) if(re.match(u'^搜', msg['Text'])): itchat.send_msg(u'电影名xxx',msg[u'FromUserName'])", "0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def", "@itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字", "re import itchat from itchat.content import * ''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息", "import * ''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat", "''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[", "isGroupChat = True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) 
print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说',", "groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])): itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName']) if(re.match(u'^搜',", ",然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName'])", "1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat = True) def groupchat_reply(msg): room_name", "itchat.content import * ''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT,", "* ''' 0.0.1版本 功能: 1.匹配群聊关键字 说 ,然后回复接受到的消息 ''' # 群聊监控 @itchat.msg_register(TEXT, isGroupChat =", "= True) def groupchat_reply(msg): room_name = itchat.search_chatrooms(userName=msg[u'FromUserName']) print(u\"来自-%s-群聊消息|%s:%s\"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text'])) # 匹配说关键字 if(re.match(u'^说', msg['Text'])):", "import re import itchat from itchat.content import * ''' 0.0.1版本 功能: 1.匹配群聊关键字 说" ]
[ "type of gene mentions extracted # Author: <NAME> <<EMAIL>> # Created: 2015-01-25 import", "if __name__ == '__main__': if len(sys.argv) < 3: print \"Process.py: Insufficient arguments\" else:", "== '__main__': if len(sys.argv) < 3: print \"Process.py: Insufficient arguments\" else: process_pg_statistics(sys.argv[1], sys.argv[2])", "# Author: <NAME> <<EMAIL>> # Created: 2015-01-25 import sys from dd_analysis_utils import process_pg_statistics", "__name__ == '__main__': if len(sys.argv) < 3: print \"Process.py: Insufficient arguments\" else: process_pg_statistics(sys.argv[1],", "python # A script for seeing basic statistics about the number and type", "extracted # Author: <NAME> <<EMAIL>> # Created: 2015-01-25 import sys from dd_analysis_utils import", "sys from dd_analysis_utils import process_pg_statistics if __name__ == '__main__': if len(sys.argv) < 3:", "A script for seeing basic statistics about the number and type of gene", "# Created: 2015-01-25 import sys from dd_analysis_utils import process_pg_statistics if __name__ == '__main__':", "import process_pg_statistics if __name__ == '__main__': if len(sys.argv) < 3: print \"Process.py: Insufficient", "# A script for seeing basic statistics about the number and type of", "about the number and type of gene mentions extracted # Author: <NAME> <<EMAIL>>", "mentions extracted # Author: <NAME> <<EMAIL>> # Created: 2015-01-25 import sys from dd_analysis_utils", "<NAME> <<EMAIL>> # Created: 2015-01-25 import sys from dd_analysis_utils import process_pg_statistics if __name__", "dd_analysis_utils import process_pg_statistics if __name__ == '__main__': if len(sys.argv) < 3: print \"Process.py:", "script for seeing basic statistics about the number and type of gene mentions", "the number and type of gene mentions extracted # Author: <NAME> <<EMAIL>> #", "#!/usr/bin/env python # A script for seeing basic statistics about the number and", "statistics about the number and type of gene mentions extracted # Author: <NAME>", 
"2015-01-25 import sys from dd_analysis_utils import process_pg_statistics if __name__ == '__main__': if len(sys.argv)", "seeing basic statistics about the number and type of gene mentions extracted #", "Created: 2015-01-25 import sys from dd_analysis_utils import process_pg_statistics if __name__ == '__main__': if", "for seeing basic statistics about the number and type of gene mentions extracted", "and type of gene mentions extracted # Author: <NAME> <<EMAIL>> # Created: 2015-01-25", "gene mentions extracted # Author: <NAME> <<EMAIL>> # Created: 2015-01-25 import sys from", "from dd_analysis_utils import process_pg_statistics if __name__ == '__main__': if len(sys.argv) < 3: print", "basic statistics about the number and type of gene mentions extracted # Author:", "of gene mentions extracted # Author: <NAME> <<EMAIL>> # Created: 2015-01-25 import sys", "number and type of gene mentions extracted # Author: <NAME> <<EMAIL>> # Created:", "process_pg_statistics if __name__ == '__main__': if len(sys.argv) < 3: print \"Process.py: Insufficient arguments\"", "import sys from dd_analysis_utils import process_pg_statistics if __name__ == '__main__': if len(sys.argv) <", "Author: <NAME> <<EMAIL>> # Created: 2015-01-25 import sys from dd_analysis_utils import process_pg_statistics if", "<<EMAIL>> # Created: 2015-01-25 import sys from dd_analysis_utils import process_pg_statistics if __name__ ==" ]
[ "os.walk(\"./readings\"): for file in files: if file.endswith(\".pdf\"): names.append(file) # Make sure the OCR", "os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"):", "Place files to be scanned in the 'readings' directory. They must be PDFs.", "to convert and move names = [] for root, dirs, files in os.walk(\"./readings\"):", "move names = [] for root, dirs, files in os.walk(\"./readings\"): for file in", "\".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt files # Sources for python scripting: #", "<NAME> # Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be", "\".pdf\" out = \"./searchable/pdf/\" + name + \"-OCR.pdf\" # Convert the files call([\"pdfocr\",", "OCR directory has been created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not", "call # Determine which files to convert and move names = [] for", "sure the OCR directory has been created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"])", "for name in names: name = name[0 : len(name) - 4] read =", "in os.walk(\"./readings\"): for file in files: if file.endswith(\".pdf\"): names.append(file) # Make sure the", "name + \".pdf\", \"read/\" + name + \".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt", "to searchable PDFs.\"\"\" # Author: <NAME> # Date: 2018/03/05 # This program requires", ": len(name) - 4] read = \"./readings/\" + name + \".pdf\" out =", "for root, dirs, files in os.walk(\"./readings\"): for file in files: if file.endswith(\".pdf\"): names.append(file)", "# Place files to be scanned in the 'readings' directory. 
They must be", "+ name + \".pdf\" out = \"./searchable/pdf/\" + name + \"-OCR.pdf\" # Convert", "# Determine which files to convert and move names = [] for root,", "Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be scanned in", "must be PDFs. import os from subprocess import call # Determine which files", "created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"])", "# Begin for name in names: name = name[0 : len(name) - 4]", "+ name + \"-OCR.pdf\" # Convert the files call([\"pdfocr\", \"-i\", read, \"-o\", out])", "# Make sure the OCR directory has been created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\",", "which files to convert and move names = [] for root, dirs, files", "\"-o\", out]) # Move files to read call([\"mv\", \"readings/\" + name + \".pdf\",", "make txt files # Sources for python scripting: # https://docs.python.org/3/library/os.html#os.fwalk # https://stackoverflow.com/questions/1274506/how-can-i-create-a-list-of-files-in-the-current-directory-and-its-subdirectories #", "of 'pdfocr' by <NAME> # Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place", "to be scanned in the 'readings' directory. They must be PDFs. 
import os", "Determine which files to convert and move names = [] for root, dirs,", "\"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\",", "call([\"sh\", \"text.sh\"]) # make txt files # Sources for python scripting: # https://docs.python.org/3/library/os.html#os.fwalk", "\"text.sh\"]) # make txt files # Sources for python scripting: # https://docs.python.org/3/library/os.html#os.fwalk #", "txt files # Sources for python scripting: # https://docs.python.org/3/library/os.html#os.fwalk # https://stackoverflow.com/questions/1274506/how-can-i-create-a-list-of-files-in-the-current-directory-and-its-subdirectories # https://stackoverflow.com/questions/89228/calling-an-external-command-in-python", "Make sure the OCR directory has been created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\",", "read = \"./readings/\" + name + \".pdf\" out = \"./searchable/pdf/\" + name +", "from subprocess import call # Determine which files to convert and move names", "in files: if file.endswith(\".pdf\"): names.append(file) # Make sure the OCR directory has been", "import os from subprocess import call # Determine which files to convert and", "program requires the installation of 'pdfocr' by <NAME> # Which can be found", "+ \".pdf\", \"read/\" + name + \".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt files", "files # Sources for python scripting: # https://docs.python.org/3/library/os.html#os.fwalk # https://stackoverflow.com/questions/1274506/how-can-i-create-a-list-of-files-in-the-current-directory-and-its-subdirectories # https://stackoverflow.com/questions/89228/calling-an-external-command-in-python #", "+ \"-OCR.pdf\" # Convert the files call([\"pdfocr\", \"-i\", read, \"-o\", out]) # Move", "\"\"\"Converter from unsearchable PDFs to searchable PDFs.\"\"\" # Author: <NAME> # Date: 2018/03/05", "scanned in the 'readings' directory. 
They must be PDFs. import os from subprocess", "files in os.walk(\"./readings\"): for file in files: if file.endswith(\".pdf\"): names.append(file) # Make sure", "be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be scanned in the 'readings'", "\"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"])", "names: name = name[0 : len(name) - 4] read = \"./readings/\" + name", "files to read call([\"mv\", \"readings/\" + name + \".pdf\", \"read/\" + name +", "\"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin for name in names:", "been created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\",", "'readings' directory. They must be PDFs. import os from subprocess import call #", "\"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin for name in", "and move names = [] for root, dirs, files in os.walk(\"./readings\"): for file", "name[0 : len(name) - 4] read = \"./readings/\" + name + \".pdf\" out", "+ \".pdf\" out = \"./searchable/pdf/\" + name + \"-OCR.pdf\" # Convert the files", "directory has been created. 
if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"):", "call([\"pdfocr\", \"-i\", read, \"-o\", out]) # Move files to read call([\"mv\", \"readings/\" +", "This program requires the installation of 'pdfocr' by <NAME> # Which can be", "# Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be scanned", "root, dirs, files in os.walk(\"./readings\"): for file in files: if file.endswith(\".pdf\"): names.append(file) #", "# Sources for python scripting: # https://docs.python.org/3/library/os.html#os.fwalk # https://stackoverflow.com/questions/1274506/how-can-i-create-a-list-of-files-in-the-current-directory-and-its-subdirectories # https://stackoverflow.com/questions/89228/calling-an-external-command-in-python # https://stackoverflow.com/questions/8933237/how-to-find-if-directory-exists-in-python", "os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin for", "len(name) - 4] read = \"./readings/\" + name + \".pdf\" out = \"./searchable/pdf/\"", "files: if file.endswith(\".pdf\"): names.append(file) # Make sure the OCR directory has been created.", "if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if", "= \"./readings/\" + name + \".pdf\" out = \"./searchable/pdf/\" + name + \"-OCR.pdf\"", "# make txt files # Sources for python scripting: # https://docs.python.org/3/library/os.html#os.fwalk # https://stackoverflow.com/questions/1274506/how-can-i-create-a-list-of-files-in-the-current-directory-and-its-subdirectories", "found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be scanned in the 'readings' directory.", "Convert the files call([\"pdfocr\", \"-i\", read, \"-o\", 
out]) # Move files to read", "out]) # Move files to read call([\"mv\", \"readings/\" + name + \".pdf\", \"read/\"", "https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be scanned in the 'readings' directory. They must", "names = [] for root, dirs, files in os.walk(\"./readings\"): for file in files:", "file.endswith(\".pdf\"): names.append(file) # Make sure the OCR directory has been created. if not", "directory. They must be PDFs. import os from subprocess import call # Determine", "the 'readings' directory. They must be PDFs. import os from subprocess import call", "call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin for name", "\"-i\", read, \"-o\", out]) # Move files to read call([\"mv\", \"readings/\" + name", "# Date: 2018/03/05 # This program requires the installation of 'pdfocr' by <NAME>", "files to convert and move names = [] for root, dirs, files in", "= [] for root, dirs, files in os.walk(\"./readings\"): for file in files: if", "\"./readings/\" + name + \".pdf\" out = \"./searchable/pdf/\" + name + \"-OCR.pdf\" #", "files call([\"pdfocr\", \"-i\", read, \"-o\", out]) # Move files to read call([\"mv\", \"readings/\"", "[] for root, dirs, files in os.walk(\"./readings\"): for file in files: if file.endswith(\".pdf\"):", "be scanned in the 'readings' directory. They must be PDFs. import os from", "has been created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\",", "be PDFs. 
import os from subprocess import call # Determine which files to", "not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not", "out = \"./searchable/pdf/\" + name + \"-OCR.pdf\" # Convert the files call([\"pdfocr\", \"-i\",", "# Move files to read call([\"mv\", \"readings/\" + name + \".pdf\", \"read/\" +", "names.append(file) # Make sure the OCR directory has been created. if not os.path.isdir(\"searchable/pdf\"):", "2018/03/05 # This program requires the installation of 'pdfocr' by <NAME> # Which", "the installation of 'pdfocr' by <NAME> # Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902", "the OCR directory has been created. if not os.path.isdir(\"searchable/pdf\"): call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if", "\".pdf\", \"read/\" + name + \".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt files #", "+ \".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt files # Sources for python scripting:", "+ name + \".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt files # Sources for", "PDFs to searchable PDFs.\"\"\" # Author: <NAME> # Date: 2018/03/05 # This program", "by <NAME> # Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to", "name in names: name = name[0 : len(name) - 4] read = \"./readings/\"", "read call([\"mv\", \"readings/\" + name + \".pdf\", \"read/\" + name + \".pdf\"]) call([\"sh\",", "to read call([\"mv\", \"readings/\" + name + \".pdf\", \"read/\" + name + \".pdf\"])", "installation of 'pdfocr' by <NAME> # Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 #", "if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) #", "Date: 2018/03/05 # This program requires the installation of 'pdfocr' 
by <NAME> #", "requires the installation of 'pdfocr' by <NAME> # Which can be found at", "= \"./searchable/pdf/\" + name + \"-OCR.pdf\" # Convert the files call([\"pdfocr\", \"-i\", read,", "dirs, files in os.walk(\"./readings\"): for file in files: if file.endswith(\".pdf\"): names.append(file) # Make", "name = name[0 : len(name) - 4] read = \"./readings/\" + name +", "not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin", "Begin for name in names: name = name[0 : len(name) - 4] read", "can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be scanned in the", "- 4] read = \"./readings/\" + name + \".pdf\" out = \"./searchable/pdf/\" +", "files to be scanned in the 'readings' directory. They must be PDFs. import", "convert and move names = [] for root, dirs, files in os.walk(\"./readings\"): for", "# This program requires the installation of 'pdfocr' by <NAME> # Which can", "the files call([\"pdfocr\", \"-i\", read, \"-o\", out]) # Move files to read call([\"mv\",", "\"read\"]) # Begin for name in names: name = name[0 : len(name) -", "in the 'readings' directory. They must be PDFs. 
import os from subprocess import", "4] read = \"./readings/\" + name + \".pdf\" out = \"./searchable/pdf/\" + name", "Move files to read call([\"mv\", \"readings/\" + name + \".pdf\", \"read/\" + name", "call([\"mkdir\", \"-p\", \"searchable/pdf\"]) if not os.path.isdir(\"searchable/txt\"): call([\"mkdir\", \"-p\", \"searchable/txt\"]) if not os.path.isdir(\"read\"): call([\"mkdir\",", "\"./searchable/pdf/\" + name + \"-OCR.pdf\" # Convert the files call([\"pdfocr\", \"-i\", read, \"-o\",", "not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin for name in names: name =", "call([\"mv\", \"readings/\" + name + \".pdf\", \"read/\" + name + \".pdf\"]) call([\"sh\", \"text.sh\"])", "name + \".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt files # Sources for python", "Author: <NAME> # Date: 2018/03/05 # This program requires the installation of 'pdfocr'", "if not os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin for name in names: name", "call([\"mkdir\", \"-p\", \"read\"]) # Begin for name in names: name = name[0 :", "\"read/\" + name + \".pdf\"]) call([\"sh\", \"text.sh\"]) # make txt files # Sources", "in names: name = name[0 : len(name) - 4] read = \"./readings/\" +", "\"readings/\" + name + \".pdf\", \"read/\" + name + \".pdf\"]) call([\"sh\", \"text.sh\"]) #", "<NAME> # Date: 2018/03/05 # This program requires the installation of 'pdfocr' by", "import call # Determine which files to convert and move names = []", "= name[0 : len(name) - 4] read = \"./readings/\" + name + \".pdf\"", "os from subprocess import call # Determine which files to convert and move", "PDFs. import os from subprocess import call # Determine which files to convert", "os.path.isdir(\"read\"): call([\"mkdir\", \"-p\", \"read\"]) # Begin for name in names: name = name[0", "at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files to be scanned in the 'readings' directory. 
They", "# Convert the files call([\"pdfocr\", \"-i\", read, \"-o\", out]) # Move files to", "\"-OCR.pdf\" # Convert the files call([\"pdfocr\", \"-i\", read, \"-o\", out]) # Move files", "<gh_stars>0 \"\"\"Converter from unsearchable PDFs to searchable PDFs.\"\"\" # Author: <NAME> # Date:", "searchable PDFs.\"\"\" # Author: <NAME> # Date: 2018/03/05 # This program requires the", "PDFs.\"\"\" # Author: <NAME> # Date: 2018/03/05 # This program requires the installation", "'pdfocr' by <NAME> # Which can be found at https://launchpad.net/~gezakovacs/+archive/ubuntu/pdfocr/+build/7671902 # Place files", "# Author: <NAME> # Date: 2018/03/05 # This program requires the installation of", "name + \"-OCR.pdf\" # Convert the files call([\"pdfocr\", \"-i\", read, \"-o\", out]) #", "\"-p\", \"read\"]) # Begin for name in names: name = name[0 : len(name)", "from unsearchable PDFs to searchable PDFs.\"\"\" # Author: <NAME> # Date: 2018/03/05 #", "file in files: if file.endswith(\".pdf\"): names.append(file) # Make sure the OCR directory has", "read, \"-o\", out]) # Move files to read call([\"mv\", \"readings/\" + name +", "They must be PDFs. import os from subprocess import call # Determine which", "unsearchable PDFs to searchable PDFs.\"\"\" # Author: <NAME> # Date: 2018/03/05 # This", "for file in files: if file.endswith(\".pdf\"): names.append(file) # Make sure the OCR directory", "if file.endswith(\".pdf\"): names.append(file) # Make sure the OCR directory has been created. if", "+ name + \".pdf\", \"read/\" + name + \".pdf\"]) call([\"sh\", \"text.sh\"]) # make", "name + \".pdf\" out = \"./searchable/pdf/\" + name + \"-OCR.pdf\" # Convert the", "subprocess import call # Determine which files to convert and move names =" ]
[ "or 0) except: dy = 0 score = 1.0/(pe)*10 + dy data['PE'] =", "try: dy = float(data['DY'] or 0) except: dy = 0 score = 1.0/(pe)*10", "float(data['DY'] or 0) except: dy = 0 score = 1.0/(pe)*10 + dy data['PE']", "import sys import json for line in sys.stdin.readlines(): data = json.loads(line) symbol =", "dy = 0 score = 1.0/(pe)*10 + dy data['PE'] = pe data['DY'] =", "except: pe = 1000 try: dy = float(data['DY'] or 0) except: dy =", "= 1000 try: dy = float(data['DY'] or 0) except: dy = 0 score", "#!/usr/bin/python import sys import json for line in sys.stdin.readlines(): data = json.loads(line) symbol", "line in sys.stdin.readlines(): data = json.loads(line) symbol = data.pop('SYMBOL') try: pe = float(data['PE']", "1000) except: pe = 1000 try: dy = float(data['DY'] or 0) except: dy", "score = 1.0/(pe)*10 + dy data['PE'] = pe data['DY'] = dy print score,", "<reponame>redahe/opportuner #!/usr/bin/python import sys import json for line in sys.stdin.readlines(): data = json.loads(line)", "sys import json for line in sys.stdin.readlines(): data = json.loads(line) symbol = data.pop('SYMBOL')", "try: pe = float(data['PE'] or 1000) except: pe = 1000 try: dy =", "float(data['PE'] or 1000) except: pe = 1000 try: dy = float(data['DY'] or 0)", "except: dy = 0 score = 1.0/(pe)*10 + dy data['PE'] = pe data['DY']", "json for line in sys.stdin.readlines(): data = json.loads(line) symbol = data.pop('SYMBOL') try: pe", "= data.pop('SYMBOL') try: pe = float(data['PE'] or 1000) except: pe = 1000 try:", "= 0 score = 1.0/(pe)*10 + dy data['PE'] = pe data['DY'] = dy", "in sys.stdin.readlines(): data = json.loads(line) symbol = data.pop('SYMBOL') try: pe = float(data['PE'] or", "= float(data['PE'] or 1000) except: pe = 1000 try: dy = float(data['DY'] or", "= 1.0/(pe)*10 + dy data['PE'] = pe data['DY'] = dy print score, symbol,", "1.0/(pe)*10 + dy data['PE'] = pe data['DY'] = dy print score, symbol, json.dumps(data)", "data = json.loads(line) symbol = data.pop('SYMBOL') try: pe = 
float(data['PE'] or 1000) except:", "dy = float(data['DY'] or 0) except: dy = 0 score = 1.0/(pe)*10 +", "1000 try: dy = float(data['DY'] or 0) except: dy = 0 score =", "or 1000) except: pe = 1000 try: dy = float(data['DY'] or 0) except:", "json.loads(line) symbol = data.pop('SYMBOL') try: pe = float(data['PE'] or 1000) except: pe =", "pe = float(data['PE'] or 1000) except: pe = 1000 try: dy = float(data['DY']", "sys.stdin.readlines(): data = json.loads(line) symbol = data.pop('SYMBOL') try: pe = float(data['PE'] or 1000)", "= json.loads(line) symbol = data.pop('SYMBOL') try: pe = float(data['PE'] or 1000) except: pe", "0 score = 1.0/(pe)*10 + dy data['PE'] = pe data['DY'] = dy print", "for line in sys.stdin.readlines(): data = json.loads(line) symbol = data.pop('SYMBOL') try: pe =", "data.pop('SYMBOL') try: pe = float(data['PE'] or 1000) except: pe = 1000 try: dy", "0) except: dy = 0 score = 1.0/(pe)*10 + dy data['PE'] = pe", "import json for line in sys.stdin.readlines(): data = json.loads(line) symbol = data.pop('SYMBOL') try:", "= float(data['DY'] or 0) except: dy = 0 score = 1.0/(pe)*10 + dy", "symbol = data.pop('SYMBOL') try: pe = float(data['PE'] or 1000) except: pe = 1000", "pe = 1000 try: dy = float(data['DY'] or 0) except: dy = 0" ]
[ "import io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator", "str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__", "p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__ == '__main__': import", "0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 = time.time() print", "{}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2))) else: # test algorithm # write", "l_Blocks = l_File.tell() // p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET)", "backwards. p_FileName : the full path to the file to be read backwards", "TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator used to read a", "end='') # read and compare original file to reversed tmp file, should be", "p_BufferSize : the size of the file chunk to read into memory for", "read and compare original file to reversed tmp file, should be identical for", "os import sys import time C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182 if len(sys.argv)", "utf-8 p_Separator : the character(s) used to separate the stream. 
Usually either newline", "while l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might", "l_KeepSeparator, p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment =", "for l_Line in l_File: l_Count1 += 1 l_Moment2 = time.time() l_Count2 = 0", "file to be read backwards p_BufferSize : the size of the file chunk", "or space. p_KeepNewLine : keep the newline character at the end of the", "1 if not l_Separator in l_BufferContent: l_Fragment = l_BufferContent + l_Fragment else: l_BufferFragments", "if not l_Separator in l_BufferContent: l_Fragment = l_BufferContent + l_Fragment else: l_BufferFragments =", "l_Separator if p_KeepSeparator else b'' l_Fragment = bytearray() with open(p_FileName, 'rb') as l_File:", "l_Count1 = 0 with open(sys.argv[1], 'r') as l_File: for l_Line in l_File: l_Count1", "l_Moment2 = time.time() l_Count2 = 0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 +=", "might overshoot at first read l_Blocks -= 1 if not l_Separator in l_BufferContent:", "reversed content to tmp file with open(C_TestFileName, 'w') as l_TempFile: for l_Line in", "= l_Separator if p_KeepSeparator else b'' l_Fragment = bytearray() with open(p_FileName, 'rb') as", "not l_Separator in l_BufferContent: l_Fragment = l_BufferContent + l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator)", "to be read backwards p_BufferSize : the size of the file chunk to", "= l_BufferContent + l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment +", "time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2))) else: #", "to read a file starting with the end, and proceeding backwards. 
p_FileName :", "python3 tailfile.py <testfile>') sys.exit(0) if True: # benchmark l_Moment1 = time.time() l_Count1 =", "to reversed tmp file, should be identical for l_Line, l_Copy in zip(open(sys.argv[1], 'r'),", "of the line (to be compatible with readline() ) ''' l_Separator = bytes(p_Separator,", "with open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize while", "should be identical for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if l_Line", "l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if l_Line != l_Copy: print ('|'+l_Line+'|\\n---\\n|'+l_Copy+'|')", "''' Iterator used to read a file starting with the end, and proceeding", "l_Count2 = 0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 =", "print (l_Line, end='') # read and compare original file to reversed tmp file,", "full path to the file to be read backwards p_BufferSize : the size", "as l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='')", "l_Separator in l_BufferContent: l_Fragment = l_BufferContent + l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield", "l_Fragment = bytearray() with open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell()", "= bytearray() with open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() //", "l_Blocks -= 1 if not l_Separator in l_BufferContent: l_Fragment = l_BufferContent + l_Fragment", "of the file chunk to read into memory for processing p_Encoding : the", "keep the newline character at the end of the line (to be compatible", "''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator else b'' l_Fragment", ": the size of the file chunk to read into memory for processing", "= 
time.time() l_Count1 = 0 with open(sys.argv[1], 'r') as l_File: for l_Line in", "# -*- coding: utf-8 -*- #################################################################################################### import io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8',", "(to be compatible with readline() ) ''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator =", "with readline() ) ''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator", "// p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize)", "l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding)", "compatible with readline() ) ''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if", "l_File.tell() // p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent =", "'r') as l_File: for l_Line in l_File: l_Count1 += 1 l_Moment2 = time.time()", "test algorithm # write reversed content to tmp file with open(C_TestFileName, 'w') as", "in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if l_Line != l_Copy: print ('|'+l_Line+'|\\n---\\n|'+l_Copy+'|') break os.remove(C_TestFileName)", "with open(sys.argv[1], 'r') as l_File: for l_Line in l_File: l_Count1 += 1 l_Moment2", "C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182 if len(sys.argv) != 2: print ('Usage: python3", "and proceeding backwards. 
p_FileName : the full path to the file to be", "file, should be identical for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if", "l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might overshoot", ": the encoding of the file, default is utf-8 p_Separator : the character(s)", "+= 1 l_Moment2 = time.time() l_Count2 = 0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize):", "either newline or space. p_KeepNewLine : keep the newline character at the end", "l_BufferContent: l_Fragment = l_BufferContent + l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] +", "backwards p_BufferSize : the size of the file chunk to read into memory", "first read l_Blocks -= 1 if not l_Separator in l_BufferContent: l_Fragment = l_BufferContent", "file chunk to read into memory for processing p_Encoding : the encoding of", "= bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__ == '__main__': import os import", "file to reversed tmp file, should be identical for l_Line, l_Copy in zip(open(sys.argv[1],", "path to the file to be read backwards p_BufferSize : the size of", "time C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182 if len(sys.argv) != 2: print ('Usage:", "the encoding of the file, default is utf-8 p_Separator : the character(s) used", "{} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2))) else: # test algorithm #", "at first read l_Blocks -= 1 if not l_Separator in l_BufferContent: l_Fragment =", "space. 
p_KeepNewLine : keep the newline character at the end of the line", "if True: # benchmark l_Moment1 = time.time() l_Count1 = 0 with open(sys.argv[1], 'r')", "#################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator used to", "+ l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__ ==", "+ l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator,", "l_Moment3 = time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2)))", "with open(C_TestFileName, 'w') as l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) #", "/usr/bin/python3 # -*- coding: utf-8 -*- #################################################################################################### import io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE,", "p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator used to read a file", "chunk to read into memory for processing p_Encoding : the encoding of the", "l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks", "tmp file with open(C_TestFileName, 'w') as l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'):", "= l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment in 
reversed(l_BufferFragments[1:-1]):", "__name__ == '__main__': import os import sys import time C_TestFileName = 'tmp.txt' C_TestBufferSize", "to tmp file with open(C_TestFileName, 'w') as l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize,", "b'' l_Fragment = bytearray() with open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks =", "used to read a file starting with the end, and proceeding backwards. p_FileName", "file, default is utf-8 p_Separator : the character(s) used to separate the stream.", "print ('Usage: python3 tailfile.py <testfile>') sys.exit(0) if True: # benchmark l_Moment1 = time.time()", "the line (to be compatible with readline() ) ''' l_Separator = bytes(p_Separator, p_Encoding)", "+= 1 l_Moment3 = time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3", "if p_KeepSeparator else b'' l_Fragment = bytearray() with open(p_FileName, 'rb') as l_File: l_File.seek(0,", "else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment", "p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 = time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2 -", "-*- #################################################################################################### import io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True):", "for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 = time.time() print ('{}:", "# write reversed content to tmp file with open(C_TestFileName, 'w') as l_TempFile: for", ": the full path to the file to be read backwards p_BufferSize :", "io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize,", "'tmp.txt' 
C_TestBufferSize = 9182 if len(sys.argv) != 2: print ('Usage: python3 tailfile.py <testfile>')", "the end, and proceeding backwards. p_FileName : the full path to the file", "memory for processing p_Encoding : the encoding of the file, default is utf-8", "sys.exit(0) if True: # benchmark l_Moment1 = time.time() l_Count1 = 0 with open(sys.argv[1],", "- l_Moment2))) else: # test algorithm # write reversed content to tmp file", "l_File: l_Count1 += 1 l_Moment2 = time.time() l_Count2 = 0 for l_Line in", "io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first read l_Blocks -= 1", "# test algorithm # write reversed content to tmp file with open(C_TestFileName, 'w')", "be read backwards p_BufferSize : the size of the file chunk to read", "p_KeepSeparator=True): ''' Iterator used to read a file starting with the end, and", "l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks *", "import time C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182 if len(sys.argv) != 2: print", "(l_Line, end='') # read and compare original file to reversed tmp file, should", "len(sys.argv) != 2: print ('Usage: python3 tailfile.py <testfile>') sys.exit(0) if True: # benchmark", "in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 = time.time() print ('{}: {} {}'.format(l_Count1,", "-= 1 if not l_Separator in l_BufferContent: l_Fragment = l_BufferContent + l_Fragment else:", "the end of the line (to be compatible with readline() ) ''' l_Separator", "l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='') #", "else b'' l_Fragment = bytearray() with open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks", "l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if l_Line != l_Copy: print ('|'+l_Line+'|\\n---\\n|'+l_Copy+'|') break", "read a file starting with the 
end, and proceeding backwards. p_FileName : the", "character(s) used to separate the stream. Usually either newline or space. p_KeepNewLine :", "to read into memory for processing p_Encoding : the encoding of the file,", "p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator used to read a file starting with", "= l_File.read(p_BufferSize) # might overshoot at first read l_Blocks -= 1 if not", "as l_File: for l_Line in l_File: l_Count1 += 1 l_Moment2 = time.time() l_Count2", "end, and proceeding backwards. p_FileName : the full path to the file to", "0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first", "l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__ == '__main__': import os", "l_Moment1 = time.time() l_Count1 = 0 with open(sys.argv[1], 'r') as l_File: for l_Line", "p_Encoding : the encoding of the file, default is utf-8 p_Separator : the", "time.time() l_Count2 = 0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3", "read l_Blocks -= 1 if not l_Separator in l_BufferContent: l_Fragment = l_BufferContent +", "l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield", "C_TestBufferSize = 9182 if len(sys.argv) != 2: print ('Usage: python3 tailfile.py <testfile>') sys.exit(0)", "the file to be read backwards p_BufferSize : the size of the file", "into memory for processing p_Encoding : the encoding of the file, default is", "if __name__ == '__main__': import os import sys import time C_TestFileName = 'tmp.txt'", "1 l_Moment3 = time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 -", "p_Encoding) 
#################################################################################################### if __name__ == '__main__': import os import sys import time C_TestFileName", "end of the line (to be compatible with readline() ) ''' l_Separator =", "proceeding backwards. p_FileName : the full path to the file to be read", "= time.time() l_Count2 = 0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1", "TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='') # read and compare original", "9182 if len(sys.argv) != 2: print ('Usage: python3 tailfile.py <testfile>') sys.exit(0) if True:", "used to separate the stream. Usually either newline or space. p_KeepNewLine : keep", "= 9182 if len(sys.argv) != 2: print ('Usage: python3 tailfile.py <testfile>') sys.exit(0) if", "character at the end of the line (to be compatible with readline() )", "readline() ) ''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator else", "be compatible with readline() ) ''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator", "== '__main__': import os import sys import time C_TestFileName = 'tmp.txt' C_TestBufferSize =", "file starting with the end, and proceeding backwards. 
p_FileName : the full path", "1 l_Moment2 = time.time() l_Count2 = 0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2", "the newline character at the end of the line (to be compatible with", "l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment in", "open(sys.argv[1], 'r') as l_File: for l_Line in l_File: l_Count1 += 1 l_Moment2 =", "l_File: for l_Line in l_File: l_Count1 += 1 l_Moment2 = time.time() l_Count2 =", "l_BufferContent + l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator,", "p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator else b'' l_Fragment = bytearray() with open(p_FileName,", "benchmark l_Moment1 = time.time() l_Count1 = 0 with open(sys.argv[1], 'r') as l_File: for", "l_Count2 += 1 l_Moment3 = time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1),", "bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator else b'' l_Fragment = bytearray() with", "# read and compare original file to reversed tmp file, should be identical", "l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator else b'' l_Fragment =", "p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first read l_Blocks -=", "for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if l_Line != l_Copy: print", "sys import time C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182 if len(sys.argv) != 2:", "'w') as l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line,", "#################################################################################################### import io 
#################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): '''", "to separate the stream. Usually either newline or space. p_KeepNewLine : keep the", "#! /usr/bin/python3 # -*- coding: utf-8 -*- #################################################################################################### import io #################################################################################################### def TailFile(p_FileName,", "of the file, default is utf-8 p_Separator : the character(s) used to separate", "import os import sys import time C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182 if", "l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment,", "is utf-8 p_Separator : the character(s) used to separate the stream. 
Usually either", "= l_File.tell() // p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent", "2: print ('Usage: python3 tailfile.py <testfile>') sys.exit(0) if True: # benchmark l_Moment1 =", "tmp file, should be identical for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)):", "line (to be compatible with readline() ) ''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator", "yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__ == '__main__': import os import sys import", "for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield", "p_KeepNewLine : keep the newline character at the end of the line (to", "compare original file to reversed tmp file, should be identical for l_Line, l_Copy", "the file chunk to read into memory for processing p_Encoding : the encoding", "the full path to the file to be read backwards p_BufferSize : the", "#################################################################################################### if __name__ == '__main__': import os import sys import time C_TestFileName =", "TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 = time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2", "True: # benchmark l_Moment1 = time.time() l_Count1 = 0 with open(sys.argv[1], 'r') as", "p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0])", "as l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize while l_Blocks >= 0:", "+ l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding)", 
"l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__ == '__main__':", "Usually either newline or space. p_KeepNewLine : keep the newline character at the", "size of the file chunk to read into memory for processing p_Encoding :", "in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='') # read and compare", "l_TempFile.write(l_Line) # print (l_Line, end='') # read and compare original file to reversed", "p_FileName : the full path to the file to be read backwards p_BufferSize", "yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if", "<testfile>') sys.exit(0) if True: # benchmark l_Moment1 = time.time() l_Count1 = 0 with", "starting with the end, and proceeding backwards. p_FileName : the full path to", "the file, default is utf-8 p_Separator : the character(s) used to separate the", "l_Count1 += 1 l_Moment2 = time.time() l_Count2 = 0 for l_Line in TailFile(sys.argv[1],", "default is utf-8 p_Separator : the character(s) used to separate the stream. 
Usually", "print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2))) else: # test", "else: # test algorithm # write reversed content to tmp file with open(C_TestFileName,", "# print (l_Line, end='') # read and compare original file to reversed tmp", "if len(sys.argv) != 2: print ('Usage: python3 tailfile.py <testfile>') sys.exit(0) if True: #", "!= 2: print ('Usage: python3 tailfile.py <testfile>') sys.exit(0) if True: # benchmark l_Moment1", "l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 = time.time() print ('{}: {}", "+ l_KeepSeparator, p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment", "and compare original file to reversed tmp file, should be identical for l_Line,", "def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator used to read", "identical for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if l_Line != l_Copy:", "= 0 with open(sys.argv[1], 'r') as l_File: for l_Line in l_File: l_Count1 +=", "= 0 for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize): l_Count2 += 1 l_Moment3 = time.time()", "separate the stream. Usually either newline or space. p_KeepNewLine : keep the newline", "reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) ####################################################################################################", "p_Separator : the character(s) used to separate the stream. 
Usually either newline or", "l_File.read(p_BufferSize) # might overshoot at first read l_Blocks -= 1 if not l_Separator", "bytearray() with open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize", "tailfile.py <testfile>') sys.exit(0) if True: # benchmark l_Moment1 = time.time() l_Count1 = 0", "reversed tmp file, should be identical for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName,", "read backwards p_BufferSize : the size of the file chunk to read into", "p_KeepSeparator else b'' l_Fragment = bytearray() with open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END)", "algorithm # write reversed content to tmp file with open(C_TestFileName, 'w') as l_TempFile:", "io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator used", "the stream. Usually either newline or space. 
p_KeepNewLine : keep the newline character", "Iterator used to read a file starting with the end, and proceeding backwards.", "encoding of the file, default is utf-8 p_Separator : the character(s) used to", "l_Moment2))) else: # test algorithm # write reversed content to tmp file with", "C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='') # read and compare original file", "('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2))) else: # test algorithm", "'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize while l_Blocks >=", "p_BufferSize while l_Blocks >= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) #", "utf-8 -*- #################################################################################################### import io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\\n',", "('Usage: python3 tailfile.py <testfile>') sys.exit(0) if True: # benchmark l_Moment1 = time.time() l_Count1", "(l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2))) else: # test algorithm # write reversed", "l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first read l_Blocks -= 1 if", "the character(s) used to separate the stream. Usually either newline or space. 
p_KeepNewLine", "time.time() l_Count1 = 0 with open(sys.argv[1], 'r') as l_File: for l_Line in l_File:", "open(p_FileName, 'rb') as l_File: l_File.seek(0, io.SEEK_END) l_Blocks = l_File.tell() // p_BufferSize while l_Blocks", "l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first read", "open(C_TestFileName, 'w') as l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print", ": the character(s) used to separate the stream. Usually either newline or space.", "l_Fragment = l_BufferContent + l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment", "for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='') # read", ") ''' l_Separator = bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator else b''", "# benchmark l_Moment1 = time.time() l_Count1 = 0 with open(sys.argv[1], 'r') as l_File:", "# might overshoot at first read l_Blocks -= 1 if not l_Separator in", "for processing p_Encoding : the encoding of the file, default is utf-8 p_Separator", ">= 0: l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might overshoot at", "str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment +", "0 with open(sys.argv[1], 'r') as l_File: for l_Line in l_File: l_Count1 += 1", "(l_Moment3 - l_Moment2))) else: # test algorithm # write reversed content to tmp", "processing p_Encoding : the encoding of the file, default is utf-8 p_Separator :", "l_Line in l_File: l_Count1 += 1 l_Moment2 = time.time() l_Count2 = 0 for", "a file starting with the end, and proceeding backwards. 
p_FileName : the full", "'__main__': import os import sys import time C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182", "in l_File: l_Count1 += 1 l_Moment2 = time.time() l_Count2 = 0 for l_Line", "'\\n', p_KeepSeparator=True): ''' Iterator used to read a file starting with the end,", "stream. Usually either newline or space. p_KeepNewLine : keep the newline character at", "with the end, and proceeding backwards. p_FileName : the full path to the", "-*- coding: utf-8 -*- #################################################################################################### import io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator", "l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding) for", "= '\\n', p_KeepSeparator=True): ''' Iterator used to read a file starting with the", "= bytes(p_Separator, p_Encoding) l_KeepSeparator = l_Separator if p_KeepSeparator else b'' l_Fragment = bytearray()", "* p_BufferSize, io.SEEK_SET) l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first read l_Blocks", "str(l_Fragment, p_Encoding) #################################################################################################### if __name__ == '__main__': import os import sys import time", "newline or space. 
p_KeepNewLine : keep the newline character at the end of", "at the end of the line (to be compatible with readline() ) '''", "import sys import time C_TestFileName = 'tmp.txt' C_TestBufferSize = 9182 if len(sys.argv) !=", "content to tmp file with open(C_TestFileName, 'w') as l_TempFile: for l_Line in TailFile(sys.argv[1],", "yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding) for l_BufferFragment in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment", "write reversed content to tmp file with open(C_TestFileName, 'w') as l_TempFile: for l_Line", "in reversed(l_BufferFragments[1:-1]): yield str(l_BufferFragment + l_KeepSeparator, p_Encoding) l_Fragment = bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding)", "to the file to be read backwards p_BufferSize : the size of the", "bytearray(l_BufferFragments[0]) yield str(l_Fragment, p_Encoding) #################################################################################################### if __name__ == '__main__': import os import sys", "l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='') # read and", ": keep the newline character at the end of the line (to be", "original file to reversed tmp file, should be identical for l_Line, l_Copy in", "be identical for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)): if l_Line !=", "p_Separator='\\n'): l_TempFile.write(l_Line) # print (l_Line, end='') # read and compare original file to", "overshoot at first read l_Blocks -= 1 if not l_Separator in l_BufferContent: l_Fragment", "= 'tmp.txt' C_TestBufferSize = 9182 if len(sys.argv) != 2: print ('Usage: python3 tailfile.py", "in l_BufferContent: l_Fragment = l_BufferContent + l_Fragment else: l_BufferFragments = l_BufferContent.split(l_Separator) yield str(l_BufferFragments[-1]", "= time.time() print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - 
l_Moment2))) else:", "file with open(C_TestFileName, 'w') as l_TempFile: for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\\n'): l_TempFile.write(l_Line)", "l_KeepSeparator = l_Separator if p_KeepSeparator else b'' l_Fragment = bytearray() with open(p_FileName, 'rb')", "the size of the file chunk to read into memory for processing p_Encoding", "- l_Moment1), (l_Moment3 - l_Moment2))) else: # test algorithm # write reversed content", "l_Moment1), (l_Moment3 - l_Moment2))) else: # test algorithm # write reversed content to", "read into memory for processing p_Encoding : the encoding of the file, default", "newline character at the end of the line (to be compatible with readline()", "p_Encoding='utf8', p_Separator = '\\n', p_KeepSeparator=True): ''' Iterator used to read a file starting", "coding: utf-8 -*- #################################################################################################### import io #################################################################################################### def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator =" ]
[ "if skip: return updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()),", "self, context: CodemodContext, formatter: str = \"black\", parameter_count: Optional[int] = None, argument_count: Optional[int]", "ValueError( f\"Unknown formatter {formatter!r}. Presets exist for \" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count:", "to add trailing comma\", type=int, default=None, ) def leave_Parameters( self, original_node: cst.Parameters, updated_node:", "int = parameter_count or presets[\"parameter_count\"] self.argument_count: int = argument_count or presets[\"argument_count\"] @staticmethod def", "raise ValueError( f\"Unknown formatter {formatter!r}. Presets exist for \" + \", \".join(presets_per_formatter.keys()) )", "autoformatter) may make it easier to read function definitions and calls \"\"\" )", "{\"self\", \"cls\"} ) ) if skip: return updated_node else: last_param = updated_node.params[-1] return", "us to add trailing comma\", type=int, default=None, ) def leave_Parameters( self, original_node: cst.Parameters,", "libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = { \"black\": { \"parameter_count\":", "= \"black\", parameter_count: Optional[int] = None, argument_count: Optional[int] = None, ) -> None:", "black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for", "# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code", "return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node: cst.Call, updated_node:", "metavar=\"FORMATTER\", help=\"Formatter to target (e.g. 
yapf or black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\",", "\"\"\" ) def __init__( self, context: CodemodContext, formatter: str = \"black\", parameter_count: Optional[int]", "2, }, \"yapf\": { \"parameter_count\": 2, \"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION:", "textwrap from typing import Dict, Optional import libcst as cst from libcst.codemod import", "1, \"argument_count\": 2, }, \"yapf\": { \"parameter_count\": 2, \"argument_count\": 2, }, } class", "def __init__( self, context: CodemodContext, formatter: str = \"black\", parameter_count: Optional[int] = None,", "= parameter_count or presets[\"parameter_count\"] self.argument_count: int = argument_count or presets[\"argument_count\"] @staticmethod def add_args(arg_parser:", "Applying this codemod (and then an autoformatter) may make it easier to read", "as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = {", "None or len(updated_node.params) < self.parameter_count or ( len(updated_node.params) == 1 and updated_node.params[0].name.value in", "line - Yapf appears to do so whenever there are at least two", "help=\"Minimal number of parameters for us to add trailing comma\", type=int, default=None, )", "else: last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call(", "us to add trailing comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal", "in the # LICENSE file in the root directory of this source tree.", "two arguments Applying this codemod (and then an autoformatter) may make it easier", "this source tree. 
import argparse import textwrap from typing import Dict, Optional import", "is one parameter / argument per line if there is a trailing comma:", ") arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for us to add", "affiliates. # # This source code is licensed under the MIT license found", "dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for us to add trailing comma\", type=int,", "Platforms, Inc. and affiliates. # # This source code is licensed under the", "line if there is a trailing comma: - Black will always separate them", "to add trailing comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number", "(e.g. yapf or black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number", "\"parameter_count\": 2, \"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\"", "split headers and function calls so that there is one parameter / argument", "\"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g. yapf or black)\", type=str, default=\"black\", )", "to arguments in function headers and function calls. The idea is that both", "updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args) < self.argument_count: return updated_node else: last_arg", "type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for us", "to do so whenever there are at least two arguments Applying this codemod", "in function headers and function calls. 
The idea is that both the black", "for us to add trailing comma\", type=int, default=None, ) def leave_Parameters( self, original_node:", "CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = { \"black\": { \"parameter_count\": 1, \"argument_count\":", "do so whenever there are at least two arguments Applying this codemod (and", "source code is licensed under the MIT license found in the # LICENSE", "None: raise ValueError( f\"Unknown formatter {formatter!r}. Presets exist for \" + \", \".join(presets_per_formatter.keys())", "# LICENSE file in the root directory of this source tree. import argparse", "directory of this source tree. import argparse import textwrap from typing import Dict,", "= ( # self.parameter_count is None or len(updated_node.params) < self.parameter_count or ( len(updated_node.params)", "function calls. The idea is that both the black and yapf autoformatters will", "< self.parameter_count or ( len(updated_node.params) == 1 and updated_node.params[0].name.value in {\"self\", \"cls\"} )", "\"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for us to add trailing comma\",", "yapf autoformatters will tend to split headers and function calls so that there", "make it easier to read function definitions and calls \"\"\" ) def __init__(", "-> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g. yapf or black)\",", "len(updated_node.params) < self.parameter_count or ( len(updated_node.params) == 1 and updated_node.params[0].name.value in {\"self\", \"cls\"}", "argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g. 
yapf or", ") if skip: return updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1],", "def leave_Call( self, original_node: cst.Call, updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args) <", "\" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or presets[\"parameter_count\"] self.argument_count: int", "the black and yapf autoformatters will tend to split headers and function calls", "-> cst.Parameters: skip = ( # self.parameter_count is None or len(updated_node.params) < self.parameter_count", "least two arguments Applying this codemod (and then an autoformatter) may make it", "import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]]", "argument per line if there is a trailing comma: - Black will always", "2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod that adds", "self.argument_count: return updated_node else: last_arg = updated_node.args[-1] return updated_node.with_changes( args=( *updated_node.args[:-1], last_arg.with_changes(comma=cst.Comma()), ),", "help=\"Formatter to target (e.g. 
yapf or black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\",", "\".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or presets[\"parameter_count\"] self.argument_count: int = argument_count or", "always separate them by line - Yapf appears to do so whenever there", "or presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter", "or ( len(updated_node.params) == 1 and updated_node.params[0].name.value in {\"self\", \"cls\"} ) ) if", "whenever there are at least two arguments Applying this codemod (and then an", "= None, ) -> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is None:", "to target (e.g. yapf or black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\",", "str = textwrap.dedent( \"\"\" Codemod that adds trailing commas to arguments in function", "add trailing comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of", "skip = ( # self.parameter_count is None or len(updated_node.params) < self.parameter_count or (", "add trailing comma\", type=int, default=None, ) def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters,", "that adds trailing commas to arguments in function headers and function calls. The", "parameter_count or presets[\"parameter_count\"] self.argument_count: int = argument_count or presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser)", "calls. 
The idea is that both the black and yapf autoformatters will tend", "yapf or black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of", "and affiliates. # # This source code is licensed under the MIT license", "them by line - Yapf appears to do so whenever there are at", "- Black will always separate them by line - Yapf appears to do", "from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = { \"black\": {", "This source code is licensed under the MIT license found in the #", "definitions and calls \"\"\" ) def __init__( self, context: CodemodContext, formatter: str =", "-> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is None: raise ValueError( f\"Unknown", "= argument_count or presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\",", "leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: skip = ( #", "are at least two arguments Applying this codemod (and then an autoformatter) may", "Meta Platforms, Inc. and affiliates. # # This source code is licensed under", "it easier to read function definitions and calls \"\"\" ) def __init__( self,", "source tree. 
import argparse import textwrap from typing import Dict, Optional import libcst", "import argparse import textwrap from typing import Dict, Optional import libcst as cst", "self.parameter_count: int = parameter_count or presets[\"parameter_count\"] self.argument_count: int = argument_count or presets[\"argument_count\"] @staticmethod", "updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def", "type=int, default=None, ) def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters:", "Optional[int] = None, argument_count: Optional[int] = None, ) -> None: super().__init__(context) presets =", "if presets is None: raise ValueError( f\"Unknown formatter {formatter!r}. Presets exist for \"", ") ) if skip: return updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes( params=(", "easier to read function definitions and calls \"\"\" ) def __init__( self, context:", "cst.Parameters, ) -> cst.Parameters: skip = ( # self.parameter_count is None or len(updated_node.params)", "Optional[int] = None, ) -> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is", "appears to do so whenever there are at least two arguments Applying this", "last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self,", "trailing comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments", "typing import Dict, Optional import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand", "last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node: cst.Call, updated_node: cst.Call, ) -> 
cst.Call:", "\"cls\"} ) ) if skip: return updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes(", "\"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod that", "to read function definitions and calls \"\"\" ) def __init__( self, context: CodemodContext,", "file in the root directory of this source tree. import argparse import textwrap", "in {\"self\", \"cls\"} ) ) if skip: return updated_node else: last_param = updated_node.params[-1]", "= None, argument_count: Optional[int] = None, ) -> None: super().__init__(context) presets = presets_per_formatter.get(formatter)", "self.parameter_count or ( len(updated_node.params) == 1 and updated_node.params[0].name.value in {\"self\", \"cls\"} ) )", "to split headers and function calls so that there is one parameter /", "LICENSE file in the root directory of this source tree. import argparse import", "None, argument_count: Optional[int] = None, ) -> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if", "}, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod that adds trailing", "function calls so that there is one parameter / argument per line if", ") def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: skip =", "both the black and yapf autoformatters will tend to split headers and function", "calls \"\"\" ) def __init__( self, context: CodemodContext, formatter: str = \"black\", parameter_count:", "metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for us to add trailing comma\", type=int, default=None,", "super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is None: raise ValueError( f\"Unknown formatter {formatter!r}.", "VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = { \"black\": { 
\"parameter_count\": 1, \"argument_count\": 2,", "the MIT license found in the # LICENSE file in the root directory", "), ) def leave_Call( self, original_node: cst.Call, updated_node: cst.Call, ) -> cst.Call: if", "\", \".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or presets[\"parameter_count\"] self.argument_count: int = argument_count", "# self.parameter_count is None or len(updated_node.params) < self.parameter_count or ( len(updated_node.params) == 1", "will tend to split headers and function calls so that there is one", "\"argument_count\": 2, }, \"yapf\": { \"parameter_count\": 2, \"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand):", "def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: skip = (", "} class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod that adds trailing commas", "and yapf autoformatters will tend to split headers and function calls so that", "the # LICENSE file in the root directory of this source tree. import", "\"\"\" Codemod that adds trailing commas to arguments in function headers and function", "will always separate them by line - Yapf appears to do so whenever", "+ \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or presets[\"parameter_count\"] self.argument_count: int =", "arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g. 
yapf or black)\", type=str, default=\"black\",", "textwrap.dedent( \"\"\" Codemod that adds trailing commas to arguments in function headers and", "self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: skip = ( # self.parameter_count", "or black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters", "comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for", "the root directory of this source tree. import argparse import textwrap from typing", "= { \"black\": { \"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\": { \"parameter_count\": 2,", "arguments Applying this codemod (and then an autoformatter) may make it easier to", "cst.Call, updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args) < self.argument_count: return updated_node else:", "in the root directory of this source tree. import argparse import textwrap from", "at least two arguments Applying this codemod (and then an autoformatter) may make", "root directory of this source tree. import argparse import textwrap from typing import", "# This source code is licensed under the MIT license found in the", "that both the black and yapf autoformatters will tend to split headers and", "exist for \" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or presets[\"parameter_count\"]", "dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g. 
yapf or black)\", type=str, default=\"black\", ) arg_parser.add_argument(", "= updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node:", "or presets[\"parameter_count\"] self.argument_count: int = argument_count or presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) ->", "Inc. and affiliates. # # This source code is licensed under the MIT", "1 and updated_node.params[0].name.value in {\"self\", \"cls\"} ) ) if skip: return updated_node else:", "of arguments for us to add trailing comma\", type=int, default=None, ) def leave_Parameters(", "is that both the black and yapf autoformatters will tend to split headers", ") def leave_Call( self, original_node: cst.Call, updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args)", "if there is a trailing comma: - Black will always separate them by", "# # This source code is licensed under the MIT license found in", "license found in the # LICENSE file in the root directory of this", "argparse import textwrap from typing import Dict, Optional import libcst as cst from", "and function calls so that there is one parameter / argument per line", "= presets_per_formatter.get(formatter) if presets is None: raise ValueError( f\"Unknown formatter {formatter!r}. Presets exist", "adds trailing commas to arguments in function headers and function calls. 
The idea", "{ \"black\": { \"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\": { \"parameter_count\": 2, \"argument_count\":", "for us to add trailing comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\",", "comma\", type=int, default=None, ) def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) ->", "cst.Parameters: skip = ( # self.parameter_count is None or len(updated_node.params) < self.parameter_count or", "or len(updated_node.params) < self.parameter_count or ( len(updated_node.params) == 1 and updated_node.params[0].name.value in {\"self\",", "found in the # LICENSE file in the root directory of this source", "headers and function calls so that there is one parameter / argument per", "/ argument per line if there is a trailing comma: - Black will", "trailing comma: - Black will always separate them by line - Yapf appears", "so whenever there are at least two arguments Applying this codemod (and then", "licensed under the MIT license found in the # LICENSE file in the", "and updated_node.params[0].name.value in {\"self\", \"cls\"} ) ) if skip: return updated_node else: last_param", "len(updated_node.args) < self.argument_count: return updated_node else: last_arg = updated_node.args[-1] return updated_node.with_changes( args=( *updated_node.args[:-1],", "Optional import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str,", "from typing import Dict, Optional import libcst as cst from libcst.codemod import CodemodContext,", ") -> cst.Parameters: skip = ( # self.parameter_count is None or len(updated_node.params) <", "import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = { \"black\": { \"parameter_count\": 1,", "separate them by line - Yapf appears to do so whenever there are", "original_node: cst.Parameters, 
updated_node: cst.Parameters, ) -> cst.Parameters: skip = ( # self.parameter_count is", "is None: raise ValueError( f\"Unknown formatter {formatter!r}. Presets exist for \" + \",", "MIT license found in the # LICENSE file in the root directory of", "cst.Call: if len(updated_node.args) < self.argument_count: return updated_node else: last_arg = updated_node.args[-1] return updated_node.with_changes(", "__init__( self, context: CodemodContext, formatter: str = \"black\", parameter_count: Optional[int] = None, argument_count:", "skip: return updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ),", "is a trailing comma: - Black will always separate them by line -", ") arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for us to add", "idea is that both the black and yapf autoformatters will tend to split", "updated_node: cst.Parameters, ) -> cst.Parameters: skip = ( # self.parameter_count is None or", "self.argument_count: int = argument_count or presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument(", "Dict[str, int]] = { \"black\": { \"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\": {", "there are at least two arguments Applying this codemod (and then an autoformatter)", "Codemod that adds trailing commas to arguments in function headers and function calls.", "then an autoformatter) may make it easier to read function definitions and calls", "help=\"Minimal number of arguments for us to add trailing comma\", type=int, default=None, )", "-> cst.Call: if len(updated_node.args) < self.argument_count: return updated_node else: last_arg = updated_node.args[-1] return", "\"black\": { \"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\": { \"parameter_count\": 2, \"argument_count\": 2,", 
"presets[\"parameter_count\"] self.argument_count: int = argument_count or presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None:", "params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node: cst.Call, updated_node: cst.Call, )", "type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for us", "presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to", "cst.Call, ) -> cst.Call: if len(updated_node.args) < self.argument_count: return updated_node else: last_arg =", ") self.parameter_count: int = parameter_count or presets[\"parameter_count\"] self.argument_count: int = argument_count or presets[\"argument_count\"]", "arguments in function headers and function calls. The idea is that both the", "DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod that adds trailing commas to arguments in", "code is licensed under the MIT license found in the # LICENSE file", "of this source tree. import argparse import textwrap from typing import Dict, Optional", "is licensed under the MIT license found in the # LICENSE file in", "arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for us to add trailing", "dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for us to add trailing comma\", type=int,", "default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for us to", "function headers and function calls. 
The idea is that both the black and", "this codemod (and then an autoformatter) may make it easier to read function", "Black will always separate them by line - Yapf appears to do so", "def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g.", "number of parameters for us to add trailing comma\", type=int, default=None, ) arg_parser.add_argument(", "and function calls. The idea is that both the black and yapf autoformatters", "function definitions and calls \"\"\" ) def __init__( self, context: CodemodContext, formatter: str", "default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for us to", "presets = presets_per_formatter.get(formatter) if presets is None: raise ValueError( f\"Unknown formatter {formatter!r}. Presets", "may make it easier to read function definitions and calls \"\"\" ) def", "original_node: cst.Call, updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args) < self.argument_count: return updated_node", "None, ) -> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is None: raise", "\"yapf\": { \"parameter_count\": 2, \"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str =", "( len(updated_node.params) == 1 and updated_node.params[0].name.value in {\"self\", \"cls\"} ) ) if skip:", "return updated_node else: last_arg = updated_node.args[-1] return updated_node.with_changes( args=( *updated_node.args[:-1], last_arg.with_changes(comma=cst.Comma()), ), )", "parameter_count: Optional[int] = None, argument_count: Optional[int] = None, ) -> None: super().__init__(context) presets", "arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for us to add trailing", 
"formatter: str = \"black\", parameter_count: Optional[int] = None, argument_count: Optional[int] = None, )", "import textwrap from typing import Dict, Optional import libcst as cst from libcst.codemod", "presets_per_formatter: Dict[str, Dict[str, int]] = { \"black\": { \"parameter_count\": 1, \"argument_count\": 2, },", "trailing comma\", type=int, default=None, ) def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, )", "( # self.parameter_count is None or len(updated_node.params) < self.parameter_count or ( len(updated_node.params) ==", "there is a trailing comma: - Black will always separate them by line", "calls so that there is one parameter / argument per line if there", "target (e.g. yapf or black)\", type=str, default=\"black\", ) arg_parser.add_argument( \"--paramter-count\", dest=\"parameter_count\", metavar=\"PARAMETER_COUNT\", help=\"Minimal", "None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g. yapf or black)\", type=str,", "self.parameter_count is None or len(updated_node.params) < self.parameter_count or ( len(updated_node.params) == 1 and", "len(updated_node.params) == 1 and updated_node.params[0].name.value in {\"self\", \"cls\"} ) ) if skip: return", "{formatter!r}. 
Presets exist for \" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count", "cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: skip = ( # self.parameter_count is None", "import Dict, Optional import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter:", ") -> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is None: raise ValueError(", "updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node: cst.Call,", "a trailing comma: - Black will always separate them by line - Yapf", "if len(updated_node.args) < self.argument_count: return updated_node else: last_arg = updated_node.args[-1] return updated_node.with_changes( args=(", "Dict, Optional import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str,", "presets is None: raise ValueError( f\"Unknown formatter {formatter!r}. Presets exist for \" +", "\"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\": { \"parameter_count\": 2, \"argument_count\": 2, }, }", "- Yapf appears to do so whenever there are at least two arguments", "codemod (and then an autoformatter) may make it easier to read function definitions", "f\"Unknown formatter {formatter!r}. Presets exist for \" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int", "commas to arguments in function headers and function calls. 
The idea is that", "and calls \"\"\" ) def __init__( self, context: CodemodContext, formatter: str = \"black\",", "so that there is one parameter / argument per line if there is", "= textwrap.dedent( \"\"\" Codemod that adds trailing commas to arguments in function headers", "parameters for us to add trailing comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\", dest=\"argument_count\",", "updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node: cst.Call, updated_node: cst.Call,", "None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is None: raise ValueError( f\"Unknown formatter", ") -> cst.Call: if len(updated_node.args) < self.argument_count: return updated_node else: last_arg = updated_node.args[-1]", "return updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), )", "metavar=\"PARAMETER_COUNT\", help=\"Minimal number of parameters for us to add trailing comma\", type=int, default=None,", "for \" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or presets[\"parameter_count\"] self.argument_count:", "one parameter / argument per line if there is a trailing comma: -", "is None or len(updated_node.params) < self.parameter_count or ( len(updated_node.params) == 1 and updated_node.params[0].name.value", "parameter / argument per line if there is a trailing comma: - Black", "context: CodemodContext, formatter: str = \"black\", parameter_count: Optional[int] = None, argument_count: Optional[int] =", "Yapf appears to do so whenever there are at least two arguments Applying", "class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod that adds trailing commas to", "argument_count or presets[\"argument_count\"] 
@staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\",", "tree. import argparse import textwrap from typing import Dict, Optional import libcst as", "{ \"parameter_count\": 2, \"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent(", "under the MIT license found in the # LICENSE file in the root", "there is one parameter / argument per line if there is a trailing", "*updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node: cst.Call, updated_node: cst.Call, ) ->", "arguments for us to add trailing comma\", type=int, default=None, ) def leave_Parameters( self,", "libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] =", "of parameters for us to add trailing comma\", type=int, default=None, ) arg_parser.add_argument( \"--argument-count\",", "< self.argument_count: return updated_node else: last_arg = updated_node.args[-1] return updated_node.with_changes( args=( *updated_node.args[:-1], last_arg.with_changes(comma=cst.Comma()),", "updated_node.params[0].name.value in {\"self\", \"cls\"} ) ) if skip: return updated_node else: last_param =", ") def __init__( self, context: CodemodContext, formatter: str = \"black\", parameter_count: Optional[int] =", "leave_Call( self, original_node: cst.Call, updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args) < self.argument_count:", "CodemodContext, formatter: str = \"black\", parameter_count: Optional[int] = None, argument_count: Optional[int] = None,", "headers and function calls. 
The idea is that both the black and yapf", "per line if there is a trailing comma: - Black will always separate", "add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target (e.g. yapf", "that there is one parameter / argument per line if there is a", "(c) Meta Platforms, Inc. and affiliates. # # This source code is licensed", "int]] = { \"black\": { \"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\": { \"parameter_count\":", "comma: - Black will always separate them by line - Yapf appears to", "Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is", "self, original_node: cst.Call, updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args) < self.argument_count: return", "\"black\", parameter_count: Optional[int] = None, argument_count: Optional[int] = None, ) -> None: super().__init__(context)", "trailing commas to arguments in function headers and function calls. The idea is", "\"--argument-count\", dest=\"argument_count\", metavar=\"ARGUMENT_COUNT\", help=\"Minimal number of arguments for us to add trailing comma\",", "str = \"black\", parameter_count: Optional[int] = None, argument_count: Optional[int] = None, ) ->", "black and yapf autoformatters will tend to split headers and function calls so", "presets_per_formatter.get(formatter) if presets is None: raise ValueError( f\"Unknown formatter {formatter!r}. 
Presets exist for", "(and then an autoformatter) may make it easier to read function definitions and", "Presets exist for \" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or", "by line - Yapf appears to do so whenever there are at least", "{ \"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\": { \"parameter_count\": 2, \"argument_count\": 2, },", "The idea is that both the black and yapf autoformatters will tend to", "autoformatters will tend to split headers and function calls so that there is", "number of arguments for us to add trailing comma\", type=int, default=None, ) def", "an autoformatter) may make it easier to read function definitions and calls \"\"\"", "default=None, ) def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: skip", "int = argument_count or presets[\"argument_count\"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\",", "2, \"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod", "formatter {formatter!r}. 
Presets exist for \" + \", \".join(presets_per_formatter.keys()) ) self.parameter_count: int =", "Dict[str, Dict[str, int]] = { \"black\": { \"parameter_count\": 1, \"argument_count\": 2, }, \"yapf\":", "read function definitions and calls \"\"\" ) def __init__( self, context: CodemodContext, formatter:", "}, \"yapf\": { \"parameter_count\": 2, \"argument_count\": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str", "== 1 and updated_node.params[0].name.value in {\"self\", \"cls\"} ) ) if skip: return updated_node", "AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( \"\"\" Codemod that adds trailing commas to arguments", "cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = { \"black\":", "argument_count: Optional[int] = None, ) -> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets", "tend to split headers and function calls so that there is one parameter", "@staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( \"--formatter\", dest=\"formatter\", metavar=\"FORMATTER\", help=\"Formatter to target" ]
[ "Panel # === Configure the simulator ================================================ duration = 100 dt = 0.01", "plt import pyNN.nest as sim from pyNN.utility.plotting import Figure, Panel # === Configure", "= sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the", "with the Izhikevich neuron model. \"\"\" import numpy as np import matplotlib.pyplot as", "100 v_init = -64 input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e) neurons", "0.0005 * np.logspace(-4, 6, n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u',", "{'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0} params = class_2 n =", "numpy as np import matplotlib.pyplot as plt import pyNN.nest as sim from pyNN.utility.plotting", "first_spiketimes = [] rates = [] for spiketrain in data.spiketrains: if len(spiketrain) ==", "======================================= phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6} class_2 =", "for spiketrain in data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) /", "label='avg spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential", "ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0]", "[] rates = [] for spiketrain in data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty)", "network ======================================= phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6} class_2", "pyNN.utility.plotting import Figure, Panel # === Configure the simulator ================================================ duration = 100", "pyNN.nest as sim from 
pyNN.utility.plotting import Figure, Panel # === Configure the simulator", "= 100 v_init = -64 input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e)", "from pyNN.utility.plotting import Figure, Panel # === Configure the simulator ================================================ duration =", "= 0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build and instrument the network ======================================= phasic_spiking", "instrument the network ======================================= phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd':", "np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u", "# === Build and instrument the network ======================================= phasic_spiking = {'a': 0.02, 'b':", "'d': 0} params = class_2 n = 100 v_init = -64 input_currents =", "u=-params['b']*v_init) # === Run the simulation ===================================================== sim.run(duration) # === Save the results,", "0} params = class_2 n = 100 v_init = -64 input_currents = 0.0005", "/ np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0]", "plt.savefig('FI') v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time", "0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build and instrument the network ======================================= phasic_spiking =", "as sim from pyNN.utility.plotting import Figure, Panel # === Configure the simulator ================================================", "figure ============================= data = neurons.get_data().segments[0] first_spiketimes = [] rates = [] for spiketrain", "import pyNN.nest as sim from pyNN.utility.plotting import Figure, Panel # === Configure the", 
"===================================================== sim.run(duration) # === Save the results, optionally plot a figure ============================= data", "plot a figure ============================= data = neurons.get_data().segments[0] first_spiketimes = [] rates = []", "input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params))", "'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the simulation ===================================================== sim.run(duration) # === Save", "import matplotlib.pyplot as plt import pyNN.nest as sim from pyNN.utility.plotting import Figure, Panel", "'b': 0.25, 'c': -65, 'd': 6} class_2 = {'a': 0.2, 'b': 0.26, 'c':", "neuron model. \"\"\" import numpy as np import matplotlib.pyplot as plt import pyNN.nest", "params = class_2 n = 100 v_init = -64 input_currents = 0.0005 *", "data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u,", "sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the simulation =====================================================", "0.25, 'c': -65, 'd': 6} class_2 = {'a': 0.2, 'b': 0.26, 'c': -65,", "Build and instrument the network ======================================= phasic_spiking = {'a': 0.02, 'b': 0.25, 'c':", "class_2 n = 100 v_init = -64 input_currents = 0.0005 * np.logspace(-4, 6,", "neurons.get_data().segments[0] first_spiketimes = [] rates = [] for spiketrain in data.spiketrains: if len(spiketrain)", "rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate')", "potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u 
variable (units?)\")).save('mem') # === Clean", "-65, 'd': 0} params = class_2 n = 100 v_init = -64 input_currents", "0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs')", "results, optionally plot a figure ============================= data = neurons.get_data().segments[0] first_spiketimes = [] rates", "# === Configure the simulator ================================================ duration = 100 dt = 0.01 sim.setup(timestep=dt,", "data = neurons.get_data().segments[0] first_spiketimes = [] rates = [] for spiketrain in data.spiketrains:", "as plt import pyNN.nest as sim from pyNN.utility.plotting import Figure, Panel # ===", "= class_2 n = 100 v_init = -64 input_currents = 0.0005 * np.logspace(-4,", "plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI') v", "'d': 6} class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0} params", "np import matplotlib.pyplot as plt import pyNN.nest as sim from pyNN.utility.plotting import Figure,", "0.26, 'c': -65, 'd': 0} params = class_2 n = 100 v_init =", "'b': 0.26, 'c': -65, 'd': 0} params = class_2 n = 100 v_init", "-64 input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents,", "= -64 input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e) neurons = sim.Population(n,", "# === Run the simulation ===================================================== sim.run(duration) # === Save the results, optionally", "/ duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend()", "min_delay=0.1) # === Build and instrument the network ======================================= phasic_spiking = {'a': 
0.02,", "len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes),", "data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem')", "if len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 /", "'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the simulation ===================================================== sim.run(duration) # ===", "np.logspace(-4, 6, n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init,", "=== Save the results, optionally plot a figure ============================= data = neurons.get_data().segments[0] first_spiketimes", "neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run", "sim.setup(timestep=dt, min_delay=0.1) # === Build and instrument the network ======================================= phasic_spiking = {'a':", "=== Run the simulation ===================================================== sim.run(duration) # === Save the results, optionally plot", "import numpy as np import matplotlib.pyplot as plt import pyNN.nest as sim from", "n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) #", "\"\"\" Tests with the Izhikevich neuron model. 
\"\"\" import numpy as np import", "neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the simulation ===================================================== sim.run(duration) #", "yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem') # === Clean up and quit ======================================================== sim.end()", "the network ======================================= phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6}", "label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u =", "= 0.0005 * np.logspace(-4, 6, n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v',", "= {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6} class_2 = {'a': 0.2,", "Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem') #", "-65, 'd': 6} class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0}", "data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1", "first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg", "sim from pyNN.utility.plotting import Figure, Panel # === Configure the simulator ================================================ duration", "simulation ===================================================== sim.run(duration) # === Save the results, optionally plot a figure =============================", "model. 
\"\"\" import numpy as np import matplotlib.pyplot as plt import pyNN.nest as", "import Figure, Panel # === Configure the simulator ================================================ duration = 100 dt", "plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v,", "class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0} params = class_2", "first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents,", "as np import matplotlib.pyplot as plt import pyNN.nest as sim from pyNN.utility.plotting import", "plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True,", "= data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True),", "sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the simulation", "6} class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0} params =", "the simulator ================================================ duration = 100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) # ===", "and instrument the network ======================================= phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65,", "Figure, Panel # === Configure the simulator ================================================ duration = 100 dt =", "phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6} class_2 = {'a':", "spiketrain in data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / 
duration)", "100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build and instrument the network", "=== Build and instrument the network ======================================= phasic_spiking = {'a': 0.02, 'b': 0.25,", "n = 100 v_init = -64 input_currents = 0.0005 * np.logspace(-4, 6, n,", "= data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u variable", "Configure the simulator ================================================ duration = 100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) #", "optionally plot a figure ============================= data = neurons.get_data().segments[0] first_spiketimes = [] rates =", "spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\",", "================================================ duration = 100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build and", "simulator ================================================ duration = 100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build", "base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # ===", "xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem') # === Clean up and", "**params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the simulation ===================================================== sim.run(duration)", "= {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0} params = class_2 n", "0.02, 'b': 0.25, 'c': -65, 'd': 6} class_2 = {'a': 0.2, 'b': 0.26,", "a figure ============================= data = neurons.get_data().segments[0] first_spiketimes = [] rates = [] for", "dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build and instrument the 
network =======================================", "in data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents,", "Save the results, optionally plot a figure ============================= data = neurons.get_data().segments[0] first_spiketimes =", "xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem') # === Clean up and quit", "[] for spiketrain in data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain)", "the simulation ===================================================== sim.run(duration) # === Save the results, optionally plot a figure", "{'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6} class_2 = {'a': 0.2, 'b':", "neurons.initialize(v=v_init, u=-params['b']*v_init) # === Run the simulation ===================================================== sim.run(duration) # === Save the", "(mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem') # === Clean up", "'c': -65, 'd': 0} params = class_2 n = 100 v_init = -64", "6, n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes']) neurons.initialize(v=v_init, u=-params['b']*v_init)", "rates = [] for spiketrain in data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty) else:", "* np.logspace(-4, 6, n, base=np.e) neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params)) neurons.record(['v', 'u', 'spikes'])", "v_init = -64 input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e) neurons =", "\"\"\" import numpy as np import matplotlib.pyplot as plt import pyNN.nest as sim", "matplotlib.pyplot as plt import pyNN.nest as sim from pyNN.utility.plotting import Figure, Panel #", 
"duration = 100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build and instrument", "1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI') v =", "v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\",", "else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates,", "Tests with the Izhikevich neuron model. \"\"\" import numpy as np import matplotlib.pyplot", "the Izhikevich neuron model. \"\"\" import numpy as np import matplotlib.pyplot as plt", "u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u", "# === Save the results, optionally plot a figure ============================= data = neurons.get_data().segments[0]", "============================= data = neurons.get_data().segments[0] first_spiketimes = [] rates = [] for spiketrain in", "(ms)\", yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem') # === Clean up and quit ========================================================", "sim.run(duration) # === Save the results, optionally plot a figure ============================= data =", "'c': -65, 'd': 6} class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd':", "= [] for spiketrain in data.spiketrains: if len(spiketrain) == 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0])", "the results, optionally plot a figure ============================= data = neurons.get_data().segments[0] first_spiketimes = []", "= neurons.get_data().segments[0] first_spiketimes = [] rates = [] for spiketrain in data.spiketrains: if", "Run the simulation ===================================================== 
sim.run(duration) # === Save the results, optionally plot a", "Izhikevich neuron model. \"\"\" import numpy as np import matplotlib.pyplot as plt import", "= 100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1) # === Build and instrument the", "=== Configure the simulator ================================================ duration = 100 dt = 0.01 sim.setup(timestep=dt, min_delay=0.1)", "rates, label='avg spikerate') plt.legend() plt.savefig('FI') v = data.filter(name=\"v\")[0] u = data.filter(name=\"u\")[0] Figure(Panel(v, ylabel=\"Membrane", "0.2, 'b': 0.26, 'c': -65, 'd': 0} params = class_2 n = 100", "duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse ttfs') plt.scatter(input_currents, rates, label='avg spikerate') plt.legend() plt.savefig('FI')", "== 0: first_spiketimes.append(np.infty) else: first_spiketimes.append(spiketrain[0]) rates.append(np.count_nonzero(spiketrain) / duration) plt.scatter(input_currents, 1 / np.array(first_spiketimes), label='inverse", "= [] rates = [] for spiketrain in data.spiketrains: if len(spiketrain) == 0:", "ylabel=\"Membrane potential (mV)\", xticks=True, xlabel=\"Time (ms)\", yticks=True), Panel(u, ylabel=\"u variable (units?)\")).save('mem') # ===" ]
[ "for i in range(c1[0], c2[0] + 1): coord_list.append((i, c1[1])) else: for i in", "< y2 and z1 >= x >= z2 and y1 <= w <=", "y2 and z1 >= x >= z2 and y1 <= w <= y2)", "i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list)", "print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if dist >", "cmd[0] == \"D\": return (0, -int(cmd[1:])) def check_intersection(horiz, vert): x = vert[0][0] y1", "(123, 15)) p = get_all_points_on_path((-123, 67), (123, 67)) print(p) if __name__ == \"__main__\":", "+ path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0, 0)]} for i in", "get_coord(cmd): if cmd[0] == \"R\": return (int(cmd[1:]), 0) elif cmd[0] == \"L\": return", "> y2 and z1 <= x <= z2 and y1 >= w >=", "path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"])", "< y2 and z1 <= x <= z2 and y1 <= w <=", "<= x <= z2 and y1 >= w >= y2) or\\ (z1 >", "in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] +", "None if (z1 < z2 and y1 < y2 and z1 <= x", "check_intersection(horiz, vert): x = vert[0][0] y1 = vert[0][1] y2 = vert[1][1] w =", "< z2 and y1 > y2 and z1 <= x <= z2 and", "return (0, -int(cmd[1:])) def check_intersection(horiz, vert): x = vert[0][0] y1 = vert[0][1] y2", "'x' coord is same if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1],", "point in intersection_points_list[1:]: if dist > (abs(point[0]) + abs(point[1])): dist = abs(point[0]) +", "+ path2_list[i][0], path2[\"complete\"][i][1] + 
path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and horizontal", "vert[0][0] y1 = vert[0][1] y2 = vert[1][1] w = horiz[0][1] z1 = horiz[0][0]", "len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) # print(path2_list) # Get absolute coords of line segments", "y2 = vert[1][1] w = horiz[0][1] z1 = horiz[0][0] z2 = horiz[1][0] to_return", "c2[0] + 1): coord_list.append((i, c1[1])) else: for i in range(c2[0], c1[0] + 1):", "= (x, w) # if to_return: print(\"<< %s :: %s >> == %s\"", "(x, w) # if to_return: print(\"<< %s :: %s >> == %s\" %", "coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\") # test() def test():", "Dist: %d\" % dist) def get_coord(cmd): if cmd[0] == \"R\": return (int(cmd[1:]), 0)", "intersection_points_list[1:]: if dist > (abs(point[0]) + abs(point[1])): dist = abs(point[0]) + abs(point[1]) print(\"Shortest", "lines path1[\"vertical\"] = [] path1[\"horizontal\"] = [] for i in range(1, len(path1[\"complete\"])): #", "(z1 > z2 and y1 > y2 and z1 >= x >= z2", "in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) # print(path2_list) # Get absolute coords of", "for v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for h_seg", "c1[1])) else: for i in range(c2[0], c1[0] + 1): coord_list.append((i, c1[1])) # coord_list.reverse()", "def check_intersection(horiz, vert): x = vert[0][0] y1 = vert[0][1] y2 = vert[1][1] w", "path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1],", "and z1 <= x <= z2 and y1 >= w >= y2) or\\", "for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # 
print(path1_list) # print(path2_list) # Get absolute", "print(\"<< %s :: %s >> == %s\" % (horiz, vert, (x,w))) return to_return", "y2) : to_return = (x, w) # if to_return: print(\"<< %s :: %s", "c2[1] + 1): coord_list.append((c1[0], i)) else: for i in range(c2[1], c1[1] + 1):", "path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) # Get relative coords of path", "\"L\": return (-int(cmd[1:]), 0) elif cmd[0] == \"U\": return (0, int(cmd[1:])) elif cmd[0]", "w <= y2) or\\ (z1 > z2 and y1 < y2 and z1", "# Segregate vertical and horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"] = [] for", "z2 and y1 <= w <= y2) or\\ (z1 > z2 and y1", "path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) # Get relative coords of path path1_list =", "1], path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) #", "is same if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif", "== c2[1]: if c1[0] < c2[0]: for i in range(c1[0], c2[0] + 1):", "(-int(cmd[1:]), 0) elif cmd[0] == \"U\": return (0, int(cmd[1:])) elif cmd[0] == \"D\":", "path intersects with vertical line of other abd vice-versa for h_seg in path1[\"horizontal\"]:", "print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list = [] # Check if", "path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0]) #", "path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] = [] for i in range(1, len(path2[\"complete\"])): #", "y1 = vert[0][1] y2 = vert[1][1] w = horiz[0][1] z1 = horiz[0][0] z2", "print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) # 
print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\"", "\"U\": return (0, int(cmd[1:])) elif cmd[0] == \"D\": return (0, -int(cmd[1:])) def check_intersection(horiz,", "y1 > y2 and z1 <= x <= z2 and y1 >= w", "(path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and", "coord_list = [] if c1[0] == c2[0]: if c1[1] < c2[1]: for i", "get_all_points_on_path(c1, c2): coord_list = [] if c1[0] == c2[0]: if c1[1] < c2[1]:", "range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) # print(path2_list)", "= path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) # Get relative coords of path path1_list", "path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and horizontal lines", "[] if c1[0] == c2[0]: if c1[1] < c2[1]: for i in range(c1[1],", "path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] = [] for i", "= vert[1][1] w = horiz[0][1] z1 = horiz[0][0] z2 = horiz[1][0] to_return =", "len(path2[\"complete\"])): # 'x' coord is same if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i", "path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list)", "in range(1, len(path1[\"complete\"])): # 'x' coord is same if path1[\"complete\"][i - 1][0] ==", "path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] = [] for i in", "path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) 
intersection_points_list = [] # Check if horizontal line", "return coord_list def main(): puzzle(\"input_day_3_1.txt\") # test() def test(): p = get_all_points_on_path((123, 67),", "Get absolute coords of line segments path1 = {\"complete\": [(0, 0)]} for i", "+ abs(point[1])): dist = abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\" % dist) def", "coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1] == c2[1]: if c1[0] < c2[0]: for", "dist) def get_coord(cmd): if cmd[0] == \"R\": return (int(cmd[1:]), 0) elif cmd[0] ==", "coord is same if path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i]))", "and y1 > y2 and z1 >= x >= z2 and y1 >=", "path1_list[0]) path2 = {\"complete\": [(0, 0)]} for i in range(0, len(path2_list)): if i:", "c1[0] == c2[0]: if c1[1] < c2[1]: for i in range(c1[1], c2[1] +", "i in range(1, len(path1[\"complete\"])): # 'x' coord is same if path1[\"complete\"][i - 1][0]", "[] path2_list = [] for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in", "<= y2) or\\ (z1 < z2 and y1 > y2 and z1 <=", "vert, (x,w))) return to_return def get_all_points_on_path(c1, c2): coord_list = [] if c1[0] ==", "= vert[0][0] y1 = vert[0][1] y2 = vert[1][1] w = horiz[0][1] z1 =", "== path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i", "- 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1] ==", "(z1 < z2 and y1 < y2 and z1 <= x <= z2", "for h_seg in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if", "\"D\": return (0, -int(cmd[1:])) def check_intersection(horiz, vert): x = vert[0][0] y1 = 
vert[0][1]", "abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if dist > (abs(point[0]) + abs(point[1])):", "elif cmd[0] == \"U\": return (0, int(cmd[1:])) elif cmd[0] == \"D\": return (0,", "(horiz, vert, (x,w))) return to_return def get_all_points_on_path(c1, c2): coord_list = [] if c1[0]", "# print(path1_list) # print(path2_list) # Get relative coords of path path1_list = []", "\"r\") as f: path1 = f.readline() path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str", "line of other abd vice-versa for h_seg in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]:", "y1 <= w <= y2) or\\ (z1 < z2 and y1 > y2", ">> == %s\" % (horiz, vert, (x,w))) return to_return def get_all_points_on_path(c1, c2): coord_list", "== path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" %", "return (-int(cmd[1:]), 0) elif cmd[0] == \"U\": return (0, int(cmd[1:])) elif cmd[0] ==", "and y1 <= w <= y2) or\\ (z1 < z2 and y1 >", "range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) # print(path2_list) # Get absolute coords of line", "<= z2 and y1 >= w >= y2) or\\ (z1 > z2 and", "int(cmd[1:])) elif cmd[0] == \"D\": return (0, -int(cmd[1:])) def check_intersection(horiz, vert): x =", "<= y2) or\\ (z1 > z2 and y1 < y2 and z1 >=", "c2[1]: for i in range(c1[1], c2[1] + 1): coord_list.append((c1[0], i)) else: for i", "intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg)", "h_seg in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point:", "intersection_point: intersection_points_list.append(intersection_point) 
print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if", "x <= z2 and y1 >= w >= y2) or\\ (z1 > z2", "i in range(1, len(path2[\"complete\"])): # 'x' coord is same if path2[\"complete\"][i - 1][0]", "cmd[0] == \"U\": return (0, int(cmd[1:])) elif cmd[0] == \"D\": return (0, -int(cmd[1:]))", "w >= y2) or\\ (z1 > z2 and y1 > y2 and z1", "for h_seg in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if", "path1[\"vertical\"] = [] path1[\"horizontal\"] = [] for i in range(1, len(path1[\"complete\"])): # 'x'", "sys def puzzle(filename): with open(filename, \"r\") as f: path1 = f.readline() path2 =", "= get_all_points_on_path((123, 67), (123, 15)) p = get_all_points_on_path((-123, 67), (123, 67)) print(p) if", "path1_list = [] path2_list = [] for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for", "abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\" % dist) def get_coord(cmd): if cmd[0] ==", "puzzle(\"input_day_3_1.txt\") # test() def test(): p = get_all_points_on_path((123, 67), (123, 15)) p =", "# Get absolute coords of line segments path1 = {\"complete\": [(0, 0)]} for", "== \"U\": return (0, int(cmd[1:])) elif cmd[0] == \"D\": return (0, -int(cmd[1:])) def", "path path1_list = [] path2_list = [] for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i]))", "and y1 <= w <= y2) or\\ (z1 > z2 and y1 <", "% path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" %", "open(filename, \"r\") as f: path1 = f.readline() path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\")", "== path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] = [] for", "y2 and z1 >= 
x >= z2 and y1 >= w >= y2)", "#!/usr/bin/python3 -u import sys def puzzle(filename): with open(filename, \"r\") as f: path1 =", "intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if dist", "path1[\"horizontal\"] = [] for i in range(1, len(path1[\"complete\"])): # 'x' coord is same", "to_return = None if (z1 < z2 and y1 < y2 and z1", "Segregate vertical and horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"] = [] for i", "[] for i in range(1, len(path1[\"complete\"])): # 'x' coord is same if path1[\"complete\"][i", "c2[0]: if c1[1] < c2[1]: for i in range(c1[1], c2[1] + 1): coord_list.append((c1[0],", "= horiz[1][0] to_return = None if (z1 < z2 and y1 < y2", "- 1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\"", "to_return: print(\"<< %s :: %s >> == %s\" % (horiz, vert, (x,w))) return", "with vertical line of other abd vice-versa for h_seg in path1[\"horizontal\"]: for v_seg", "if path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i -", "(path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0,", "+ 1): coord_list.append((c1[0], i)) else: for i in range(c2[1], c1[1] + 1): coord_list.append((c1[0],", "if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i -", "= check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for v_seg in", "[] path1[\"horizontal\"] = [] for i in range(1, 
len(path1[\"complete\"])): # 'x' coord is", "h_seg in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point:", "1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"]", "and horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"] = [] for i in range(1,", "in range(1, len(path2[\"complete\"])): # 'x' coord is same if path2[\"complete\"][i - 1][0] ==", "0) elif cmd[0] == \"L\": return (-int(cmd[1:]), 0) elif cmd[0] == \"U\": return", "0) elif cmd[0] == \"U\": return (0, int(cmd[1:])) elif cmd[0] == \"D\": return", ">= x >= z2 and y1 >= w >= y2) : to_return =", ">= w >= y2) : to_return = (x, w) # if to_return: print(\"<<", "%s\" % (horiz, vert, (x,w))) return to_return def get_all_points_on_path(c1, c2): coord_list = []", "- 1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] = [] for i in range(1,", "range(1, len(path1[\"complete\"])): # 'x' coord is same if path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]:", "path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\"", "path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and horizontal lines path1[\"vertical\"] = []", "z2 and y1 < y2 and z1 <= x <= z2 and y1", "path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) #", "(z1 < z2 and y1 > y2 and z1 <= x <= z2", "path2_list[0]) # Segregate vertical and horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"] = []", "main(): puzzle(\"input_day_3_1.txt\") # test() def test(): p = get_all_points_on_path((123, 67), (123, 15)) p", 
"for i in range(1, len(path1[\"complete\"])): # 'x' coord is same if path1[\"complete\"][i -", "1): coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1] == c2[1]: if c1[0] < c2[0]:", "{\"complete\": [(0, 0)]} for i in range(0, len(path1_list)): if i: path1[\"complete\"].insert(i + 1,", "# Get relative coords of path path1_list = [] path2_list = [] for", "v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for h_seg in", "in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point)", "== c2[0]: if c1[1] < c2[1]: for i in range(c1[1], c2[1] + 1):", "1): coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\") # test() def", "coords of line segments path1 = {\"complete\": [(0, 0)]} for i in range(0,", "vert[0][1] y2 = vert[1][1] w = horiz[0][1] z1 = horiz[0][0] z2 = horiz[1][0]", "path2_list = [] for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0,", "# 'x' coord is same if path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i -", "% (horiz, vert, (x,w))) return to_return def get_all_points_on_path(c1, c2): coord_list = [] if", "if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]:", "get_all_points_on_path((123, 67), (123, 15)) p = get_all_points_on_path((-123, 67), (123, 67)) print(p) if __name__", "else: for i in range(c2[1], c1[1] + 1): coord_list.append((c1[0], i)) # coord_list.reverse() elif", "horizontal line of one path intersects with vertical line of other abd vice-versa", "v_seg) 
if intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point", "is same if path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif", "same if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i", "else: path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"]", "v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in", "= horiz[0][1] z1 = horiz[0][0] z2 = horiz[1][0] to_return = None if (z1", "path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i -", "w >= y2) : to_return = (x, w) # if to_return: print(\"<< %s", "(abs(point[0]) + abs(point[1])): dist = abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\" % dist)", "for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) #", "vert[1][1] w = horiz[0][1] z1 = horiz[0][0] z2 = horiz[1][0] to_return = None", "test() def test(): p = get_all_points_on_path((123, 67), (123, 15)) p = get_all_points_on_path((-123, 67),", "% path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list = [] # Check if horizontal", "(int(cmd[1:]), 0) elif cmd[0] == \"L\": return (-int(cmd[1:]), 0) elif cmd[0] == \"U\":", "z2 and y1 >= w >= y2) or\\ (z1 > z2 and y1", "print(path1_list) # 
print(path2_list) # Get relative coords of path path1_list = [] path2_list", "path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for", "<= w <= y2) or\\ (z1 < z2 and y1 > y2 and", "intersects with vertical line of other abd vice-versa for h_seg in path1[\"horizontal\"]: for", "path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for", "and z1 >= x >= z2 and y1 <= w <= y2) or\\", "- 1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i]))", "path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list)", "y1 <= w <= y2) or\\ (z1 > z2 and y1 < y2", "w) # if to_return: print(\"<< %s :: %s >> == %s\" % (horiz,", "line segments path1 = {\"complete\": [(0, 0)]} for i in range(0, len(path1_list)): if", "i in range(c1[1], c2[1] + 1): coord_list.append((c1[0], i)) else: for i in range(c2[1],", "-int(cmd[1:])) def check_intersection(horiz, vert): x = vert[0][0] y1 = vert[0][1] y2 = vert[1][1]", "for i in range(c2[0], c1[0] + 1): coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list", "i in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1]", "x >= z2 and y1 >= w >= y2) : to_return = (x,", "vice-versa for h_seg in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg)", "def get_coord(cmd): if cmd[0] == \"R\": return (int(cmd[1:]), 0) elif cmd[0] == \"L\":", "[(0, 0)]} for i in 
range(0, len(path1_list)): if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0]", "= [] # Check if horizontal line of one path intersects with vertical", "dist > (abs(point[0]) + abs(point[1])): dist = abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\"", "# coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\") # test() def test(): p =", "and z1 >= x >= z2 and y1 >= w >= y2) :", "if c1[1] < c2[1]: for i in range(c1[1], c2[1] + 1): coord_list.append((c1[0], i))", "print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list = [] # Check if horizontal line of one", "path2 = {\"complete\": [(0, 0)]} for i in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i", "y2 and z1 <= x <= z2 and y1 >= w >= y2)", "Get relative coords of path path1_list = [] path2_list = [] for i", "i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0])", "other abd vice-versa for h_seg in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point =", "path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0, 0)]} for", "of line segments path1 = {\"complete\": [(0, 0)]} for i in range(0, len(path1_list)):", "c2[0]: for i in range(c1[0], c2[0] + 1): coord_list.append((i, c1[1])) else: for i", "for i in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0],", "< z2 and y1 < y2 and z1 <= x <= z2 and", "absolute coords of line segments path1 = {\"complete\": [(0, 0)]} for i in", "return (0, int(cmd[1:])) elif cmd[0] == \"D\": return (0, -int(cmd[1:])) def check_intersection(horiz, vert):", "Check if horizontal line of one path intersects with vertical line of other", "(0, -int(cmd[1:])) def check_intersection(horiz, vert): x = vert[0][0] y1 = vert[0][1] y2 
=", "i in range(0, len(path1_list)): if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1]", "67), (123, 15)) p = get_all_points_on_path((-123, 67), (123, 67)) print(p) if __name__ ==", "cmd[0] == \"L\": return (-int(cmd[1:]), 0) elif cmd[0] == \"U\": return (0, int(cmd[1:]))", "path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1]", "1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]:", "len(path1_list)): if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else:", "1): coord_list.append((c1[0], i)) else: for i in range(c2[1], c1[1] + 1): coord_list.append((c1[0], i))", "> z2 and y1 < y2 and z1 >= x >= z2 and", "# Check if horizontal line of one path intersects with vertical line of", "% dist) def get_coord(cmd): if cmd[0] == \"R\": return (int(cmd[1:]), 0) elif cmd[0]", "# print(path2_list) # Get relative coords of path path1_list = [] path2_list =", "path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0, 0)]} for i in range(0,", "= f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) #", "same if path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i", "one path intersects with vertical line of other abd vice-versa for h_seg in", "= check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + 
abs(intersection_points_list[0][1]) for", "segments path1 = {\"complete\": [(0, 0)]} for i in range(0, len(path1_list)): if i:", "elif cmd[0] == \"L\": return (-int(cmd[1:]), 0) elif cmd[0] == \"U\": return (0,", "# test() def test(): p = get_all_points_on_path((123, 67), (123, 15)) p = get_all_points_on_path((-123,", ">= x >= z2 and y1 <= w <= y2) or\\ (z1 <", "range(1, len(path2[\"complete\"])): # 'x' coord is same if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]:", "z1 >= x >= z2 and y1 <= w <= y2) or\\ (z1", "abs(point[1]) print(\"Shortest Dist: %d\" % dist) def get_coord(cmd): if cmd[0] == \"R\": return", "coord_list.reverse() elif c1[1] == c2[1]: if c1[0] < c2[0]: for i in range(c1[0],", "horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"] = [] for i in range(1, len(path1[\"complete\"])):", "path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] =", "= None if (z1 < z2 and y1 < y2 and z1 <=", "def main(): puzzle(\"input_day_3_1.txt\") # test() def test(): p = get_all_points_on_path((123, 67), (123, 15))", "c1[1])) # coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\") # test() def test(): p", "c1[1] < c2[1]: for i in range(c1[1], c2[1] + 1): coord_list.append((c1[0], i)) else:", "in range(0, len(path1_list)): if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] +", "to_return def get_all_points_on_path(c1, c2): coord_list = [] if c1[0] == c2[0]: if c1[1]", "if dist > (abs(point[0]) + abs(point[1])): dist = abs(point[0]) + abs(point[1]) print(\"Shortest Dist:", "c1[1] + 1): coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1] == c2[1]: if c1[0]", "c1[0] < c2[0]: for i in range(c1[0], c2[0] + 1): coord_list.append((i, c1[1])) else:", "+ 1): coord_list.append((i, c1[1])) else: for i in range(c2[0], c1[0] 
+ 1): coord_list.append((i,", "abd vice-versa for h_seg in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point = check_intersection(h_seg,", "== %s\" % (horiz, vert, (x,w))) return to_return def get_all_points_on_path(c1, c2): coord_list =", "+ abs(point[1]) print(\"Shortest Dist: %d\" % dist) def get_coord(cmd): if cmd[0] == \"R\":", "and y1 < y2 and z1 <= x <= z2 and y1 <=", "{\"complete\": [(0, 0)]} for i in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i + 1,", "or\\ (z1 < z2 and y1 > y2 and z1 <= x <=", "and y1 >= w >= y2) or\\ (z1 > z2 and y1 >", "else: for i in range(c2[0], c1[0] + 1): coord_list.append((i, c1[1])) # coord_list.reverse() return", "elif c1[1] == c2[1]: if c1[0] < c2[0]: for i in range(c1[0], c2[0]", "for i in range(c1[1], c2[1] + 1): coord_list.append((c1[0], i)) else: for i in", "+ 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 =", ">= w >= y2) or\\ (z1 > z2 and y1 > y2 and", "c1[1] == c2[1]: if c1[0] < c2[0]: for i in range(c1[0], c2[0] +", "# 'x' coord is same if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i -", "def test(): p = get_all_points_on_path((123, 67), (123, 15)) p = get_all_points_on_path((-123, 67), (123,", "coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\") # test() def test(): p = get_all_points_on_path((123,", "path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"])", "= path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) # Get relative coords", "and z1 <= x <= z2 and y1 <= w <= y2) or\\", ">= z2 and y1 <= w <= y2) or\\ (z1 < z2 and", "if horizontal line of one path intersects with vertical line of other abd", "if c1[0] < 
c2[0]: for i in range(c1[0], c2[0] + 1): coord_list.append((i, c1[1]))", "# print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list = [] # Check", "+ 1): coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\") # test()", "and y1 >= w >= y2) : to_return = (x, w) # if", "print(path1_list) # print(path2_list) # Get absolute coords of line segments path1 = {\"complete\":", "z2 = horiz[1][0] to_return = None if (z1 < z2 and y1 <", "path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) # print(path2_list) # Get absolute coords of line segments path1", "z1 <= x <= z2 and y1 <= w <= y2) or\\ (z1", "path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1],", "= [] for i in range(1, len(path2[\"complete\"])): # 'x' coord is same if", "-u import sys def puzzle(filename): with open(filename, \"r\") as f: path1 = f.readline()", "len(path2_list)): if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else:", "\"R\": return (int(cmd[1:]), 0) elif cmd[0] == \"L\": return (-int(cmd[1:]), 0) elif cmd[0]", "== \"L\": return (-int(cmd[1:]), 0) elif cmd[0] == \"U\": return (0, int(cmd[1:])) elif", "elif path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\" %", "path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0]) +", "c1[0] + 1): coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\") #", "for i in range(0, 
len(path1_list)): if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0],", "i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0])", "> (abs(point[0]) + abs(point[1])): dist = abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\" %", "i in range(c1[0], c2[0] + 1): coord_list.append((i, c1[1])) else: for i in range(c2[0],", "path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2", "path2[\"horizontal\"] = [] for i in range(1, len(path2[\"complete\"])): # 'x' coord is same", "z1 >= x >= z2 and y1 >= w >= y2) : to_return", "in range(c2[1], c1[1] + 1): coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1] == c2[1]:", "coord_list.append((c1[0], i)) else: for i in range(c2[1], c1[1] + 1): coord_list.append((c1[0], i)) #", "abs(point[1])): dist = abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\" % dist) def get_coord(cmd):", "print(path2_list) # Get relative coords of path path1_list = [] path2_list = []", "and y1 > y2 and z1 <= x <= z2 and y1 >=", "c2[1]: if c1[0] < c2[0]: for i in range(c1[0], c2[0] + 1): coord_list.append((i,", ">= y2) : to_return = (x, w) # if to_return: print(\"<< %s ::", "coord_list def main(): puzzle(\"input_day_3_1.txt\") # test() def test(): p = get_all_points_on_path((123, 67), (123,", "= {\"complete\": [(0, 0)]} for i in range(0, len(path1_list)): if i: path1[\"complete\"].insert(i +", ">= z2 and y1 >= w >= y2) : to_return = (x, w)", "1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]:", "if to_return: print(\"<< %s :: %s >> == %s\" % (horiz, vert, (x,w)))", "path1 = {\"complete\": [(0, 0)]} for i in range(0, 
len(path1_list)): if i: path1[\"complete\"].insert(i", "if cmd[0] == \"R\": return (int(cmd[1:]), 0) elif cmd[0] == \"L\": return (-int(cmd[1:]),", "< c2[0]: for i in range(c1[0], c2[0] + 1): coord_list.append((i, c1[1])) else: for", "= [] if c1[0] == c2[0]: if c1[1] < c2[1]: for i in", "in path2[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]:", "path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"]", "< c2[1]: for i in range(c1[1], c2[1] + 1): coord_list.append((c1[0], i)) else: for", "= [] for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)):", "range(c1[1], c2[1] + 1): coord_list.append((c1[0], i)) else: for i in range(c2[1], c1[1] +", "x >= z2 and y1 <= w <= y2) or\\ (z1 < z2", "<= z2 and y1 <= w <= y2) or\\ (z1 > z2 and", "for point in intersection_points_list[1:]: if dist > (abs(point[0]) + abs(point[1])): dist = abs(point[0])", "def puzzle(filename): with open(filename, \"r\") as f: path1 = f.readline() path2 = f.readline()", "puzzle(filename): with open(filename, \"r\") as f: path1 = f.readline() path2 = f.readline() path1_list_str", "# print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list", "i)) # coord_list.reverse() elif c1[1] == c2[1]: if c1[0] < c2[0]: for i", "y2 and z1 <= x <= z2 and y1 <= w <= y2)", "with open(filename, \"r\") as f: path1 = f.readline() path2 = f.readline() path1_list_str =", "path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0, 0)]} for i", "y2) or\\ (z1 < z2 and y1 > y2 and z1 <= x", "- 1], path2[\"complete\"][i])) elif 
path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i]))", "path2[\"vertical\"]) intersection_points_list = [] # Check if horizontal line of one path intersects", "z2 and y1 <= w <= y2) or\\ (z1 < z2 and y1", "in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) #", "z2 and y1 > y2 and z1 <= x <= z2 and y1", "i in range(c2[0], c1[0] + 1): coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list def", "[(0, 0)]} for i in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0]", "z2 and y1 > y2 and z1 >= x >= z2 and y1", "y1 < y2 and z1 >= x >= z2 and y1 <= w", "[] for i in range(1, len(path2[\"complete\"])): # 'x' coord is same if path2[\"complete\"][i", "= [] for i in range(1, len(path1[\"complete\"])): # 'x' coord is same if", "+ path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0, 0)]}", "= f.readline() path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list)", "or\\ (z1 > z2 and y1 > y2 and z1 >= x >=", "(0, int(cmd[1:])) elif cmd[0] == \"D\": return (0, -int(cmd[1:])) def check_intersection(horiz, vert): x", "# print(path2_list) # Get absolute coords of line segments path1 = {\"complete\": [(0,", "for i in range(c2[1], c1[1] + 1): coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1]", "and y1 < y2 and z1 >= x >= z2 and y1 <=", "i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) # print(path2_list) # Get absolute coords", "import sys def puzzle(filename): with open(filename, \"r\") as f: path1 = f.readline() path2", "path1.strip(\"\\n\").split(\",\") 
path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) # Get relative coords of", "== \"R\": return (int(cmd[1:]), 0) elif cmd[0] == \"L\": return (-int(cmd[1:]), 0) elif", "elif path1[\"complete\"][i - 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] = []", "range(0, len(path1_list)): if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1]))", "%s >> == %s\" % (horiz, vert, (x,w))) return to_return def get_all_points_on_path(c1, c2):", "1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] = []", "<= w <= y2) or\\ (z1 > z2 and y1 < y2 and", "x <= z2 and y1 <= w <= y2) or\\ (z1 > z2", "return (int(cmd[1:]), 0) elif cmd[0] == \"L\": return (-int(cmd[1:]), 0) elif cmd[0] ==", "v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist =", "print(\"Shortest Dist: %d\" % dist) def get_coord(cmd): if cmd[0] == \"R\": return (int(cmd[1:]),", "(x,w))) return to_return def get_all_points_on_path(c1, c2): coord_list = [] if c1[0] == c2[0]:", "%d\" % dist) def get_coord(cmd): if cmd[0] == \"R\": return (int(cmd[1:]), 0) elif", "path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"] =", "def get_all_points_on_path(c1, c2): coord_list = [] if c1[0] == c2[0]: if c1[1] <", "'x' coord is same if path1[\"complete\"][i - 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1],", "path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # 
print(path1_list) # print(path2_list) # Get", "test(): p = get_all_points_on_path((123, 67), (123, 15)) p = get_all_points_on_path((-123, 67), (123, 67))", "as f: path1 = f.readline() path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str =", ":: %s >> == %s\" % (horiz, vert, (x,w))) return to_return def get_all_points_on_path(c1,", "for i in range(1, len(path2[\"complete\"])): # 'x' coord is same if path2[\"complete\"][i -", "0)]} for i in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] +", "= horiz[0][0] z2 = horiz[1][0] to_return = None if (z1 < z2 and", "z1 <= x <= z2 and y1 >= w >= y2) or\\ (z1", "%s :: %s >> == %s\" % (horiz, vert, (x,w))) return to_return def", "# print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"]) #", "range(c2[1], c1[1] + 1): coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1] == c2[1]: if", "coords of path path1_list = [] path2_list = [] for i in range(0,", "p = get_all_points_on_path((123, 67), (123, 15)) p = get_all_points_on_path((-123, 67), (123, 67)) print(p)", "or\\ (z1 > z2 and y1 < y2 and z1 >= x >=", "w = horiz[0][1] z1 = horiz[0][0] z2 = horiz[1][0] to_return = None if", "z2 and y1 < y2 and z1 >= x >= z2 and y1", "path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"])", "= [] path2_list = [] for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i", "i in range(c2[1], c1[1] + 1): coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1] ==", "print(path2_list) # Get absolute coords of line segments path1 = {\"complete\": [(0, 0)]}", "% path2[\"vertical\"]) intersection_points_list = [] # Check if horizontal line of one path", "i)) else: for i in range(c2[1], c1[1] + 1): coord_list.append((c1[0], i)) # coord_list.reverse()", 
"horiz[0][1] z1 = horiz[0][0] z2 = horiz[1][0] to_return = None if (z1 <", "len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i])) # print(path1_list) # print(path2_list) #", "# if to_return: print(\"<< %s :: %s >> == %s\" % (horiz, vert,", "y1 >= w >= y2) : to_return = (x, w) # if to_return:", "path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1]", "1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] = [] for i in range(1, len(path2[\"complete\"])):", "z2 and y1 >= w >= y2) : to_return = (x, w) #", "+ abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if dist > (abs(point[0]) + abs(point[1])): dist", "in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point)", "to_return = (x, w) # if to_return: print(\"<< %s :: %s >> ==", "if (z1 < z2 and y1 < y2 and z1 <= x <=", "[] path2[\"horizontal\"] = [] for i in range(1, len(path2[\"complete\"])): # 'x' coord is", "dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if dist > (abs(point[0])", "(z1 > z2 and y1 < y2 and z1 >= x >= z2", "c2): coord_list = [] if c1[0] == c2[0]: if c1[1] < c2[1]: for", "coord is same if path2[\"complete\"][i - 1][0] == path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i]))", "path2[\"vertical\"] = [] path2[\"horizontal\"] = [] for i in range(1, len(path2[\"complete\"])): # 'x'", "+ 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0]) # Segregate", 
"abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if dist > (abs(point[0]) + abs(point[1])): dist =", "[] for i in range(0, len(path1_list_str)): path1_list.append(get_coord(path1_list_str[i])) for i in range(0, len(path2_list_str)): path2_list.append(get_coord(path2_list_str[i]))", "cmd[0] == \"R\": return (int(cmd[1:]), 0) elif cmd[0] == \"L\": return (-int(cmd[1:]), 0)", ">= y2) or\\ (z1 > z2 and y1 > y2 and z1 >=", "<= x <= z2 and y1 <= w <= y2) or\\ (z1 >", "coord_list.append((i, c1[1])) else: for i in range(c2[0], c1[0] + 1): coord_list.append((i, c1[1])) #", "dist = abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\" % dist) def get_coord(cmd): if", "= vert[0][1] y2 = vert[1][1] w = horiz[0][1] z1 = horiz[0][0] z2 =", "check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point", "+ 1): coord_list.append((c1[0], i)) # coord_list.reverse() elif c1[1] == c2[1]: if c1[0] <", "% path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list = []", "in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0])", "= abs(point[0]) + abs(point[1]) print(\"Shortest Dist: %d\" % dist) def get_coord(cmd): if cmd[0]", "horiz[1][0] to_return = None if (z1 < z2 and y1 < y2 and", "> z2 and y1 > y2 and z1 >= x >= z2 and", "# coord_list.reverse() elif c1[1] == c2[1]: if c1[0] < c2[0]: for i in", "path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"])", "path2[\"complete\"][i][1] + path2_list[i][1])) else: 
path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and horizontal lines path1[\"vertical\"]", "w <= y2) or\\ (z1 < z2 and y1 > y2 and z1", "path1 = f.readline() path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") #", "f: path1 = f.readline() path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\")", "y1 >= w >= y2) or\\ (z1 > z2 and y1 > y2", "if intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point =", "x = vert[0][0] y1 = vert[0][1] y2 = vert[1][1] w = horiz[0][1] z1", "range(c1[0], c2[0] + 1): coord_list.append((i, c1[1])) else: for i in range(c2[0], c1[0] +", "# print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list = [] # Check if horizontal line of", "z1 = horiz[0][0] z2 = horiz[1][0] to_return = None if (z1 < z2", "> y2 and z1 >= x >= z2 and y1 >= w >=", "y2) or\\ (z1 > z2 and y1 > y2 and z1 >= x", "path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0, 0)]} for i in range(0, len(path2_list)): if", "[] # Check if horizontal line of one path intersects with vertical line", "return to_return def get_all_points_on_path(c1, c2): coord_list = [] if c1[0] == c2[0]: if", "intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1])", "1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\":", "+ path2_list[i][1])) else: path2[\"complete\"].insert(1, path2_list[0]) # Segregate vertical and horizontal lines path1[\"vertical\"] =", "== \"D\": return (0, -int(cmd[1:])) def check_intersection(horiz, 
vert): x = vert[0][0] y1 =", "check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]:", "range(c2[0], c1[0] + 1): coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list def main(): puzzle(\"input_day_3_1.txt\")", "in range(c1[1], c2[1] + 1): coord_list.append((c1[0], i)) else: for i in range(c2[1], c1[1]", "- 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) #", "if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] + path1_list[i][0], path1[\"complete\"][i][1] + path1_list[i][1])) else: path1[\"complete\"].insert(1,", "print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list =", "vertical and horizontal lines path1[\"vertical\"] = [] path1[\"horizontal\"] = [] for i in", "= abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1]) for point in intersection_points_list[1:]: if dist > (abs(point[0]) +", "intersection_points_list = [] # Check if horizontal line of one path intersects with", "= [] path1[\"horizontal\"] = [] for i in range(1, len(path1[\"complete\"])): # 'x' coord", "horiz[0][0] z2 = horiz[1][0] to_return = None if (z1 < z2 and y1", "# print(path1_list) # print(path2_list) # Get absolute coords of line segments path1 =", ": to_return = (x, w) # if to_return: print(\"<< %s :: %s >>", "= {\"complete\": [(0, 0)]} for i in range(0, len(path2_list)): if i: path2[\"complete\"].insert(i +", "f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) # Get", "relative coords of path path1_list = [] path2_list = [] for i in", "len(path1[\"complete\"])): # 'x' coord is same if path1[\"complete\"][i - 
1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i", "range(0, len(path2_list)): if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1]))", "- 1][1] == path1[\"complete\"][i][1]: path1[\"horizontal\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) path2[\"vertical\"] = [] path2[\"horizontal\"] =", "if c1[0] == c2[0]: if c1[1] < c2[1]: for i in range(c1[1], c2[1]", "in range(c1[0], c2[0] + 1): coord_list.append((i, c1[1])) else: for i in range(c2[0], c1[0]", "1): coord_list.append((i, c1[1])) else: for i in range(c2[0], c1[0] + 1): coord_list.append((i, c1[1]))", "0)]} for i in range(0, len(path1_list)): if i: path1[\"complete\"].insert(i + 1, (path1[\"complete\"][i][0] +", "intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg,", "= [] path2[\"horizontal\"] = [] for i in range(1, len(path2[\"complete\"])): # 'x' coord", "y1 < y2 and z1 <= x <= z2 and y1 <= w", "of path path1_list = [] path2_list = [] for i in range(0, len(path1_list_str)):", "of other abd vice-versa for h_seg in path1[\"horizontal\"]: for v_seg in path2[\"vertical\"]: intersection_point", "elif cmd[0] == \"D\": return (0, -int(cmd[1:])) def check_intersection(horiz, vert): x = vert[0][0]", "of one path intersects with vertical line of other abd vice-versa for h_seg", "path1[\"vertical\"]) # print(\"%s\\n\" % path2[\"horizontal\"]) # print(\"%s\\n\" % path2[\"vertical\"]) intersection_points_list = [] #", "for v_seg in path1[\"vertical\"]: intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) print(intersection_points_list) dist", "else: path1[\"complete\"].insert(1, path1_list[0]) path2 = {\"complete\": [(0, 0)]} for i in range(0, len(path2_list)):", "1][1] == 
path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\"", "if i: path2[\"complete\"].insert(i + 1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else: path2[\"complete\"].insert(1,", "path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i -", "intersection_point = check_intersection(h_seg, v_seg) if intersection_point: intersection_points_list.append(intersection_point) for h_seg in path2[\"horizontal\"]: for v_seg", "in intersection_points_list[1:]: if dist > (abs(point[0]) + abs(point[1])): dist = abs(point[0]) + abs(point[1])", "f.readline() path2 = f.readline() path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) #", "vert): x = vert[0][0] y1 = vert[0][1] y2 = vert[1][1] w = horiz[0][1]", "15)) p = get_all_points_on_path((-123, 67), (123, 67)) print(p) if __name__ == \"__main__\": main()", "- 1][0] == path1[\"complete\"][i][0]: path1[\"vertical\"].append((path1[\"complete\"][i - 1], path1[\"complete\"][i])) elif path1[\"complete\"][i - 1][1] ==", "== path2[\"complete\"][i][0]: path2[\"vertical\"].append((path2[\"complete\"][i - 1], path2[\"complete\"][i])) elif path2[\"complete\"][i - 1][1] == path2[\"complete\"][i][1]: path2[\"horizontal\"].append((path2[\"complete\"][i", "1], path2[\"complete\"][i])) # print(\"%s\\n\" % path1[\"horizontal\"]) # print(\"%s\\n\" % path1[\"vertical\"]) # print(\"%s\\n\" %", "line of one path intersects with vertical line of other abd vice-versa for", "y1 > y2 and z1 >= x >= z2 and y1 >= w", "1, (path2[\"complete\"][i][0] + path2_list[i][0], path2[\"complete\"][i][1] + path2_list[i][1])) else: path2[\"complete\"].insert(1, 
path2_list[0]) # Segregate vertical", "vertical line of other abd vice-versa for h_seg in path1[\"horizontal\"]: for v_seg in", "path1_list_str = path1.strip(\"\\n\").split(\",\") path2_list_str = path2.strip(\"\\n\").split(\",\") # print(path1_list) # print(path2_list) # Get relative", "in range(c2[0], c1[0] + 1): coord_list.append((i, c1[1])) # coord_list.reverse() return coord_list def main():", "y2) or\\ (z1 > z2 and y1 < y2 and z1 >= x" ]
[ "for line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped", "ins=linebins[4] elif \"Total\" in line and \"DEL\" in line: dels=linebins[4] cmFile.close() if sscsReads", "sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\"", "= 0 numLocs = 0 dcsMaxDepth = 0 for line in depthFile: if", "/ numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size file print(\"Processing Insert Size\")", "dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1", "dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() #", "in line: CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line:", "in line: GtoC=linebins[4] elif \"Total\" in line and \"SNV\" in line: totalNt =", "TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif", "= 0 if totalNt > 0: mutFreq = totalMuts/totalNt else: mutFreq = 0", "= open(f\"{index}/{index}_config.sh\", 'r') for line in configFile: if \"RUN_ID=\" in line: runID =", "* linebins[1] numInsertReads += linebins[1] except StopIteration: contIter = False if numInsertReads ==", "'r') lastProportion=1 peakProportion = 0 peakSize = 1 maxSize=0 for line in tagstatsFile:", "= float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file: print(\"Processing Depth\")", "open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() 
dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n')", "rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0])", "\"C>T\" in line: CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4] elif \"G>A\" in line:", "TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\"", "C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") # get read", "0 line = next(insertSizeFile) while \"## HISTOGRAM\" not in line: line = next(insertSizeFile)", "GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for line in cmFile: if \"##\" not", "line: CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5]", "indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak", "line: totDepth += int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if", "numInsertReads += linebins[1] except StopIteration: contIter = False if numInsertReads == 0: meanInsertSize", "# get tagstats numbers tagstatsFile = 
open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0 peakSize", "elif \"G>T\" in line: GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4] elif \"Total\" in", "run ID from the config file runID=\"\" c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\",", "elif \"G>C\" in line: GtoC=linebins[4] elif \"Total\" in line and \"SNV\" in line:", "numInsertReads print(\"Processing countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\"", "= float(linebins[4]) elif \"Total\" in line and \"INS\" in line: ins=linebins[4] elif \"Total\"", "open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs = 0 dcsMaxDepth = 0 for line", "Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index {index}\") print(\"Reading config\") # Get the run", "\"C>G\" in line: CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in", "indexFile = open(o.inFile, 'r') for line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped", "# get read counts # Read tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats", "mutFreq = 0 outFile.write( f\"{runID},\" f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\" f\"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},\" f\"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},\" f\"{dcsMaxDepth},{dcsUncovered},{totalNt},{AsSeq},{TsSeq},{CsSeq},{GsSeq},{totalMuts},{mutFreq},\" f\"{AtoT},{AtoC},{AtoG},{TtoA},{TtoC},{TtoG},{CtoA},{CtoT},{CtoG},{GtoA},\" f\"{GtoT},{GtoC},{ins},{dels}\\n\" ) outFile.close()", 
"int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0:", "peakProportion = float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file: print(\"Processing", "CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for line", "in line: linebins = line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\"", "in line: totalNt = float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\" in line and", "rawPerSSCS = 0 if dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads", "dest='inFile', required=True, help='Path to indexes file, one per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args()", "if numInsertReads == 0: meanInsertSize = \"N/A\" else: meanInsertSize = totInsertSize / numInsertReads", "percentMappedSSCS = 0 rawPerSSCS = 0 if dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads", "peakSize = 1 maxSize=0 for line in tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion", "line = next(insertSizeFile) if line.strip() != \"\": linebins = [int(x) for x in", "file print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads =", "= open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads = 0 line = next(insertSizeFile) while", "= float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') 
dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0])", "next(insertSizeFile) while \"## HISTOGRAM\" not in line: line = next(insertSizeFile) contIter = True", "linebins[1] except StopIteration: contIter = False if numInsertReads == 0: meanInsertSize = \"N/A\"", "AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4] elif \"T>A\"", "float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion = 0", "linebins[0] * linebins[1] numInsertReads += linebins[1] except StopIteration: contIter = False if numInsertReads", "> 0: mutFreq = totalMuts/totalNt else: mutFreq = 0 outFile.write( f\"{runID},\" f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\" f\"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},\"", "else: percentMappedSSCS = 0 rawPerSSCS = 0 if dcsReads > 0: percentMappedDCS =", "= sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS = 0 if totalNt > 0:", "in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") #", "outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family", "size file print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads", "indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family", "Tagstats\") # get tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0", "Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's 
Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for", "# insert size file print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize =", "from the config file runID=\"\" c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for", "if \"RUN_ID=\" in line: runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif", "insert size file print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0", "elif \"Total\" in line and \"SNV\" in line: totalNt = float(linebins[5]) totalMuts =", "indexes: print(f\"Index {index}\") print(\"Reading config\") # Get the run ID from the config", "line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close()", "in line: GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4] elif \"Total\" in line and", "= 0 numInsertReads = 0 line = next(insertSizeFile) while \"## HISTOGRAM\" not in", "0 if dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS", "and \"OVERALL\" in line: linebins = line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5]", "Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index", "line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") # get", "float(rawFlagstats[0].split()[0]) #rawReads = 
float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing", "TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\"", "in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") # get read counts # Read", "in line and \"SNV\" in line: totalNt = float(linebins[5]) totalMuts = float(linebins[4]) elif", "line: AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5]", "mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion", "0 peakSize = 1 maxSize=0 for line in tagstatsFile: if float(line.split()[2]) <= lastProportion:", "else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size file print(\"Processing Insert Size\") insertSizeFile =", "line: ins=linebins[4] elif \"Total\" in line and \"DEL\" in line: dels=linebins[4] cmFile.close() if", "AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\"", "get read counts # Read tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats =", "try: line = next(insertSizeFile) if line.strip() != \"\": linebins = [int(x) for x", "import sys from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path", 
"line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile =", "# get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\"", "line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1] numInsertReads += linebins[1] except StopIteration: contIter =", "line: TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5]", "sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads =", "AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4] elif", "configFile.close() print(\"Getting read counts\") # get read counts # Read tagstats files: rawFlagstats", "ArgumentParser parser = ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes file, one per", "in line and \"INS\" in line: ins=linebins[4] elif \"Total\" in line and \"DEL\"", "in indexes: print(f\"Index {index}\") print(\"Reading config\") # Get the run ID from the", "+= linebins[1] except StopIteration: contIter = False if numInsertReads == 0: meanInsertSize =", "+= int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs !=", "for line in depthFile: if \"#\" not in line: totDepth += int(line.split('\\t')[3]) numLocs", "indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max", "for x in 
line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1] numInsertReads += linebins[1] except", "Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index {index}\") print(\"Reading config\") #", "peakProportion = 0 peakSize = 1 maxSize=0 for line in tagstatsFile: if float(line.split()[2])", "= [int(x) for x in line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1] numInsertReads +=", "in line: dels=linebins[4] cmFile.close() if sscsReads > 0: percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS =", "~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\",", "TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4] elif", "next(insertSizeFile) if line.strip() != \"\": linebins = [int(x) for x in line.strip().split('\\t')] totInsertSize", "numInsertReads = 0 line = next(insertSizeFile) while \"## HISTOGRAM\" not in line: line", "AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4] elif", "Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's", "totalMuts/totalNt else: mutFreq = 0 outFile.write( f\"{runID},\" f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\" f\"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},\" f\"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},\" f\"{dcsMaxDepth},{dcsUncovered},{totalNt},{AsSeq},{TsSeq},{CsSeq},{GsSeq},{totalMuts},{mutFreq},\" f\"{AtoT},{AtoC},{AtoG},{TtoA},{TtoC},{TtoG},{CtoA},{CtoT},{CtoG},{GtoA},\" 
f\"{GtoT},{GtoC},{ins},{dels}\\n\"", "depth file: print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs =", "StopIteration: contIter = False if numInsertReads == 0: meanInsertSize = \"N/A\" else: meanInsertSize", "elif \"Total\" in line and \"DEL\" in line: dels=linebins[4] cmFile.close() if sscsReads >", "line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4]", "linebins[1] numInsertReads += linebins[1] except StopIteration: contIter = False if numInsertReads == 0:", "= 0 if dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else:", "tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\",", "= totInsertSize / numInsertReads print(\"Processing countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile =", "GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4] elif", "next(insertSizeFile) contIter = True line = next(insertSizeFile) while contIter: try: line = next(insertSizeFile)", "0: meanInsertSize = \"N/A\" else: meanInsertSize = totInsertSize / numInsertReads print(\"Processing countmuts\") #", "x in line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1] numInsertReads += linebins[1] except StopIteration:", "\"T>C\" in line: TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4] elif \"C>A\" in line:", "True line = next(insertSizeFile) while contIter: try: line = next(insertSizeFile) if line.strip() !=", "elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4] 
elif \"C>G\"", "0 rawPerSSCS = 0 if dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS =", "if dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS =", "else: meanInsertSize = totInsertSize / numInsertReads print(\"Processing countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\")", "CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4] elif \"G>A\"", "#sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats", "= line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif", "countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\"", "cmFile: if \"##\" not in line and \"OVERALL\" in line: linebins = line.strip().split(',')", "# Read tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats", "d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") # get read counts # Read tagstats files:", "DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS", "not in line: totDepth += 
int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3]))", "SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS", "totDepth = 0 numLocs = 0 dcsMaxDepth = 0 for line in depthFile:", "meanInsertSize = \"N/A\" else: meanInsertSize = totInsertSize / numInsertReads print(\"Processing countmuts\") # get", "print(\"Processing countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\"", "int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close()", "percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS = 0", "lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion = 0 peakSize =", "\"DEL\" in line: dels=linebins[4] cmFile.close() if sscsReads > 0: percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS", "contIter = False if numInsertReads == 0: meanInsertSize = \"N/A\" else: meanInsertSize =", "file, one per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w') indexes =", "totalMuts = float(linebins[4]) elif \"Total\" in line and \"INS\" in line: ins=linebins[4] elif", "= open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile = open(o.inFile, 'r') for line in indexFile:", "Family Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered", "reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert", "= open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() 
dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0])", "= 0 line = next(insertSizeFile) while \"## HISTOGRAM\" not in line: line =", "TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\"", "line: TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4]", "print(f\"Index {index}\") print(\"Reading config\") # Get the run ID from the config file", "AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\"", "# read depth file: print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0", "runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0]", "dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size file print(\"Processing Insert", "open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~", "1 maxSize=0 for line in tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2])", "\"A>C\" in line: AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4] elif \"T>A\" in line:", "= line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file: 
print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\",", "percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS = 0", "if totalNt > 0: mutFreq = totalMuts/totalNt else: mutFreq = 0 outFile.write( f\"{runID},\"", "line and \"OVERALL\" in line: linebins = line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4]", "open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0 peakSize = 1 maxSize=0 for line in", "= 0 for line in depthFile: if \"#\" not in line: totDepth +=", "0 numLocs = 0 dcsMaxDepth = 0 for line in depthFile: if \"#\"", "countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\"", "d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for line in configFile: if \"RUN_ID=\" in line:", "!= 0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size file", "\"RUN_ID=\" in line: runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\"", "0 sscsPerDCS = 0 if totalNt > 0: mutFreq = totalMuts/totalNt else: mutFreq", "parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes file, one per line.') parser.add_argument('--config', dest='config', required=True)", "in depthFile: if \"#\" not in line: totDepth += int(line.split('\\t')[3]) numLocs += 1", "rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS = 0 if dcsReads >", "depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs = 0 dcsMaxDepth = 0", "= 0 dcsMaxDepth = 0 for line in depthFile: if \"#\" not in", "numLocs = 0 dcsMaxDepth = 0 for line in depthFile: if 
\"#\" not", "totalNt > 0: mutFreq = totalMuts/totalNt else: mutFreq = 0 outFile.write( f\"{runID},\" f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\"", "0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size file print(\"Processing", "'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n')", "float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion = 0 peakSize = line.split()[0] peakProportion =", "dcsUncovered=\"NA\" depthFile.close() # insert size file print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r')", "GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4] elif \"Total\" in line and \"SNV\" in", "ID from the config file runID=\"\" c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r')", "configFile: if \"RUN_ID=\" in line: runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0]", "in line: AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line:", ">= peakProportion: lastProportion = 0 peakSize = line.split()[0] peakProportion = float(line.split()[2]) maxSize =", "'r') for line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,%", "ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes file, one per line.') parser.add_argument('--config', dest='config',", "except StopIteration: 
contIter = False if numInsertReads == 0: meanInsertSize = \"N/A\" else:", "AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif", "Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs = 0 dcsMaxDepth =", "= [] indexFile = open(o.inFile, 'r') for line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw", "lastProportion = 0 peakSize = line.split()[0] peakProportion = float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close()", "= next(insertSizeFile) contIter = True line = next(insertSizeFile) while contIter: try: line =", "0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS =", "and \"DEL\" in line: dels=linebins[4] cmFile.close() if sscsReads > 0: percentMappedSSCS = mappedSscs/sscsReads", "peakProportion: lastProportion = 0 peakSize = line.split()[0] peakProportion = float(line.split()[2]) maxSize = line.split()[0]", "tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion", "totalNt = float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\" in line and \"INS\" in", "\"G>T\" in line: GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4] elif \"Total\" in line", "line in cmFile: if \"##\" not in line and \"OVERALL\" in line: linebins", "+= 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth /", "if numLocs != 0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert", "Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index", "Size,Max Family 
Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS", "= 0 rawPerSSCS = 0 if dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS", "numLocs += 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth", "totDepth += int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs", "line: line = next(insertSizeFile) contIter = True line = next(insertSizeFile) while contIter: try:", "the config file runID=\"\" c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for line", "mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats numbers tagstatsFile", "not in line and \"OVERALL\" in line: linebins = line.strip().split(',') if \"A>T\" in", "if float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion =", "elif \"A>C\" in line: AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4] elif \"T>A\" in", "rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS = 0 if dcsReads > 0: percentMappedDCS", "= rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS = 0 if dcsReads > 0:", "elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") # get read counts", "read depth file: print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs", "CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4] elif", "\"Total\" in line and \"SNV\" in line: totalNt = float(linebins[5]) totalMuts = float(linebins[4])", "0: 
percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS =", "in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4] elif \"T>G\" in line:", "Read tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats =", "open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\"", "\"Total\" in line and \"INS\" in line: ins=linebins[4] elif \"Total\" in line and", "{index}\") print(\"Reading config\") # Get the run ID from the config file runID=\"\"", "maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file: print(\"Processing Depth\") depthFile =", "Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads = 0 line", "AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\"", "in line and \"OVERALL\" in line: linebins = line.strip().split(',') if \"A>T\" in line:", "line in configFile: if \"RUN_ID=\" in line: runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in", "line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4]", "meanInsertSize = totInsertSize / numInsertReads print(\"Processing countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile", "linebins = line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line:", "line and \"SNV\" in line: totalNt = float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\"", "required=True, 
help='Path to indexes file, one per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile", "in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4] elif \"A>G\" in line:", "= ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes file, one per line.') parser.add_argument('--config',", "from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes", "dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r')", "numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0 peakSize = 1 maxSize=0", "in line: AtoC=linebins[4] elif \"A>G\" in line: AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4]", "lastProportion=1 peakProportion = 0 peakSize = 1 maxSize=0 for line in tagstatsFile: if", "read counts\") # get read counts # Read tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\",", "Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides", "\"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4] elif \"T>G\" in", "in line: CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4]", "CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for line in cmFile:", "sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS = 0 if totalNt > 0: mutFreq", "in line: TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line:", "\"SNV\" in line: 
totalNt = float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\" in line", "print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs = 0 dcsMaxDepth", "numLocs != 0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size", "CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for line in", "lastProportion = float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion = 0 peakSize = line.split()[0]", "Insert Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's", "AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\"", "in tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2]) >= peakProportion:", "0: mutFreq = totalMuts/totalNt else: mutFreq = 0 outFile.write( f\"{runID},\" f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\" f\"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},\" f\"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},\"", "dest='config', required=True) o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile = open(o.inFile, 'r')", "elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4] elif \"T>G\"", "numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size file print(\"Processing Insert Size\") insertSizeFile", "TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\"", "DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On 
Target,DCS", "mutFreq = totalMuts/totalNt else: mutFreq = 0 outFile.write( f\"{runID},\" f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\" f\"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},\" f\"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},\" f\"{dcsMaxDepth},{dcsUncovered},{totalNt},{AsSeq},{TsSeq},{CsSeq},{GsSeq},{totalMuts},{mutFreq},\"", "line = next(insertSizeFile) while \"## HISTOGRAM\" not in line: line = next(insertSizeFile) contIter", "for line in tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2])", "= open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads =", "open(o.inFile, 'r') for line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped", "line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,%", "GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for line in cmFile: if \"##\" not in", "Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes:", "\"INS\" in line: ins=linebins[4] elif \"Total\" in line and \"DEL\" in line: dels=linebins[4]", "file: print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs = 0", "one per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile = 
open(f\"{o.config}.summary.csv\",'w') indexes = []", "'r') totDepth = 0 numLocs = 0 dcsMaxDepth = 0 for line in", "line: linebins = line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in", "= True line = next(insertSizeFile) while contIter: try: line = next(insertSizeFile) if line.strip()", "dels=\"\" for line in cmFile: if \"##\" not in line and \"OVERALL\" in", "dels=linebins[4] cmFile.close() if sscsReads > 0: percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else:", "sscsOnTarget=\"NA\" # read depth file: print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth =", "in line: line = next(insertSizeFile) contIter = True line = next(insertSizeFile) while contIter:", "elif \"T>G\" in line: TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\"", "ins=\"\" dels=\"\" for line in cmFile: if \"##\" not in line and \"OVERALL\"", "and \"SNV\" in line: totalNt = float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\" in", "0 for line in depthFile: if \"#\" not in line: totDepth += int(line.split('\\t')[3])", "line: AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4]", "sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats numbers", "if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4] elif \"A>G\"", "line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file: print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r')", "required=True) o=parser.parse_args() outFile = 
open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile = open(o.inFile, 'r') for", "= line.split()[0] peakProportion = float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth", "numInsertReads == 0: meanInsertSize = \"N/A\" else: meanInsertSize = totInsertSize / numInsertReads print(\"Processing", "= line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4]", "\"## HISTOGRAM\" not in line: line = next(insertSizeFile) contIter = True line =", "Get the run ID from the config file runID=\"\" c=\"\" C=\"\" d=\"\" configFile", "= 0 peakSize = 1 maxSize=0 for line in tagstatsFile: if float(line.split()[2]) <=", "= open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\"", "max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0 dcsUncovered=\"NA\"", "# Get the run ID from the config file runID=\"\" c=\"\" C=\"\" d=\"\"", "'r') totInsertSize = 0 numInsertReads = 0 line = next(insertSizeFile) while \"## HISTOGRAM\"", "linebins = [int(x) for x in line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1] numInsertReads", "if line.strip() != \"\": linebins = [int(x) for x in line.strip().split('\\t')] totInsertSize +=", "in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4] elif \"G>C\" in line:", "0 if totalNt > 0: mutFreq = totalMuts/totalNt else: mutFreq = 0 outFile.write(", "totInsertSize = 0 numInsertReads = 0 line = next(insertSizeFile) while \"## HISTOGRAM\" not", "line: totalNt = float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\" in line and \"INS\"", "SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS", "Family Size,Max Family Size,Mean Insert Size,SSCS On 
Target,DCS On Target,DCS Mean Depth,DCS Max", "parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile = open(o.inFile,", "indexes = [] indexFile = open(o.inFile, 'r') for line in indexFile: indexes.append(line.strip()) indexFile.close()", "TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4] elif", "sscsPerDCS = 0 if totalNt > 0: mutFreq = totalMuts/totalNt else: mutFreq =", "elif float(line.split()[2]) >= peakProportion: lastProportion = 0 peakSize = line.split()[0] peakProportion = float(line.split()[2])", "mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS = 0 if dcsReads", "= 0 sscsPerDCS = 0 if totalNt > 0: mutFreq = totalMuts/totalNt else:", "print(\"Getting read counts\") # get read counts # Read tagstats files: rawFlagstats =", "\"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4] elif \"G>C\" in", "print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads = 0", "dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth / numLocs else:", "percentMappedDCS = 0 sscsPerDCS = 0 if totalNt > 0: mutFreq = totalMuts/totalNt", "== 0: meanInsertSize = \"N/A\" else: meanInsertSize = totInsertSize / numInsertReads print(\"Processing countmuts\")", "Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's", "line and \"DEL\" in line: dels=linebins[4] cmFile.close() if sscsReads > 0: percentMappedSSCS =", "Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation 
Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\")", "tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0 peakSize = 1", "C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for line in configFile: if \"RUN_ID=\" in", "if \"##\" not in line and \"OVERALL\" in line: linebins = line.strip().split(',') if", "import ArgumentParser parser = ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes file, one", "line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4] elif", "open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0])", "dcsReads > 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS = 0", "runID=\"\" c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for line in configFile: if", "configFile = open(f\"{index}/{index}_config.sh\", 'r') for line in configFile: if \"RUN_ID=\" in line: runID", "config\") # Get the run ID from the config file runID=\"\" c=\"\" C=\"\"", "in line: runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in", "<= lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion = 0 peakSize", "\"Total\" in line and \"DEL\" in line: dels=linebins[4] cmFile.close() if sscsReads > 0:", "if sscsReads > 0: percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS =", "for line in configFile: if \"RUN_ID=\" in line: runID = 
line.strip().split('=')[1].strip('\"') elif \"minClonal=\"", "files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines()", "open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads = 0 line = next(insertSizeFile) while \"##", "for line in cmFile: if \"##\" not in line and \"OVERALL\" in line:", "CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\" GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for", "On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's", "index in indexes: print(f\"Index {index}\") print(\"Reading config\") # Get the run ID from", "tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file: print(\"Processing Depth\") depthFile = open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth", "\"##\" not in line and \"OVERALL\" in line: linebins = line.strip().split(',') if \"A>T\"", "file runID=\"\" c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for line in configFile:", "sscsReads > 0: percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS = 0", "o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile = open(o.inFile, 'r') for line", "= mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS = 0 if", "maxSize=0 for line in tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2]) elif", "line = next(insertSizeFile) contIter = True line = next(insertSizeFile) while contIter: try: line", "not in line: line = next(insertSizeFile) contIter = True line = 
next(insertSizeFile) while", "to indexes file, one per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w')", "'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads", "= next(insertSizeFile) if line.strip() != \"\": linebins = [int(x) for x in line.strip().split('\\t')]", "argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes file,", "and \"INS\" in line: ins=linebins[4] elif \"Total\" in line and \"DEL\" in line:", "get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\"", "Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index {index}\")", "if \"#\" not in line: totDepth += int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth =", "print(\"Reading config\") # Get the run ID from the config file runID=\"\" c=\"\"", "in line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1] numInsertReads += linebins[1] except StopIteration: contIter", "config file runID=\"\" c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for line in", "\"G>C\" in line: GtoC=linebins[4] elif \"Total\" in line and \"SNV\" in line: totalNt", "CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif", "line: dels=linebins[4] cmFile.close() if sscsReads > 0: percentMappedSSCS = 
mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads", "\"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif \"A>C\" in line: AtoC=linebins[4] elif \"A>G\" in", "= \"N/A\" else: meanInsertSize = totInsertSize / numInsertReads print(\"Processing countmuts\") # get countmuts", "reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean", "line in tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion = float(line.split()[2]) elif float(line.split()[2]) >=", "totalMuts=\"\" ins=\"\" dels=\"\" for line in cmFile: if \"##\" not in line and", "while contIter: try: line = next(insertSizeFile) if line.strip() != \"\": linebins = [int(x)", "open(f\"{index}/{index}_config.sh\", 'r') for line in configFile: if \"RUN_ID=\" in line: runID = line.strip().split('=')[1].strip('\"')", "mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS Mean", "dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0])", "line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4]", "\"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") # get read counts #", "1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth / numLocs", "open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile = open(o.inFile, 'r') for line in indexFile: indexes.append(line.strip())", "GsSeq=\"\" GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" 
totalMuts=\"\" ins=\"\" dels=\"\" for line in cmFile: if", "[int(x) for x in line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1] numInsertReads += linebins[1]", "Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index {index}\") print(\"Reading config\") # Get the", "elif \"C>G\" in line: CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\"", "0 numInsertReads = 0 line = next(insertSizeFile) while \"## HISTOGRAM\" not in line:", "c=\"\" C=\"\" d=\"\" configFile = open(f\"{index}/{index}_config.sh\", 'r') for line in configFile: if \"RUN_ID=\"", "# ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get tagstats numbers tagstatsFile =", "Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index {index}\") print(\"Reading", "\"T>G\" in line: TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in", "totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for line in cmFile: if \"##\" not in line", "line: CtoG=linebins[4] elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4]", "= mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS = 0 if", "\"\": linebins = [int(x) for x in line.strip().split('\\t')] totInsertSize += linebins[0] * linebins[1]", "GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4] elif \"Total\"", "rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines() 
dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads", "> 0: percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS = 0 rawPerSSCS", "Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads = 0 line =", "= next(insertSizeFile) while contIter: try: line = next(insertSizeFile) if line.strip() != \"\": linebins", "elif \"T>C\" in line: TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4] elif \"C>A\" in", "reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On", "next(insertSizeFile) while contIter: try: line = next(insertSizeFile) if line.strip() != \"\": linebins =", "line in depthFile: if \"#\" not in line: totDepth += int(line.split('\\t')[3]) numLocs +=", "in configFile: if \"RUN_ID=\" in line: runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line:", "/ numInsertReads print(\"Processing countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r')", "= next(insertSizeFile) while \"## HISTOGRAM\" not in line: line = next(insertSizeFile) contIter =", "indexes file, one per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w') indexes", "= 0 peakSize = line.split()[0] peakProportion = float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\"", "0 dcsMaxDepth = 0 for line in depthFile: if \"#\" not in line:", "= open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) 
#sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) #", "float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\" in line and \"INS\" in line: ins=linebins[4]", "float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") # get", "= open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0 peakSize = 1 maxSize=0 for line", "line: GtoC=linebins[4] elif \"Total\" in line and \"SNV\" in line: totalNt = float(linebins[5])", "'r') for line in configFile: if \"RUN_ID=\" in line: runID = line.strip().split('=')[1].strip('\"') elif", "line.split()[0] peakProportion = float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file:", "peakSize = line.split()[0] peakProportion = float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read", "\"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\")", "dcsMaxDepth = 0 for line in depthFile: if \"#\" not in line: totDepth", "> 0: percentMappedDCS = mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS", "line and \"INS\" in line: ins=linebins[4] elif \"Total\" in line and \"DEL\" in", "= open(o.inFile, 'r') for line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS", 
"c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting", "float(line.split()[2]) >= peakProportion: lastProportion = 0 peakSize = line.split()[0] peakProportion = float(line.split()[2]) maxSize", "Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index {index}\") print(\"Reading config\") # Get", "else: percentMappedDCS = 0 sscsPerDCS = 0 if totalNt > 0: mutFreq =", "line: runID = line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line:", "while \"## HISTOGRAM\" not in line: line = next(insertSizeFile) contIter = True line", "#rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\")", "TsSeq=linebins[5] elif \"T>C\" in line: TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4] elif \"C>A\"", "= open(f\"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt\", 'r') totDepth = 0 numLocs = 0 dcsMaxDepth = 0 for", "elif \"A>G\" in line: AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\"", "totInsertSize += linebins[0] * linebins[1] numInsertReads += linebins[1] except StopIteration: contIter = False", "HISTOGRAM\" not in line: line = next(insertSizeFile) contIter = True line = next(insertSizeFile)", "= totalMuts/totalNt else: mutFreq = 0 outFile.write( f\"{runID},\" 
f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\" f\"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},\" f\"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},\" f\"{dcsMaxDepth},{dcsUncovered},{totalNt},{AsSeq},{TsSeq},{CsSeq},{GsSeq},{totalMuts},{mutFreq},\" f\"{AtoT},{AtoC},{AtoG},{TtoA},{TtoC},{TtoG},{CtoA},{CtoT},{CtoG},{GtoA},\"", "GtoC=linebins[4] elif \"Total\" in line and \"SNV\" in line: totalNt = float(linebins[5]) totalMuts", "\"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line:", "= float(linebins[5]) totalMuts = float(linebins[4]) elif \"Total\" in line and \"INS\" in line:", "totInsertSize / numInsertReads print(\"Processing countmuts\") # get countmuts data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\",", "cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\"", "get tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0 peakSize =", "contIter: try: line = next(insertSizeFile) if line.strip() != \"\": linebins = [int(x) for", "in line: ins=linebins[4] elif \"Total\" in line and \"DEL\" in line: dels=linebins[4] cmFile.close()", "[] indexFile = open(o.inFile, 'r') for line in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS", "line.strip() != \"\": linebins = [int(x) for x in line.strip().split('\\t')] totInsertSize += linebins[0]", "in line: totDepth += int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth = max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\"", "counts # Read tagstats files: rawFlagstats = 
open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\", 'r').readlines()", "depthFile: if \"#\" not in line: totDepth += int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth", "elif \"G>A\" in line: GtoA=linebins[4] GsSeq=linebins[5] elif \"G>T\" in line: GtoT=linebins[4] elif \"G>C\"", "'r').readlines() dcsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt\", 'r').readlines() rawReads = float(rawFlagstats[0].split()[0]) #rawReads = float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0])", "sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS = 0 if totalNt >", "in indexFile: indexes.append(line.strip()) indexFile.close() outFile.write(\"RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped", "0 peakSize = line.split()[0] peakProportion = float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" #", "Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in indexes: print(f\"Index {index}\") print(\"Reading config\")", "cmFile.close() if sscsReads > 0: percentMappedSSCS = mappedSscs/sscsReads rawPerSSCS = rawReads/sscsReads else: percentMappedSSCS", "Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\\n\") for index in", "+= linebins[0] * linebins[1] numInsertReads += linebins[1] except StopIteration: contIter = False if", "'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\" TtoG=\"\" CsSeq=\"\" CtoA=\"\" CtoT=\"\" CtoG=\"\"", "= False if 
numInsertReads == 0: meanInsertSize = \"N/A\" else: meanInsertSize = totInsertSize", "data sys.stderr.write(f\"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n\") cmFile = open(f\"{index}/Final/dcs/{runID}.dcs.countmuts.csv\", 'r') AsSeq=\"\" AtoT=\"\" AtoC=\"\" AtoG=\"\" TsSeq=\"\" TtoA=\"\" TtoC=\"\"", "parser = ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to indexes file, one per line.')", "\"N/A\" else: meanInsertSize = totInsertSize / numInsertReads print(\"Processing countmuts\") # get countmuts data", "counts\") # get read counts # Read tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines()", "\"C>A\" in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4] elif \"C>G\" in", "in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0]", "help='Path to indexes file, one per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile =", "float(linebins[4]) elif \"Total\" in line and \"INS\" in line: ins=linebins[4] elif \"Total\" in", "\"#\" not in line: totDepth += int(line.split('\\t')[3]) numLocs += 1 dcsMaxDepth = max(dcsMaxDepth,", "line: GtoT=linebins[4] elif \"G>C\" in line: GtoC=linebins[4] elif \"Total\" in line and \"SNV\"", "for index in indexes: print(f\"Index {index}\") print(\"Reading config\") # Get the run ID", "the run ID from the config file runID=\"\" c=\"\" C=\"\" d=\"\" configFile =", "float(line.split()[2]) maxSize = line.split()[0] tagstatsFile.close() sscsOnTarget=\"NA\" # read depth file: print(\"Processing Depth\") depthFile", "dcsMeanDepth=0 dcsUncovered=\"NA\" depthFile.close() # insert size file print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\",", "line 
= next(insertSizeFile) while contIter: try: line = next(insertSizeFile) if line.strip() != \"\":", "per line.') parser.add_argument('--config', dest='config', required=True) o=parser.parse_args() outFile = open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile", "depthFile.close() # insert size file print(\"Processing Insert Size\") insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize", "elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read", "\"OVERALL\" in line: linebins = line.strip().split(',') if \"A>T\" in line: AtoT=linebins[4] AsSeq=linebins[5] elif", "elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\" in", "False if numInsertReads == 0: meanInsertSize = \"N/A\" else: meanInsertSize = totInsertSize /", "line: d=line.strip().split('=')[1].split()[0] configFile.close() print(\"Getting read counts\") # get read counts # Read tagstats", "insertSizeFile = open(f\"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt\", 'r') totInsertSize = 0 numInsertReads = 0 line = next(insertSizeFile)", "in line and \"DEL\" in line: dels=linebins[4] cmFile.close() if sscsReads > 0: percentMappedSSCS", "!= \"\": linebins = [int(x) for x in line.strip().split('\\t')] totInsertSize += linebins[0] *", "CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4] elif", "GtoA=\"\" GtoT=\"\" GtoC=\"\" totalNt=\"\" totalMuts=\"\" ins=\"\" dels=\"\" for line in cmFile: if \"##\"", "outFile = open(f\"{o.config}.summary.csv\",'w') indexes = [] indexFile = open(o.inFile, 'r') for line in", "elif \"C>T\" in line: CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4] elif \"G>A\" in", "elif \"Total\" in line and \"INS\" in line: ins=linebins[4] elif \"Total\" in 
line", "in line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4] elif \"C>G\" in line:", "mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On Target,DCS On", "read counts # Read tagstats files: rawFlagstats = open(f\"{index}/Stats/data/{runID}.temp.sort.flagstats.txt\", 'r').readlines() sscsFlagstats = open(f\"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt\",", "sys from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--indexes', dest='inFile', required=True, help='Path to", "= 1 maxSize=0 for line in tagstatsFile: if float(line.split()[2]) <= lastProportion: lastProportion =", "On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation", "line.strip().split('=')[1].strip('\"') elif \"minClonal=\" in line: c=line.strip().split('=')[1].split()[0] elif \"maxClonal=\" in line: C=line.strip().split('=')[1].split()[0] elif \"minDepth=\"", "tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion = 0 peakSize = 1 maxSize=0 for", "print(\"Processing Tagstats\") # get tagstats numbers tagstatsFile = open(f\"{index}/Stats/data/{runID}.tagstats.txt\", 'r') lastProportion=1 peakProportion =", "in line: TtoC=linebins[4] elif \"T>G\" in line: TtoG=linebins[4] elif \"C>A\" in line: CtoA=linebins[4]", "line: CtoA=linebins[4] CsSeq=linebins[5] elif \"C>T\" in line: CtoT=linebins[4] elif \"C>G\" in line: CtoG=linebins[4]", "in cmFile: if \"##\" not in line and \"OVERALL\" in line: linebins =", "else: mutFreq = 0 outFile.write( f\"{runID},\" f\"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},\" f\"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},\" f\"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},\" 
f\"{dcsMaxDepth},{dcsUncovered},{totalNt},{AsSeq},{TsSeq},{CsSeq},{GsSeq},{totalMuts},{mutFreq},\" f\"{AtoT},{AtoC},{AtoG},{TtoA},{TtoC},{TtoG},{CtoA},{CtoT},{CtoG},{GtoA},\" f\"{GtoT},{GtoC},{ins},{dels}\\n\" )", "mappedDcs/dcsReads sscsPerDCS = sscsReads/dcsReads else: percentMappedDCS = 0 sscsPerDCS = 0 if totalNt", "= max(dcsMaxDepth, int(line.split('\\t')[3])) dcsOnTarget=\"NA\" if numLocs != 0: dcsMeanDepth=totDepth / numLocs else: dcsMeanDepth=0", "= float(line.split()[2]) elif float(line.split()[2]) >= peakProportion: lastProportion = 0 peakSize = line.split()[0] peakProportion", "contIter = True line = next(insertSizeFile) while contIter: try: line = next(insertSizeFile) if", "= float(pysam.flagstat(f\"{index}/{runID}.temp.sort.bam\").split('\\n')[0].split()[0]) #sscsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.sscs.sort.bam\").split('\\n') sscsReads=float(sscsFlagstats[0].split()[0]) mappedSscs=float(sscsFlagstats[4].split()[0]) # ~ dcsFlagstat=pysam.flagstat(f\"{index}/{runID}_mem.dcs.sort.bam\").split('\\n') dcsReads=float(dcsFlagstats[0].split()[0]) mappedDcs=float(dcsFlagstats[4].split()[0]) print(\"Processing Tagstats\") #", "\"A>G\" in line: AtoG=linebins[4] elif \"T>A\" in line: TtoA=linebins[4] TsSeq=linebins[5] elif \"T>C\" in" ]
[ "print(sequence[i - 1]) if __name__ == '__main__': G = Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c')", "only if its not visited if current_vert not in visited: # For a", "G = Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a',", "already visited dfs(G, nbr, visited, sequence) # recursively traverse the neighbouring node def", "Start traversing from the root node only if its not visited if current_vert", "if __name__ == '__main__': G = Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f')", "Dictionary to mark the visited nodes for current_vert in G: # G contains", "node def DFSTraversal(G): sequence = [] visited = {} # Dictionary to mark", "vertex objects # Start traversing from the root node only if its not", "visited node #print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): # take", "to mark the visited nodes for current_vert in G: # G contains vertex", "sequence) # recursively traverse the neighbouring node def DFSTraversal(G): sequence = [] visited", "is called only once dfs(G, current_vert, visited, sequence) print(sequence) length = len(sequence) for", "True # mark the visited node #print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr", "# recursively traverse the neighbouring node def DFSTraversal(G): sequence = [] visited =", "for i in range(length, 0, -1): print(sequence[i - 1]) if __name__ == '__main__':", "0, -1): print(sequence[i - 1]) if __name__ == '__main__': G = Graph() G.add_vertex('a')", "current_vert, visited, sequence) print(sequence) length = len(sequence) for i in range(length, 0, -1):", "i in range(length, 0, -1): print(sequence[i - 1]) if __name__ == '__main__': G", "1]) if __name__ == '__main__': G = Graph() 
G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e')", "'d', 1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1) G.add_edge('e', 'a', 1) print('Graph data:')", "the neighbour node is already visited dfs(G, nbr, visited, sequence) # recursively traverse", "__name__ == '__main__': G = Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a',", "For a connected graph this is called only once dfs(G, current_vert, visited, sequence)", "dfs(G, current_vert, visited, sequence) print(sequence) length = len(sequence) for i in range(length, 0,", "visited nodes for current_vert in G: # G contains vertex objects # Start", "once dfs(G, current_vert, visited, sequence) print(sequence) length = len(sequence) for i in range(length,", "node #print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): # take a", "node if nbr not in visited: # condition to check whether the neighbour", "G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1) G.add_edge('c',", "in G: # G contains vertex objects # Start traversing from the root", "G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1)", "visited if current_vert not in visited: # For a connected graph this is", "'d', 1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e',", "if its not visited if current_vert not in visited: # For a connected", "in visited: # For a connected graph this is called only once dfs(G,", "in visited: # condition to check whether the neighbour node is already visited", "print(sequence) length = len(sequence) for i in range(length, 0, -1): print(sequence[i - 1])", "= len(sequence) for i in range(length, 0, -1): print(sequence[i - 1]) if __name__", "1) G.add_edge('a', 'c', 1) 
G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1)", "mark the visited nodes for current_vert in G: # G contains vertex objects", "len(sequence) for i in range(length, 0, -1): print(sequence[i - 1]) if __name__ ==", "# G contains vertex objects # Start traversing from the root node only", "-1): print(sequence[i - 1]) if __name__ == '__main__': G = Graph() G.add_vertex('a') G.add_vertex('b')", "its not visited if current_vert not in visited: # For a connected graph", "sequence) print(sequence) length = len(sequence) for i in range(length, 0, -1): print(sequence[i -", "this is called only once dfs(G, current_vert, visited, sequence) print(sequence) length = len(sequence)", "take a neighbouring node if nbr not in visited: # condition to check", "\" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): # take a neighbouring node", "the visited nodes for current_vert in G: # G contains vertex objects #", "1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1)", "the neighbouring node def DFSTraversal(G): sequence = [] visited = {} # Dictionary", "G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1) G.add_edge('d',", "# take a neighbouring node if nbr not in visited: # condition to", "check whether the neighbour node is already visited dfs(G, nbr, visited, sequence) #", "if nbr not in visited: # condition to check whether the neighbour node", "dfs(G, nbr, visited, sequence) # recursively traverse the neighbouring node def DFSTraversal(G): sequence", "nbr, visited, sequence) # recursively traverse the neighbouring node def DFSTraversal(G): sequence =", "DFSTraversal(G): sequence = [] visited = {} # Dictionary to mark the visited", "is already visited dfs(G, nbr, visited, sequence) # recursively traverse the neighbouring node", "the visited node #print(\"traversal: \" + currentVert.get_vertex_ID()) 
sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): #", "length = len(sequence) for i in range(length, 0, -1): print(sequence[i - 1]) if", "'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1) G.add_edge('e', 'a',", "= True # mark the visited node #print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for", "nodes for current_vert in G: # G contains vertex objects # Start traversing", "from __future__ import print_function from graph_list import Graph def dfs(G, current_vert, visited, sequence):", "import Graph def dfs(G, current_vert, visited, sequence): visited[current_vert] = True # mark the", "mark the visited node #print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections():", "visited dfs(G, nbr, visited, sequence) # recursively traverse the neighbouring node def DFSTraversal(G):", "graph_list import Graph def dfs(G, current_vert, visited, sequence): visited[current_vert] = True # mark", "condition to check whether the neighbour node is already visited dfs(G, nbr, visited,", "current_vert in G: # G contains vertex objects # Start traversing from the", "1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1) G.add_edge('e', 'a', 1) print('Graph data:') #print(G.get_edges())", "contains vertex objects # Start traversing from the root node only if its", "traversing from the root node only if its not visited if current_vert not", "G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1) G.add_edge('e', 'a', 1) print('Graph data:') #print(G.get_edges()) DFSTraversal(G)", "current_vert.get_connections(): # take a neighbouring node if nbr not in visited: # condition", "to check whether the neighbour node is already visited dfs(G, nbr, visited, sequence)", "objects # Start traversing from the root node only if its not visited", "visited, sequence): visited[current_vert] = True # mark the 
visited node #print(\"traversal: \" +", "range(length, 0, -1): print(sequence[i - 1]) if __name__ == '__main__': G = Graph()", "from graph_list import Graph def dfs(G, current_vert, visited, sequence): visited[current_vert] = True #", "__future__ import print_function from graph_list import Graph def dfs(G, current_vert, visited, sequence): visited[current_vert]", "= Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c',", "whether the neighbour node is already visited dfs(G, nbr, visited, sequence) # recursively", "neighbour node is already visited dfs(G, nbr, visited, sequence) # recursively traverse the", "G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1) G.add_edge('e',", "G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1) G.add_edge('e', 'a', 1) print('Graph", "sequence): visited[current_vert] = True # mark the visited node #print(\"traversal: \" + currentVert.get_vertex_ID())", "traverse the neighbouring node def DFSTraversal(G): sequence = [] visited = {} #", "visited, sequence) print(sequence) length = len(sequence) for i in range(length, 0, -1): print(sequence[i", "G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b', 'd', 1)", "visited: # condition to check whether the neighbour node is already visited dfs(G,", "'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd',", "recursively traverse the neighbouring node def DFSTraversal(G): sequence = [] visited = {}", "in current_vert.get_connections(): # take a neighbouring node if nbr not in visited: #", "G contains vertex objects # Start traversing from the root node only if", "'c', 1) G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e',", "# mark the visited node 
#print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in", "= [] visited = {} # Dictionary to mark the visited nodes for", "# condition to check whether the neighbour node is already visited dfs(G, nbr,", "graph this is called only once dfs(G, current_vert, visited, sequence) print(sequence) length =", "the root node only if its not visited if current_vert not in visited:", "sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): # take a neighbouring node if nbr not", "in range(length, 0, -1): print(sequence[i - 1]) if __name__ == '__main__': G =", "from the root node only if its not visited if current_vert not in", "Graph def dfs(G, current_vert, visited, sequence): visited[current_vert] = True # mark the visited", "def dfs(G, current_vert, visited, sequence): visited[current_vert] = True # mark the visited node", "'__main__': G = Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1)", "not visited if current_vert not in visited: # For a connected graph this", "= {} # Dictionary to mark the visited nodes for current_vert in G:", "nbr in current_vert.get_connections(): # take a neighbouring node if nbr not in visited:", "visited, sequence) # recursively traverse the neighbouring node def DFSTraversal(G): sequence = []", "== '__main__': G = Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b',", "G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b', 'd', 1) G.add_edge('b', 'e',", "for current_vert in G: # G contains vertex objects # Start traversing from", "current_vert not in visited: # For a connected graph this is called only", "- 1]) if __name__ == '__main__': G = Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d')", "{} # Dictionary to 
mark the visited nodes for current_vert in G: #", "not in visited: # condition to check whether the neighbour node is already", "visited[current_vert] = True # mark the visited node #print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID())", "visited = {} # Dictionary to mark the visited nodes for current_vert in", "neighbouring node if nbr not in visited: # condition to check whether the", "sequence = [] visited = {} # Dictionary to mark the visited nodes", "def DFSTraversal(G): sequence = [] visited = {} # Dictionary to mark the", "connected graph this is called only once dfs(G, current_vert, visited, sequence) print(sequence) length", "import print_function from graph_list import Graph def dfs(G, current_vert, visited, sequence): visited[current_vert] =", "G.add_edge('a', 'c', 1) G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c',", "+ currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): # take a neighbouring node if", "[] visited = {} # Dictionary to mark the visited nodes for current_vert", "# Dictionary to mark the visited nodes for current_vert in G: # G", "visited: # For a connected graph this is called only once dfs(G, current_vert,", "<filename>algorithm/COMP90038/Graph/TopologicalSort.py<gh_stars>0 from __future__ import print_function from graph_list import Graph def dfs(G, current_vert, visited,", "a neighbouring node if nbr not in visited: # condition to check whether", "dfs(G, current_vert, visited, sequence): visited[current_vert] = True # mark the visited node #print(\"traversal:", "1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1) G.add_edge('d', 'e', 1) G.add_edge('e', 'a', 1)", "only once dfs(G, current_vert, visited, sequence) print(sequence) length = len(sequence) for i in", "print_function from graph_list import Graph def dfs(G, current_vert, visited, sequence): visited[current_vert] = True", "node is 
already visited dfs(G, nbr, visited, sequence) # recursively traverse the neighbouring", "# For a connected graph this is called only once dfs(G, current_vert, visited,", "1) G.add_edge('b', 'd', 1) G.add_edge('b', 'e', 1) G.add_edge('c', 'd', 1) G.add_edge('c', 'e', 1)", "a connected graph this is called only once dfs(G, current_vert, visited, sequence) print(sequence)", "root node only if its not visited if current_vert not in visited: #", "currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): # take a neighbouring node if nbr", "# Start traversing from the root node only if its not visited if", "G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b', 'd',", "called only once dfs(G, current_vert, visited, sequence) print(sequence) length = len(sequence) for i", "G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b',", "G: # G contains vertex objects # Start traversing from the root node", "neighbouring node def DFSTraversal(G): sequence = [] visited = {} # Dictionary to", "if current_vert not in visited: # For a connected graph this is called", "#print(\"traversal: \" + currentVert.get_vertex_ID()) sequence.append(current_vert.get_vertex_ID()) for nbr in current_vert.get_connections(): # take a neighbouring", "G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1) G.add_edge('b', 'd', 1) G.add_edge('b',", "for nbr in current_vert.get_connections(): # take a neighbouring node if nbr not in", "Graph() G.add_vertex('a') G.add_vertex('b') G.add_vertex('c') G.add_vertex('d') G.add_vertex('e') G.add_vertex('f') G.add_edge('a', 'b', 1) G.add_edge('a', 'c', 1)", "node only if its not visited if current_vert not in visited: # For", "not in visited: # 
For a connected graph this is called only once", "current_vert, visited, sequence): visited[current_vert] = True # mark the visited node #print(\"traversal: \"", "nbr not in visited: # condition to check whether the neighbour node is" ]
[ "keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ], entry_points={ \"console_scripts\":", "author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended Audience :: Developers\",", "find_packages, setup long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\",", "long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended Audience", "description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3", "\"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\",", ":: 3 - Alpha\", \"Intended Audience :: Developers\", \"Topic :: Software Development ::", "Tools\", \"License :: OSI Approved :: MIT License\", \"Programming Language :: Python ::", "pathlib import Path from setuptools import find_packages, setup long_description: str = (Path(__file__).parent.resolve() /", "Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\",", "Software Development :: Build Tools\", \"License :: OSI Approved :: MIT License\", \"Programming", "classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended Audience :: Developers\", \"Topic ::", ":: 
OSI Approved :: MIT License\", \"Programming Language :: Python :: 3 ::", ":: MIT License\", \"Programming Language :: Python :: 3 :: Only\", \"Programming Language", "\"Intended Audience :: Developers\", \"Topic :: Software Development :: Build Tools\", \"License ::", "from setuptools import find_packages, setup long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" )", "setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development", "<4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ], entry_points={ \"console_scripts\": [ \"crawler=crawler:cli\", ], }, )", ":: Only\", \"Programming Language :: Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"),", "Language :: Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[", "packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ], entry_points={ \"console_scripts\": [ \"crawler=crawler:cli\", ],", "Language :: Python :: 3 :: Only\", \"Programming Language :: Python :: 3.8\",", "3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ],", "\"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ], entry_points={ \"console_scripts\": [ \"crawler=crawler:cli\",", ") setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", 
long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[", "\"Programming Language :: Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\",", "\"Topic :: Software Development :: Build Tools\", \"License :: OSI Approved :: MIT", "Development :: Build Tools\", \"License :: OSI Approved :: MIT License\", \"Programming Language", "Python :: 3 :: Only\", \"Programming Language :: Python :: 3.8\", ], keywords=\"crawler\",", "License\", \"Programming Language :: Python :: 3 :: Only\", \"Programming Language :: Python", "Audience :: Developers\", \"Topic :: Software Development :: Build Tools\", \"License :: OSI", "- Alpha\", \"Intended Audience :: Developers\", \"Topic :: Software Development :: Build Tools\",", "Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3 - Alpha\",", "Developers\", \"Topic :: Software Development :: Build Tools\", \"License :: OSI Approved ::", "encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\",", "\"License :: OSI Approved :: MIT License\", \"Programming Language :: Python :: 3", "from pathlib import Path from setuptools import find_packages, setup long_description: str = (Path(__file__).parent.resolve()", "import Path from setuptools import find_packages, setup long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text(", "OSI Approved :: MIT License\", \"Programming Language :: Python :: 3 :: Only\",", ":: 3.8\", ], 
keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\",", "], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ], entry_points={", "3 - Alpha\", \"Intended Audience :: Developers\", \"Topic :: Software Development :: Build", "3 :: Only\", \"Programming Language :: Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"},", "package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ], entry_points={ \"console_scripts\": [", "setup long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A", "long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended", "(Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\",", ":: Software Development :: Build Tools\", \"License :: OSI Approved :: MIT License\",", "= (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description,", "setuptools import find_packages, setup long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup(", "\"Programming Language :: Python :: 3 :: Only\", 
\"Programming Language :: Python ::", "Approved :: MIT License\", \"Programming Language :: Python :: 3 :: Only\", \"Programming", "str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\",", "url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended Audience ::", ":: 3 :: Only\", \"Programming Language :: Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\":", "MIT License\", \"Programming Language :: Python :: 3 :: Only\", \"Programming Language ::", "Only\", \"Programming Language :: Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8,", "Status :: 3 - Alpha\", \"Intended Audience :: Developers\", \"Topic :: Software Development", "Path from setuptools import find_packages, setup long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\"", "Alpha\", \"Intended Audience :: Developers\", \"Topic :: Software Development :: Build Tools\", \"License", "long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web", "name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status", ":: Python :: 3 :: Only\", \"Programming Language :: Python :: 3.8\", ],", "python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\", \"aiohttp==3.8.1\", \"yarl==1.7.2\", ], entry_points={ \"console_scripts\": [ \"crawler=crawler:cli\", ], },", ":: Developers\", \"Topic :: Software Development :: Build Tools\", \"License :: OSI Approved", 
":: Build Tools\", \"License :: OSI Approved :: MIT License\", \"Programming Language ::", ":: Python :: 3.8\", ], keywords=\"crawler\", package_dir={\"\": \"src\"}, packages=find_packages(where=\"src\"), python_requires=\">=3.8, <4\", install_requires=[ \"click==7.1.2\",", "/ \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\", version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\",", "import find_packages, setup long_description: str = (Path(__file__).parent.resolve() / \"README.md\").read_text( encoding=\"utf-8\" ) setup( name=\"crawler\",", "Build Tools\", \"License :: OSI Approved :: MIT License\", \"Programming Language :: Python", "author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended Audience :: Developers\", \"Topic", "Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 3 -", "\"Development Status :: 3 - Alpha\", \"Intended Audience :: Developers\", \"Topic :: Software", "version=\"0.0.0\", description=\"A Web Crawler\", long_description=long_description, long_description_content_type=\"text/markdown\", url=\"https://github.com/darkslab/Crawler\", author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status ::" ]