code
stringlengths
22
1.05M
apis
listlengths
1
3.31k
extract_api
stringlengths
75
3.25M
from pygameImporter import pygame from Frame.baseFunctions import * from Frame.gui.Gui import Gui class ProgressBar(Gui): def __init__(self, fillPercentage, fillColor, *args, **kwargs): super().__init__(*args, **kwargs) output("Progress Bar: Creating " + self.text + " progress bar...", "debug") self.progress = fillPercentage self.fillColor = fillColor self.touchable = False def setProgress(self, progress): output("Progress Bar: Setting progress to " + str(progress) + "%...", "debug") if progress > 100: progress = 100 self.progress = progress def render(self): super().render(False) output("Progress Bar: Getting points for drawing...", "complete") points = [[self.coords[0] + 1, self.coords[1] + 1], [self.coords[0] + self.coords[2] * self.progress / 100 - 1, self.coords[1] + 1], [self.coords[0] + self.coords[2] * self.progress / 100 - 1, self.coords[1] + self.coords[3] - 1], [self.coords[0] + 1, self.coords[1] + self.coords[3] - 1]] output("Progress Bar: Drawing...", "complete") pygame.draw.polygon(self.window.surface, self.fillColor, points) output("Progress Bar: Rendering text...", "complete") try: if not self.writable and self.text == "": self.renderObj.text(self.fontFile, int(self.textSize + self.height - self.startCoords[3]), self.enterText, self.antialias, self.textColor, None, self.window.surface, width = self.width, height = self.height, addX = self.x, addY = self.y) except AttributeError: self.renderObj.text(self.fontFile, int(self.textSize + self.height - self.startCoords[3]), self.text, self.antialias, self.textColor, None, self.window.surface, width = self.width, height = self.height, addX = self.x, addY = self.y)
[ "pygameImporter.pygame.draw.polygon" ]
[((1171, 1235), 'pygameImporter.pygame.draw.polygon', 'pygame.draw.polygon', (['self.window.surface', 'self.fillColor', 'points'], {}), '(self.window.surface, self.fillColor, points)\n', (1190, 1235), False, 'from pygameImporter import pygame\n')]
from enums.enums import MediusEnum, CallbackStatus from utils import utils from medius.mediuspackets.disbandclanresponse import DisbandClanResponseSerializer class DisbandClanSerializer: data_dict = [ {'name': 'mediusid', 'n_bytes': 2, 'cast': None}, {'name': 'message_id', 'n_bytes': MediusEnum.MESSAGEID_MAXLEN, 'cast': None}, {'name': 'session_key', 'n_bytes': MediusEnum.SESSIONKEY_MAXLEN, 'cast': None}, {'name': 'buf', 'n_bytes': 2, 'cast': None}, {'name': 'clan_id', 'n_bytes': 4, 'cast': utils.bytes_to_int_little}, ] class DisbandClanHandler: def process(self, serialized, monolith, con): client_manager = monolith.get_client_manager() client_manager.disband_clan(serialized['clan_id']) return [DisbandClanResponseSerializer.build( serialized['message_id'], CallbackStatus.SUCCESS )]
[ "medius.mediuspackets.disbandclanresponse.DisbandClanResponseSerializer.build" ]
[((784, 873), 'medius.mediuspackets.disbandclanresponse.DisbandClanResponseSerializer.build', 'DisbandClanResponseSerializer.build', (["serialized['message_id']", 'CallbackStatus.SUCCESS'], {}), "(serialized['message_id'],\n CallbackStatus.SUCCESS)\n", (819, 873), False, 'from medius.mediuspackets.disbandclanresponse import DisbandClanResponseSerializer\n')]
# coding=utf-8 from bs4 import BeautifulSoup import copy import time import json import LOGIN import MENU class PlannedCourseInfo: def __init__(self, main_num=None, name=None, code=None, margin=None, detail=None, url=None, course_dic=None): if course_dic is None: self.num = str(main_num) self.name = str(name) self.code = str(code) self.margin = str(margin) self.url = url self.detail = copy.deepcopy(detail) else: self.num = course_dic["num"] self.name = course_dic["name"] self.code = course_dic["code"] self.margin = course_dic["margin"] self.url = course_dic["url"] self.detail = course_dic["detail"] def show_course_summary(self): print("主编号:" + self.num + "\t名称:" + self.name + "\t代码:" + self.code) def show_course_info(self): for item in self.detail: print(" ∟____ 辅编号:" + item["secondary_num"] + "\t教师:" + item["teacher"] + "\t时间:" + item["time"]) # print(self.code) def to_json(self): """ 将本类的数据转换为一个json,并返回字符串 """ js = {"name": self.name, "num": self.num, "code": self.code, "margin": self.margin, "url": self.url, "detail": self.detail} return json.dumps(js) class PlannedCourse: """ 思路: 1:登录 2:进入选课界面 3:抓取课程信息并保存 4:用户输入想要抢的一门或几门课程 5:开始抢课 """ def __init__(self, account): """初始化登录""" self.account = account self.english_course = [] self.professional_course = [] self.target = "" def init_menu(self): """输出菜单,并输入想要抢的课程""" menu_dic = { "-1": "更新数据(需要等待一分半左右)", "1": "本专业课程", "2": "大学英语扩展课", "0": "退出", } menu = MENU.MENU(menu_dic=menu_dic) menu.print_list() while True: _key = input(">>>") if int(_key) == 1: # 设置本专业课程target self.get_professional_course() print("输入课程编号选择课程,0返回") for item in self.professional_course: item.show_course_summary() length = len(self.professional_course) while True: i_key = input("(主编号)>>>") if 0 < int(i_key) <= length: print("你选择了", self.professional_course[int(i_key) - 1].name) self.professional_course[int(i_key) - 1].show_course_info() item_length = len(self.professional_course[int(i_key) - 1].detail) while True: j_key = input("(辅编号)>>>") if 1 <= int(j_key) <= item_length: detail = self.professional_course[int(i_key) - 1].detail[int(j_key) - 1] print("你选择了: 辅编号:", detail["secondary_num"], "\t教师:", detail["teacher"], "\t时间:", detail["time"]) tmp = i_key + ":" + j_key self.target = tmp self.attack_professional() return elif int(j_key) == 0: break else: print("请输入正确的数字") elif int(i_key) == 0: break elif int(i_key) == -1: self.update_course() else: print("请输入正确的数字") elif int(_key) == 2: # 设置英语扩展课课程target self.get_english_course() print("输入课程编号选择课程,0返回") for item in self.english_course: item.show_course_summary() length = len(self.english_course) while True: i_key = input("(主编号)>>>") if 0 < int(i_key) <= length: print("你选择了", self.english_course[int(i_key) - 1].name) self.english_course[int(i_key) - 1].show_course_info() item_length = len(self.english_course[int(i_key) - 1].detail) while True: j_key = input("(辅编号)>>>") if 1 <= int(j_key) <= item_length: detail = self.english_course[int(i_key) - 1].detail[int(j_key) - 1] print("你选择了: 辅编号:", detail["secondary_num"], "\t教师:", detail["teacher"], "\t时间:", detail["time"]) tmp = i_key + ":" + j_key self.target = tmp self.attack_english() return elif int(j_key) == 0: break else: print("请输入正确的数字") elif int(i_key) == 0: break elif int(i_key) == -1: self.update_course() else: print("请输入正确的数字") # elif int(_key) == 3: # pass elif int(_key) == -1: self.update_course() elif int(_key) == 0: return else: print("请输入正确的数字") def __catch_view_state(self): """抓取 HTML中的 VIEWSTATE""" url = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data[ "username"] + "&xm=" + self.account.name + "&gnmkdm=N121101" header = LOGIN.ZUCC.InitHeader header["Referer"] = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data["username"] response = self.account.session.get(url=url, headers=header) while response.status_code == 302: response = self.account.session.get(url=url, headers=header) time.sleep(0.2) self.account.soup = BeautifulSoup(response.text, "lxml") # print(response.status_code) def __enter_english_page(self): """进入计划内选课--英语页面,为抓取数据做准备""" self.__catch_view_state() url = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data["username"] post_data = {"__EVENTTARGET": "", "__EVENTARGUMENT": "", "__LASTFOCUS": "", "__VIEWSTATEGENERATOR": "4842AF95", "zymc": "0121%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%A7%91%E5%AD%A6%E4%B8%8E%E6%8A%80%E6%9C%AF%E4%B8%BB%E4%BF%AE%E4%B8%93%E4%B8%9A%7C%7C2019", "xx": "", "Button3": "大学英语拓展课", "__VIEWSTATE": self.account.soup.find(name='input', id="__VIEWSTATE")["value"]} response = self.account.session.post(url=url, data=post_data) self.account.soup = BeautifulSoup(response.text, "lxml") links = self.account.soup.find_all(name="tr") return links def __enter_professional_course(self): """进入计划内选课--本专业页面,为抓取数据做准备""" self.__catch_view_state() url = LOGIN.ZUCC.PlanCourageURL + "?xh=" + self.account.account_data["username"] post_data = {"__EVENTTARGET": "", "__EVENTARGUMENT": "", "__LASTFOCUS": "", "__VIEWSTATEGENERATOR": "4842AF95", "xx": "", "Button5": "本专业选课", "__VIEWSTATE": self.account.soup.find(name='input', id="__VIEWSTATE")["value"]} response = self.account.session.post(url=url, data=post_data) # print(response.text) self.account.soup = BeautifulSoup(response.text, "lxml") links = self.account.soup.find_all(name="tr") return links def get_english_course(self): """从文件中取得课程数据""" js_file = open("english_information.json", "r", encoding='utf-8') js_list = json.load(js_file) js_file.close() for course in js_list: tmp = PlannedCourseInfo(course_dic=course) self.english_course.append(tmp) def get_professional_course(self): """从文件中取得课程数据""" js_file = open("professional_information.json", "r", encoding='utf-8') js_list = json.load(js_file) js_file.close() for course in js_list: tmp = PlannedCourseInfo(course_dic=course) self.professional_course.append(tmp) def update_course(self): """更新课程信息并保存到文件""" links = self.__enter_english_page() course_list = [] i = 1 # 遍历10种英语课程 for link in links[1:-1]: tmp = link.find_all("td") detail = [] url = "http://" + LOGIN.ZUCC.DOMAIN + tmp[0].find(name="a")["onclick"][21:-8] header = LOGIN.ZUCC.InitHeader header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh="+self.account.account_data['username'] time.sleep(4) item_response = self.account.session.get(url=url, headers=header) item_soup = BeautifulSoup(item_response.text, "lxml") item_trs = item_soup.find_all(name="tr") j = 1 print('.', end='') # 遍历所以的教学班 for item_tr in item_trs[1:-1]: tds = item_tr.find_all("td") detail_td = {"secondary_num": str(j), "code": tds[0].find(name="input")["value"], "teacher": tds[2].find(name="a").text, "time": tds[3].text, "margin": str(int(tds[11].text) - int(tds[13].text)) + "/" + tds[11].text} # 将教学班信息打包成列表 detail.append(detail_td) j += 1 tmp = link.find_all("td") course_list.append( PlannedCourseInfo(main_num=i, name=tmp[1].find(name="a").text, code=tmp[0].find(name="a").text, margin=tmp[9].text, detail=detail, url=url)) i += 1 js_str = "[" flag = True for course in course_list: if flag: js_str += course.to_json() flag = False else: js_str += "," + course.to_json() js_str += "]" # 缓存在文件 english_file = open("english_information.json", "w", encoding='utf-8') english_file.write(js_str) english_file.close() links = self.__enter_professional_course() course_list = [] i = 1 # 遍历专业课程 for link in links[1:-1]: tmp = link.find_all("td") detail = [] url = "http://" + LOGIN.ZUCC.DOMAIN + "/clsPage/xsxjs.aspx?" + "xkkh=" + \ tmp[0].find(name="a")["onclick"].split("=")[1][0:-3] + "&xh=" + self.account.account_data["username"] header = LOGIN.ZUCC.InitHeader header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh=31901040" time.sleep(4) # print(url) item_response = self.account.session.get(url=url, headers=header) # print(item_response.text) item_soup = BeautifulSoup(item_response.text, "lxml") item_trs = item_soup.find_all(name="tr") j = 1 print('.', end='') # 遍历所以的教学班 for item_tr in item_trs[1:-1]: tds = item_tr.find_all("td") detail_td = {"secondary_num": str(j), "code": tds[0].find(name="input")["value"], "teacher": tds[2].find(name="a").text, "time": tds[3].text, "margin": str(int(tds[11].text) - int(tds[13].text)) + "/" + tds[11].text} # 将教学班信息打包成列表 detail.append(detail_td) j += 1 tmp = link.find_all("td") course_list.append( PlannedCourseInfo(main_num=i, name=tmp[1].find(name="a").text, code=tmp[0].find(name="a").text, margin=tmp[9].text, detail=detail, url=url)) i += 1 js_str = "[" flag = True for course in course_list: if flag: js_str += course.to_json() flag = False else: js_str += "," + course.to_json() js_str += "]" # 缓存在文件 professional_file = open("professional_information.json", "w", encoding='utf-8') professional_file.write(js_str) professional_file.close() print("\n更新完成!") def attack_english(self): self.get_english_course() self.__enter_english_page() course_xy = self.target.split(":") x = int(course_xy[0]) y = int(course_xy[1]) header = LOGIN.ZUCC.InitHeader header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh=31901040" response = self.account.session.get(url=self.english_course[x - 1].url, headers=header) # print(self.english_course[x - 1].url) self.account.soup = BeautifulSoup(response.text, "lxml") post_data = {"__EVENTTARGET": "Button1", "__VIEWSTATEGENERATOR": "55DF6E88", "RadioButtonList1": "1", "xkkh": self.english_course[x - 1].detail[y - 1]["code"], "__VIEWSTATE": self.account.soup.find_all(name='input', id="__VIEWSTATE")[0]["value"]} while True: response = self.account.session.post(url=self.english_course[x - 1].url, data=post_data) soup = BeautifulSoup(response.text, "lxml") try: reply = soup.find(name="script").text.split("'")[1] except BaseException: reply = "未知错误" print(reply+"\t\t"+str(time.strftime('%m-%d-%H-%M-%S',time.localtime(time.time())))) if reply == "选课成功!": return def attack_professional(self): self.get_professional_course() self.__enter_professional_course() course_xy = self.target.split(":") x = int(course_xy[0]) y = int(course_xy[1]) header = LOGIN.ZUCC.InitHeader header["Referer"] = "http://xk.zucc.edu.cn/xs_main.aspx?xh=31901040" response = self.account.session.get(url=self.professional_course[x - 1].url, headers=header) # print(self.professional_course[x - 1].url) # print(response.text) self.account.soup = BeautifulSoup(response.text, "lxml") post_data = {"__EVENTTARGET": "Button1", "__VIEWSTATEGENERATOR": "55DF6E88", "RadioButtonList1": "1", "xkkh": self.professional_course[x - 1].detail[y - 1]["code"], "__VIEWSTATE": self.account.soup.find_all(name='input', id="__VIEWSTATE")[0]["value"]} while True: response = self.account.session.post(url=self.professional_course[x - 1].url, data=post_data) soup = BeautifulSoup(response.text, "lxml") try: reply = soup.find(name="script").text.split("'")[1] except BaseException: reply = "未知错误" print(reply) if reply == "选课成功!": return if __name__ == "__main__": account = LOGIN.Account() account.login() planned_course_spider = PlannedCourse(account) # planned_course_spider.update_course() planned_course_spider.init_menu() # planned_course_spider.catch_english_course() # planned_course_spider.update_course()
[ "copy.deepcopy", "json.load", "json.dumps", "time.sleep", "time.time", "MENU.MENU", "LOGIN.Account", "bs4.BeautifulSoup" ]
[((15458, 15473), 'LOGIN.Account', 'LOGIN.Account', ([], {}), '()\n', (15471, 15473), False, 'import LOGIN\n'), ((1384, 1398), 'json.dumps', 'json.dumps', (['js'], {}), '(js)\n', (1394, 1398), False, 'import json\n'), ((1933, 1961), 'MENU.MENU', 'MENU.MENU', ([], {'menu_dic': 'menu_dic'}), '(menu_dic=menu_dic)\n', (1942, 1961), False, 'import MENU\n'), ((6288, 6324), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (6301, 6324), False, 'from bs4 import BeautifulSoup\n'), ((7087, 7123), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (7100, 7123), False, 'from bs4 import BeautifulSoup\n'), ((7805, 7841), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (7818, 7841), False, 'from bs4 import BeautifulSoup\n'), ((8069, 8087), 'json.load', 'json.load', (['js_file'], {}), '(js_file)\n', (8078, 8087), False, 'import json\n'), ((8405, 8423), 'json.load', 'json.load', (['js_file'], {}), '(js_file)\n', (8414, 8423), False, 'import json\n'), ((13214, 13250), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (13227, 13250), False, 'from bs4 import BeautifulSoup\n'), ((14620, 14656), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (14633, 14656), False, 'from bs4 import BeautifulSoup\n'), ((474, 495), 'copy.deepcopy', 'copy.deepcopy', (['detail'], {}), '(detail)\n', (487, 495), False, 'import copy\n'), ((6244, 6259), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (6254, 6259), False, 'import time\n'), ((9096, 9109), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (9106, 9109), False, 'import time\n'), ((9212, 9253), 'bs4.BeautifulSoup', 'BeautifulSoup', (['item_response.text', '"""lxml"""'], {}), "(item_response.text, 'lxml')\n", (9225, 9253), False, 'from bs4 import BeautifulSoup\n'), ((11128, 11141), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (11138, 11141), False, 'import time\n'), ((11309, 11350), 'bs4.BeautifulSoup', 'BeautifulSoup', (['item_response.text', '"""lxml"""'], {}), "(item_response.text, 'lxml')\n", (11322, 11350), False, 'from bs4 import BeautifulSoup\n'), ((13730, 13766), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (13743, 13766), False, 'from bs4 import BeautifulSoup\n'), ((15147, 15183), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (15160, 15183), False, 'from bs4 import BeautifulSoup\n'), ((13998, 14009), 'time.time', 'time.time', ([], {}), '()\n', (14007, 14009), False, 'import time\n')]
import os from bitfield import BitField from constance import config from django.db import models from django.contrib.auth.models import User from django.conf import settings from django.core.validators import MaxValueValidator, MinValueValidator from django.db.models.signals import post_save import django.db.models.options as options from django.dispatch import receiver from timezone_field import TimeZoneField from notifications.vk import VK ALL_WEEKDAYS_BITMAP = 127 options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('file_fields',) class PublishModel(models.Model): is_published = models.BooleanField(verbose_name='публикация', default=True) class Meta: abstract = True def publish(self): self.is_published = True super(PublishModel, self).save() def unpublish(self): self.is_published = False super(PublishModel, self).save() class PublishFileQuerySet(models.query.QuerySet): def delete(self): for d in self: list_of_image_fields = [f['field_name'] for f in d._meta.__dict__.get('file_fields', [])] d.delete_files(list_of_image_fields) super(PublishFileQuerySet, self).delete() class PublishFileManager(models.Manager): def get_queryset(self): return PublishFileQuerySet(self.model, using=self._db) class PublishFileModel(PublishModel): objects = PublishFileManager() class Meta: abstract = True def delete_files(self, list_of_fieldnames=list()): model = self.__class__ try: obj = model.objects.get(pk=self.pk) except model.DoesNotExist: return # Delete all selected image fields within a model for field in list_of_fieldnames: try: # path = obj._meta.get_field(field).path path = getattr(obj, field).path if os.path.isfile(path): os.remove(path) except ValueError: pass def get_old_file_path_if_changed(self): model = self.__class__ list_of_field_names = list() try: instance = model.objects.get(pk=self.pk) except model.DoesNotExist: return list() for field in instance._meta.__dict__.get('file_fields', []): fieldname = field['field_name'] try: new_path = getattr(self, fieldname).path except ValueError: new_path = '' try: old_path = getattr(instance, fieldname).path except ValueError: old_path = '' if new_path != old_path: list_of_field_names.append(fieldname) return list_of_field_names def additional_action_on_save(self, list_of_changed_image_fields, created): """ To be overwritten in child models. """ pass def save(self, *args, **kwargs): created = not self.id list_of_changed_image_fields = self.get_old_file_path_if_changed() self.delete_files(list_of_changed_image_fields) super(PublishModel, self).save(*args, **kwargs) self.additional_action_on_save(list_of_changed_image_fields, created) super(PublishModel, self).save() def delete(self, *args, **kwargs): list_of_image_fields = [d['field_name'] for d in self._meta.__dict__.get('file_fields', [])] self.delete_files(list_of_image_fields) super(PublishModel, self).save(*args, **kwargs) super(PublishModel, self).delete(*args, **kwargs) class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='Пользователь') timezone = TimeZoneField(default=settings.DEFAULT_TIME_ZONE, verbose_name='Временная зона') email_confirmed = models.BooleanField(default=False, verbose_name='Email подтвержден') send_distributions = models.BooleanField(verbose_name='Отправлять рассылку', default=False) send_hour = models.IntegerField(verbose_name='Час рассылки', default=16, validators=[MaxValueValidator(23), MinValueValidator(0)]) weekdays = BitField(verbose_name='Битовый код дней рассылки', flags=(('monday', 'Понедельник'), ('tuesday', 'Вторник'), ('wednesday', 'Среда'), ('thursday', 'Четверг'), ('friday', 'Пятница'), ('saturday', 'Суббота'), ('sunday', 'Воскресенье')), default=ALL_WEEKDAYS_BITMAP) class Meta: db_table = 'user_profile' verbose_name = 'Профиль пользователя' verbose_name_plural = 'Профили пользователей' def __str__(self): return self.user.username def is_staff(self): return self.user.is_staff is_staff.short_description = 'Модератор' def is_superuser(self): return self.user.is_superuser is_superuser.short_description = 'Суперпользователь' @receiver(post_save, sender=User) def create_user_profile(sender, instance, created, **kwargs): if created: Profile.objects.create( user=instance, email_confirmed=instance.is_staff ) instance.profile.save()
[ "django.db.models.OneToOneField", "os.remove", "django.core.validators.MinValueValidator", "django.dispatch.receiver", "django.db.models.BooleanField", "os.path.isfile", "timezone_field.TimeZoneField", "bitfield.BitField", "django.core.validators.MaxValueValidator" ]
[((4974, 5006), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (4982, 5006), False, 'from django.dispatch import receiver\n'), ((597, 657), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""публикация"""', 'default': '(True)'}), "(verbose_name='публикация', default=True)\n", (616, 657), False, 'from django.db import models\n'), ((3634, 3720), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""Пользователь"""'}), "(User, on_delete=models.CASCADE, verbose_name=\n 'Пользователь')\n", (3654, 3720), False, 'from django.db import models\n'), ((3731, 3816), 'timezone_field.TimeZoneField', 'TimeZoneField', ([], {'default': 'settings.DEFAULT_TIME_ZONE', 'verbose_name': '"""Временная зона"""'}), "(default=settings.DEFAULT_TIME_ZONE, verbose_name='Временная зона'\n )\n", (3744, 3816), False, 'from timezone_field import TimeZoneField\n'), ((3834, 3902), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Email подтвержден"""'}), "(default=False, verbose_name='Email подтвержден')\n", (3853, 3902), False, 'from django.db import models\n'), ((3928, 3998), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""Отправлять рассылку"""', 'default': '(False)'}), "(verbose_name='Отправлять рассылку', default=False)\n", (3947, 3998), False, 'from django.db import models\n'), ((4185, 4460), 'bitfield.BitField', 'BitField', ([], {'verbose_name': '"""Битовый код дней рассылки"""', 'flags': "(('monday', 'Понедельник'), ('tuesday', 'Вторник'), ('wednesday', 'Среда'),\n ('thursday', 'Четверг'), ('friday', 'Пятница'), ('saturday', 'Суббота'),\n ('sunday', 'Воскресенье'))", 'default': 'ALL_WEEKDAYS_BITMAP'}), "(verbose_name='Битовый код дней рассылки', flags=(('monday',\n 'Понедельник'), ('tuesday', 'Вторник'), ('wednesday', 'Среда'), (\n 'thursday', 'Четверг'), ('friday', 'Пятница'), ('saturday', 'Суббота'),\n ('sunday', 'Воскресенье')), default=ALL_WEEKDAYS_BITMAP)\n", (4193, 4460), False, 'from bitfield import BitField\n'), ((1890, 1910), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1904, 1910), False, 'import os\n'), ((4124, 4145), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(23)'], {}), '(23)\n', (4141, 4145), False, 'from django.core.validators import MaxValueValidator, MinValueValidator\n'), ((4147, 4167), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (4164, 4167), False, 'from django.core.validators import MaxValueValidator, MinValueValidator\n'), ((1932, 1947), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1941, 1947), False, 'import os\n')]
import os import numpy as np from frovedis.exrpc.server import FrovedisServer from frovedis.matrix.dvector import FrovedisDvector from frovedis.matrix.dense import FrovedisRowmajorMatrix FrovedisServer.initialize("mpirun -np 2 {}".format(os.environ['FROVEDIS_SERVER'])) dv = FrovedisDvector([1,2,3,4,5,6,7,8],dtype=np.float64) dv.debug_print() FrovedisServer.shut_down()
[ "frovedis.exrpc.server.FrovedisServer.shut_down", "frovedis.matrix.dvector.FrovedisDvector" ]
[((277, 336), 'frovedis.matrix.dvector.FrovedisDvector', 'FrovedisDvector', (['[1, 2, 3, 4, 5, 6, 7, 8]'], {'dtype': 'np.float64'}), '([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float64)\n', (292, 336), False, 'from frovedis.matrix.dvector import FrovedisDvector\n'), ((346, 372), 'frovedis.exrpc.server.FrovedisServer.shut_down', 'FrovedisServer.shut_down', ([], {}), '()\n', (370, 372), False, 'from frovedis.exrpc.server import FrovedisServer\n')]
from manageXML.management.commands._giella_xml import GiellaXML from django.core.management.base import BaseCommand, CommandError import os, glob, sys from manageXML.models import * from django.conf import settings from collections import defaultdict ignore_affiliations = False def create_lexeme(ll: GiellaXML.Item, lang: Language, datafile: DataFile = None): try: _l = Lexeme.objects.get(lexeme=ll.text.strip(), pos=ll.pos.strip(), homoId=ll.homoId, language=lang) except: _l = Lexeme.objects.create( lexeme=ll.text.strip(), pos=ll.pos.strip(), homoId=ll.homoId, language=lang, contlex=ll.contlex.strip(), imported_from=datafile) _filtered_attributes = ll.filtered_attributes() for _k, _v in _filtered_attributes.items(): _metadata_type = None if _k == 'gen': _metadata_type = GENDER elif _k == 'type': _metadata_type = LEXEME_TYPE elif _k == 'ignore': _metadata_type = IGNORE_TAG else: _v = "{},{}".format(_k, _v.strip()) _lmd, created = LexemeMetadata.objects.get_or_create(lexeme=_l, type=_metadata_type, text=_v) if ignore_affiliations: return _l title = _l.find_akusanat_affiliation() # link it if title: a, created = Affiliation.objects.get_or_create(lexeme=_l, title=title, type=AKUSANAT, link="{}{}".format(settings.WIKI_URL, title)) return _l def parseXML(filename, filepos): print("processing: " + filename) g = GiellaXML.parse_file(filename) gl = Language.objects.get(id=g.lang) # src_language langs = { g.lang: gl } filename_only = os.path.splitext(os.path.basename(filename))[0] df = DataFile(lang_source=gl, lang_target=None, name=filename_only) df.save() for e in g.elements: _ll = None _l = None try: for lg in e.get('lg', []): _l = lg.get('l', []) if not _l: continue # Add ignore=fst to the lexeme if e.ignore: _l.attributes['ignore'] = e.ignore _ll = create_lexeme(_l[0], gl, df) # create the lemma for stg in lg.get('stg', []): for st in stg.get('st', []): # stems s, created = Stem.objects.get_or_create(lexeme=_ll, text=st.text.strip(), homoId=st.homoId, contlex=st.contlex) # add the stems if not _ll: # shouldn't happen but if it did, then we shouldn't get it there continue for mg in e.get('mg', []): l_relations = defaultdict(list) for tg in mg.get('tg', []): # translations _lang = tg.attributes.get('xml:lang') if _lang and _lang not in langs: try: langs[_lang] = Language.objects.get(id=_lang) except: continue for t in tg.get('t', []): _t = create_lexeme(t, langs[_lang], df) r, created = Relation.objects.get_or_create(lexeme_from=_ll, lexeme_to=_t) l_relations[_lang].append(r) for xg in mg.get('xg', []): # examples x = xg.get('x', []) if not x: continue x = x[0].text _xl, created = Example.objects.get_or_create(lexeme=_ll, text=x) for xt in xg.get('xt', []): _lang = xt.attributes.get('xml:lang') if _lang not in l_relations: continue _r = l_relations[_lang].pop(0) re_src, created = RelationExample.objects.get_or_create(relation=_r, text=x, language=gl) xtt = xt.text re_tgt, created = RelationExample.objects.get_or_create(relation=_r, text=xtt, language=langs[_lang]) # add the link between the relations here # RelationExampleRelation.objects.get_or_create(...) for semantic in mg.get('semantics', []): pass for defNative in mg.get('defNative', []): if not defNative or not defNative.text: continue _lmd, created = LexemeMetadata.objects.get_or_create(lexeme=_ll, type=DEF_NATIVE, text=defNative.text.strip()) for source in e.get('sources', []): pass except Exception as err: sys.stderr.write("Error @ %s: %s\n" % (str(_l[0].text) if _l and len(_l) > 0 else '', str(err))) class Command(BaseCommand): ''' Example: python manage.py import_xml -d ../saame/ Add --ignore-affiliations when debugging and want to speed up imports. ''' help = 'This command imports the content of a all Giella XML documents in a directory.' def add_arguments(self, parser): parser.add_argument('-d', '--dir', type=str, help='The directory containing XML files.', ) parser.add_argument('--ignore-affiliations', dest='ignore_affiliations', action='store_true') parser.set_defaults(ignore_affiliations=False) def handle(self, *args, **options): global ignore_affiliations xml_dir = options['dir'] # the directory containing the XML files ignore_affiliations = options['ignore_affiliations'] if not os.path.isdir(xml_dir): raise CommandError('Directory "%s" does not exist.' % xml_dir) for filename in glob.glob(os.path.join(xml_dir, '*.xml')): # read each file and parse it filepos = filename.split('/')[-1].split('_')[:-1] try: parseXML(filename, filepos) except Exception as err: self.stderr.write(self.style.ERROR('Error processing %s: %s' % (filename, str(err)))) self.stdout.write(self.style.SUCCESS('Successfully imported the files in %s.' % (xml_dir,)))
[ "os.path.basename", "os.path.isdir", "manageXML.management.commands._giella_xml.GiellaXML.parse_file", "collections.defaultdict", "django.core.management.base.CommandError", "os.path.join" ]
[((1595, 1625), 'manageXML.management.commands._giella_xml.GiellaXML.parse_file', 'GiellaXML.parse_file', (['filename'], {}), '(filename)\n', (1615, 1625), False, 'from manageXML.management.commands._giella_xml import GiellaXML\n'), ((1760, 1786), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1776, 1786), False, 'import os, glob, sys\n'), ((5902, 5924), 'os.path.isdir', 'os.path.isdir', (['xml_dir'], {}), '(xml_dir)\n', (5915, 5924), False, 'import os, glob, sys\n'), ((5944, 6000), 'django.core.management.base.CommandError', 'CommandError', (['(\'Directory "%s" does not exist.\' % xml_dir)'], {}), '(\'Directory "%s" does not exist.\' % xml_dir)\n', (5956, 6000), False, 'from django.core.management.base import BaseCommand, CommandError\n'), ((6036, 6066), 'os.path.join', 'os.path.join', (['xml_dir', '"""*.xml"""'], {}), "(xml_dir, '*.xml')\n", (6048, 6066), False, 'import os, glob, sys\n'), ((2795, 2812), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2806, 2812), False, 'from collections import defaultdict\n')]
import math import os import xml.etree.ElementTree import numpy as np import paddle import six from PIL import Image from utils import image_util class Settings(object): def __init__(self, label_file_path=None, resize_h=300, resize_w=300, mean_value=127.5, std_value=0.007843, apply_distort=True, apply_expand=True, ap_version='11point'): self._ap_version = ap_version self._label_list = [] if label_file_path is not None: with open(label_file_path, 'r', encoding='utf-8') as f: lines = f.readlines() for line in lines: self._label_list.append(line.strip().replace('\n', '')) self._apply_distort = apply_distort self._apply_expand = apply_expand self._resize_height = resize_h self._resize_width = resize_w self._img_mean = mean_value self._img_std = std_value self._expand_prob = 0.5 self._expand_max_ratio = 4 self._hue_prob = 0.5 self._hue_delta = 18 self._contrast_prob = 0.5 self._contrast_delta = 0.5 self._saturation_prob = 0.5 self._saturation_delta = 0.5 self._brightness_prob = 0.5 self._brightness_delta = 0.125 @property def ap_version(self): return self._ap_version @property def apply_expand(self): return self._apply_expand @property def apply_distort(self): return self._apply_distort @property def label_list(self): return self._label_list @property def resize_h(self): return self._resize_height @property def resize_w(self): return self._resize_width @property def img_mean(self): return self._img_mean @property def img_std(self): return self._img_std def preprocess(img, bbox_labels, mode, settings): img_width, img_height = img.size sampled_labels = bbox_labels if mode == 'train': if settings._apply_distort: img = image_util.distort_image(img, settings) if settings._apply_expand: img, bbox_labels, img_width, img_height = image_util.expand_image( img, bbox_labels, img_width, img_height, settings) # sampling, hard-code here batch_sampler = [image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0), image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0), image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0), image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0), image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0), image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0), image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0)] sampled_bbox = image_util.generate_batch_samples(batch_sampler, bbox_labels) img = np.array(img) if len(sampled_bbox) > 0: idx = int(np.random.uniform(0, len(sampled_bbox))) img, sampled_labels = image_util.crop_image(img, bbox_labels, sampled_bbox[idx], img_width, img_height) img = Image.fromarray(img) img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS) img = np.array(img) if mode == 'train': mirror = int(np.random.uniform(0, 2)) if mirror == 1: img = img[:, ::-1, :] for i in range(len(sampled_labels)): tmp = sampled_labels[i][1] sampled_labels[i][1] = 1 - sampled_labels[i][3] sampled_labels[i][3] = 1 - tmp # HWC to CHW if len(img.shape) == 3: img = np.swapaxes(img, 1, 2) img = np.swapaxes(img, 1, 0) img = img.astype('float32') img -= settings.img_mean img = img * settings.img_std return img, sampled_labels def pascalvoc(settings, file_list, mode, batch_size, shuffle): def reader(): if mode == 'train' and shuffle: np.random.shuffle(file_list) batch_out = [] cnt = 0 for image in file_list: image_path, label_path = image.split('\t') if not os.path.exists(image_path): raise ValueError("%s is not exist, you should specify data path correctly." % image_path) im = Image.open(image_path) if im.mode == 'L': im = im.convert('RGB') im_width, im_height = im.size # layout: label | xmin | ymin | xmax | ymax | difficult bbox_labels = [] root = xml.etree.ElementTree.parse(label_path).getroot() for object in root.findall('object'): # start from 1 bbox_sample = [float(settings.label_list.index(object.find('name').text))] bbox = object.find('bndbox') difficult = float(object.find('difficult').text) bbox_sample.append(float(bbox.find('xmin').text) / im_width) bbox_sample.append(float(bbox.find('ymin').text) / im_height) bbox_sample.append(float(bbox.find('xmax').text) / im_width) bbox_sample.append(float(bbox.find('ymax').text) / im_height) bbox_sample.append(difficult) bbox_labels.append(bbox_sample) im, sample_labels = preprocess(im, bbox_labels, mode, settings) sample_labels = np.array(sample_labels) if len(sample_labels) == 0: continue im = im.astype('float32') boxes = sample_labels[:, 1:5] lbls = sample_labels[:, 0].astype('int32') difficults = sample_labels[:, -1].astype('int32') batch_out.append((im, boxes, lbls, difficults)) if len(batch_out) == batch_size: yield batch_out cnt += len(batch_out) batch_out = [] if mode == 'test' and len(batch_out) > 1: yield batch_out cnt += len(batch_out) return reader def train(settings, file_list_path, batch_size, shuffle=True, use_multiprocess=True, num_workers=4): readers = [] images = [line.strip() for line in open(file_list_path)] np.random.shuffle(images) n = int(math.ceil(len(images) // num_workers)) if use_multiprocess else len(images) image_lists = [images[i:i + n] for i in range(0, len(images), n)] for l in image_lists: readers.append(pascalvoc(settings, l, 'train', batch_size, shuffle)) if use_multiprocess: return paddle.reader.multiprocess_reader(readers, False) else: return readers[0] def test(settings, file_list_path, batch_size): image_list = [line.strip() for line in open(file_list_path)] return pascalvoc(settings, image_list, 'test', batch_size, False)
[ "numpy.random.uniform", "utils.image_util.sampler", "utils.image_util.crop_image", "utils.image_util.generate_batch_samples", "PIL.Image.open", "os.path.exists", "numpy.array", "numpy.swapaxes", "PIL.Image.fromarray", "utils.image_util.distort_image", "paddle.reader.multiprocess_reader", "numpy.random.shuffle", "utils.image_util.expand_image" ]
[((3440, 3453), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3448, 3453), True, 'import numpy as np\n'), ((6376, 6401), 'numpy.random.shuffle', 'np.random.shuffle', (['images'], {}), '(images)\n', (6393, 6401), True, 'import numpy as np\n'), ((3012, 3073), 'utils.image_util.generate_batch_samples', 'image_util.generate_batch_samples', (['batch_sampler', 'bbox_labels'], {}), '(batch_sampler, bbox_labels)\n', (3045, 3073), False, 'from utils import image_util\n'), ((3089, 3102), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3097, 3102), True, 'import numpy as np\n'), ((3331, 3351), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3346, 3351), False, 'from PIL import Image\n'), ((3845, 3867), 'numpy.swapaxes', 'np.swapaxes', (['img', '(1)', '(2)'], {}), '(img, 1, 2)\n', (3856, 3867), True, 'import numpy as np\n'), ((3882, 3904), 'numpy.swapaxes', 'np.swapaxes', (['img', '(1)', '(0)'], {}), '(img, 1, 0)\n', (3893, 3904), True, 'import numpy as np\n'), ((6704, 6753), 'paddle.reader.multiprocess_reader', 'paddle.reader.multiprocess_reader', (['readers', '(False)'], {}), '(readers, False)\n', (6737, 6753), False, 'import paddle\n'), ((2160, 2199), 'utils.image_util.distort_image', 'image_util.distort_image', (['img', 'settings'], {}), '(img, settings)\n', (2184, 2199), False, 'from utils import image_util\n'), ((2289, 2363), 'utils.image_util.expand_image', 'image_util.expand_image', (['img', 'bbox_labels', 'img_width', 'img_height', 'settings'], {}), '(img, bbox_labels, img_width, img_height, settings)\n', (2312, 2363), False, 'from utils import image_util\n'), ((2441, 2495), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(1)', '(1.0)', '(1.0)', '(1.0)', '(1.0)', '(0.0)', '(0.0)'], {}), '(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0)\n', (2459, 2495), False, 'from utils import image_util\n'), ((2522, 2577), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.1)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0)\n', (2540, 2577), False, 'from utils import image_util\n'), ((2604, 2659), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.3)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0)\n', (2622, 2659), False, 'from utils import image_util\n'), ((2686, 2741), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.5)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0)\n', (2704, 2741), False, 'from utils import image_util\n'), ((2768, 2823), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.7)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0)\n', (2786, 2823), False, 'from utils import image_util\n'), ((2850, 2905), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.9)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0)\n', (2868, 2905), False, 'from utils import image_util\n'), ((2932, 2987), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.0)', '(1.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0)\n', (2950, 2987), False, 'from utils import image_util\n'), ((3234, 3319), 'utils.image_util.crop_image', 'image_util.crop_image', (['img', 'bbox_labels', 'sampled_bbox[idx]', 'img_width', 'img_height'], {}), '(img, bbox_labels, sampled_bbox[idx], img_width,\n img_height)\n', (3255, 3319), False, 'from utils import image_util\n'), ((3500, 3523), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (3517, 3523), True, 'import numpy as np\n'), ((4165, 4193), 'numpy.random.shuffle', 'np.random.shuffle', (['file_list'], {}), '(file_list)\n', (4182, 4193), True, 'import numpy as np\n'), ((4490, 4512), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4500, 4512), False, 'from PIL import Image\n'), ((5582, 5605), 'numpy.array', 'np.array', (['sample_labels'], {}), '(sample_labels)\n', (5590, 5605), True, 'import numpy as np\n'), ((4339, 4365), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (4353, 4365), False, 'import os\n')]
#!/usr/bin/python3 # LED Test import RPi.GPIO as GPIO import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) LED = 13 GPIO.setup(LED, GPIO.OUT) try: print("LED is now flashing..") print("Exit with CTRL+C") while True: GPIO.output(LED,1) time.sleep(0.5) GPIO.output(LED,0) time.sleep(0.5) except KeyboardInterrupt: GPIO.cleanup() print(" Bye Bye")
[ "RPi.GPIO.setmode", "RPi.GPIO.cleanup", "RPi.GPIO.setup", "time.sleep", "RPi.GPIO.output", "RPi.GPIO.setwarnings" ]
[((67, 89), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (79, 89), True, 'import RPi.GPIO as GPIO\n'), ((90, 113), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (106, 113), True, 'import RPi.GPIO as GPIO\n'), ((123, 148), 'RPi.GPIO.setup', 'GPIO.setup', (['LED', 'GPIO.OUT'], {}), '(LED, GPIO.OUT)\n', (133, 148), True, 'import RPi.GPIO as GPIO\n'), ((233, 252), 'RPi.GPIO.output', 'GPIO.output', (['LED', '(1)'], {}), '(LED, 1)\n', (244, 252), True, 'import RPi.GPIO as GPIO\n'), ((256, 271), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (266, 271), False, 'import time\n'), ((276, 295), 'RPi.GPIO.output', 'GPIO.output', (['LED', '(0)'], {}), '(LED, 0)\n', (287, 295), True, 'import RPi.GPIO as GPIO\n'), ((299, 314), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (309, 314), False, 'import time\n'), ((343, 357), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (355, 357), True, 'import RPi.GPIO as GPIO\n')]
from Script.import_emojis import Emojis from Script.import_functions import create_embed, int_to_str async def server_info(ctx): nb_humans = 0 for members in ctx.guild.members: if members.bot == 0: nb_humans += 1 nb_bots = 0 for members in ctx.guild.members: if members.bot == 1: nb_bots += 1 emojis = "" count = 0 for emoji in ctx.guild.emojis: if count > 10: emojis += "..." break emojis += f"{emoji} " count += 1 admins = "" count = 0 for member in ctx.guild.members: if count > 10: admins += "..." break if member.guild_permissions.administrator: admins += f"{member.mention} " count += 1 embed = create_embed(ctx.guild.name, f"{Emojis['Owner']} Owner : {ctx.guild.owner.mention}\n{Emojis['Calendar']} Created at : {ctx.guild.created_at.date().isoformat()}\n{Emojis['Members']} Humans : {int_to_str(nb_humans)}\n{Emojis['Bot']} Bots : {int_to_str(nb_bots)}\n{Emojis['Pin']} Region : {ctx.guild.region}\n{Emojis['Boost']} Boost level : {ctx.guild.premium_tier}/3\n{Emojis['Boost']} Boost number : {ctx.guild.premium_subscription_count}\n{Emojis['Emoji_ghost']} emojis : {emojis}\nAdministrators : {admins}", ctx.guild.me.color, "", ctx.guild.icon_url) embed.set_thumbnail(url=ctx.guild.icon_url) await ctx.send(embed=embed) return
[ "Script.import_functions.int_to_str" ]
[((990, 1011), 'Script.import_functions.int_to_str', 'int_to_str', (['nb_humans'], {}), '(nb_humans)\n', (1000, 1011), False, 'from Script.import_functions import create_embed, int_to_str\n'), ((1038, 1057), 'Script.import_functions.int_to_str', 'int_to_str', (['nb_bots'], {}), '(nb_bots)\n', (1048, 1057), False, 'from Script.import_functions import create_embed, int_to_str\n')]
# Copyright (c) 2011-2021, Camptocamp SA # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # The views and conclusions contained in the software and documentation are those # of the authors and should not be interpreted as representing official policies, # either expressed or implied, of the FreeBSD Project. import logging from typing import Dict, Optional, Set, cast import pyramid.request from pyramid.httpexceptions import HTTPBadRequest from sqlalchemy.orm.exc import NoResultFound from c2cgeoportal_commons.lib.url import Url, get_url2 from c2cgeoportal_commons.models import DBSession, main from c2cgeoportal_geoportal.lib.caching import get_region from c2cgeoportal_geoportal.views.proxy import Proxy CACHE_REGION = get_region("std") LOG = logging.getLogger(__name__) class OGCProxy(Proxy): """ Proxy implementation that manly manage the ogcserver parameter. Then load the corresponding OGCServer. """ def __init__(self, request: pyramid.request.Request, has_default_ogc_server: bool = False): Proxy.__init__(self, request) # params hold the parameters we"re going to send to backend self.params = dict(self.request.params) # reset possible value of role_id and user_id if "role_id" in self.params: del self.params["role_id"] if "user_id" in self.params: del self.params["user_id"] self.lower_params = self._get_lower_params(self.params) if not has_default_ogc_server and "ogcserver" not in self.params: raise HTTPBadRequest("The querystring argument 'ogcserver' is required") if "ogcserver" in self.params: self.ogc_server = self._get_ogcserver_byname(self.params["ogcserver"]) @CACHE_REGION.cache_on_arguments() # type: ignore def _get_ogcserver_byname(self, name: str) -> main.OGCServer: # pylint: disable=no-self-use try: result = DBSession.query(main.OGCServer).filter(main.OGCServer.name == name).one() DBSession.expunge(result) return cast(main.OGCServer, result) except NoResultFound: raise HTTPBadRequest( # pylint: disable=raise-missing-from f"The OGC Server '{name}' does not exist (existing: " f"{','.join([t[0] for t in DBSession.query(main.OGCServer.name).all()])})." ) def _get_wms_url(self, errors: Set[str]) -> Optional[Url]: ogc_server = self.ogc_server url = get_url2(f"The OGC server '{ogc_server.name}'", ogc_server.url, self.request, errors) if errors: LOG.error("\n".join(errors)) return url def _get_wfs_url(self, errors: Set[str]) -> Optional[Url]: ogc_server = self.ogc_server url = get_url2( f"The OGC server (WFS) '{ogc_server.name}'", ogc_server.url_wfs or ogc_server.url, self.request, errors, ) if errors: LOG.error("\n".join(errors)) return url def get_headers(self) -> Dict[str, str]: headers: Dict[str, str] = super().get_headers() if self.ogc_server.type == main.OGCSERVER_TYPE_QGISSERVER: headers["X-Qgis-Service-Url"] = self.request.current_route_url( _query={"ogcserver": self.ogc_server.name} ) return headers
[ "c2cgeoportal_commons.models.DBSession.query", "typing.cast", "c2cgeoportal_commons.models.DBSession.expunge", "c2cgeoportal_geoportal.lib.caching.get_region", "c2cgeoportal_commons.lib.url.get_url2", "c2cgeoportal_geoportal.views.proxy.Proxy.__init__", "pyramid.httpexceptions.HTTPBadRequest", "logging.getLogger" ]
[((1981, 1998), 'c2cgeoportal_geoportal.lib.caching.get_region', 'get_region', (['"""std"""'], {}), "('std')\n", (1991, 1998), False, 'from c2cgeoportal_geoportal.lib.caching import get_region\n'), ((2005, 2032), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2022, 2032), False, 'import logging\n'), ((2291, 2320), 'c2cgeoportal_geoportal.views.proxy.Proxy.__init__', 'Proxy.__init__', (['self', 'request'], {}), '(self, request)\n', (2305, 2320), False, 'from c2cgeoportal_geoportal.views.proxy import Proxy\n'), ((3731, 3821), 'c2cgeoportal_commons.lib.url.get_url2', 'get_url2', (['f"""The OGC server \'{ogc_server.name}\'"""', 'ogc_server.url', 'self.request', 'errors'], {}), '(f"The OGC server \'{ogc_server.name}\'", ogc_server.url, self.\n request, errors)\n', (3739, 3821), False, 'from c2cgeoportal_commons.lib.url import Url, get_url2\n'), ((4011, 4128), 'c2cgeoportal_commons.lib.url.get_url2', 'get_url2', (['f"""The OGC server (WFS) \'{ogc_server.name}\'"""', '(ogc_server.url_wfs or ogc_server.url)', 'self.request', 'errors'], {}), '(f"The OGC server (WFS) \'{ogc_server.name}\'", ogc_server.url_wfs or\n ogc_server.url, self.request, errors)\n', (4019, 4128), False, 'from c2cgeoportal_commons.lib.url import Url, get_url2\n'), ((2802, 2868), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['"""The querystring argument \'ogcserver\' is required"""'], {}), '("The querystring argument \'ogcserver\' is required")\n', (2816, 2868), False, 'from pyramid.httpexceptions import HTTPBadRequest\n'), ((3264, 3289), 'c2cgeoportal_commons.models.DBSession.expunge', 'DBSession.expunge', (['result'], {}), '(result)\n', (3281, 3289), False, 'from c2cgeoportal_commons.models import DBSession, main\n'), ((3309, 3337), 'typing.cast', 'cast', (['main.OGCServer', 'result'], {}), '(main.OGCServer, result)\n', (3313, 3337), False, 'from typing import Dict, Optional, Set, cast\n'), ((3178, 3209), 'c2cgeoportal_commons.models.DBSession.query', 'DBSession.query', (['main.OGCServer'], {}), '(main.OGCServer)\n', (3193, 3209), False, 'from c2cgeoportal_commons.models import DBSession, main\n'), ((3553, 3589), 'c2cgeoportal_commons.models.DBSession.query', 'DBSession.query', (['main.OGCServer.name'], {}), '(main.OGCServer.name)\n', (3568, 3589), False, 'from c2cgeoportal_commons.models import DBSession, main\n')]
import numpy as np from napari.utils import nbscreenshot def test_nbscreenshot(viewer_factory): """Test taking a screenshot.""" view, viewer = viewer_factory() np.random.seed(0) data = np.random.random((10, 15)) viewer.add_image(data) rich_display_object = nbscreenshot(viewer) assert hasattr(rich_display_object, '_repr_png_') # Trigger method that would run in jupyter notebook cell automatically rich_display_object._repr_png_() assert rich_display_object.image is not None
[ "numpy.random.random", "numpy.random.seed", "napari.utils.nbscreenshot" ]
[((176, 193), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (190, 193), True, 'import numpy as np\n'), ((205, 231), 'numpy.random.random', 'np.random.random', (['(10, 15)'], {}), '((10, 15))\n', (221, 231), True, 'import numpy as np\n'), ((286, 306), 'napari.utils.nbscreenshot', 'nbscreenshot', (['viewer'], {}), '(viewer)\n', (298, 306), False, 'from napari.utils import nbscreenshot\n')]
import base64 import random import time from RGUtil.RGCodeUtil import RGResCode def get_data_with_request(_request): if _request.is_json: return _request.json return _request.values # if _request.method == "POST": # return _request.form # elif _request.json: # return _request.json # return _request.args def request_value(_request, key, default=None): args = get_data_with_request(_request) if key in args: return args[key] else: return default def request_ip(_request, default=None): headers = _request.headers if 'X-Real-Ip' in headers: return headers['X-Real-Ip'] else: return default def form_res(code, data=None): if code == 0: code = RGResCode.not_existed if data is None else RGResCode.ok if data is not None: if not isinstance(data, dict) and not isinstance(data, list): data = data.__dict__ res = { 'code': int(code), 'data': data } return res else: res = { 'code': int(code), } return res def is_int_number(s): try: float(s) return True except ValueError: pass try: import unicodedata unicodedata.numeric(s) return True except (TypeError, ValueError): pass return False def request_file_size(request): re_files = request.files size = 0 for file_key in re_files: file = re_files[file_key] file.seek(0) size += len(file.read()) file.seek(0) return size def request_file_mine(request): re_files = request.files size = 0 for file_key in re_files: file = re_files[file_key] file.seek(0) size += len(file.read()) file.seek(0) return size baseList = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjklmnopqrstuvwxyz' def encode(n, b=58): """ :param n: 压缩的数字 :param b: 进制 最大58 :return: 对应进制字符串 """ result = '' x = int(n) while True: x, y = divmod(x, b) result = baseList[y] + result if x <= 0: break return result def decode(n, b=58): """ :param n: 数字压缩后的字符串 :param b: 对应的进制 最大58 :return: 原来的数字 """ result = 0 length = len(n) for index in range(length): result += (baseList.index(n[index]) * pow(b, length - index - 1)) return result def did_encode(dir_id, uid): dir_id = int(dir_id) + 10 dir_id = str(dir_id) count = 0 # if len(dir_id) < 8: # count = 8 - len(dir_id) # dir_id = ''.join(['0', '0', '0', '0', '0', '0', '0', '0'][:count]) + dir_id count = str(count) uid = encode(uid) dir_id = encode(dir_id) content = '{}{}.{}.{}'.format(dir_id, uid, count, len(str(uid))) return safe_encode_b64(content, random_index=0) def did_decode(content): content = safe_decode_b64(content) contents = content.split(sep='.') uid_count = int(contents[-1]) count = int(contents[-2]) content = contents[0] uid = content[-uid_count:] dir_id = content[:-uid_count] dir_id = dir_id[count:] return int(decode(dir_id)) - 10, int(decode(uid)) def fid_encode(f_id, uid): f_id = int(f_id) + 10 time_str = str((time.time_ns()//1000) % 10000000) f_id = '{}{}'.format(f_id, time_str) f_id = encode(int(f_id)) return safe_encode_b64('{}.{}.{}'.format(f_id, uid, len(time_str))) def fid_decode(content): content = safe_decode_b64(content) contents = content.split(sep='.') length = int(contents[-1]) uid = contents[-2] f_id = str(decode(contents[0])) f_id = f_id[0:-length] return int(f_id) - 10, uid def safe_encode_b64(content, random_index=None): content = base64.urlsafe_b64encode(content.encode("utf-8")) content = str(content, "utf-8") del_count = 0 for i in range(len(content) - 1, -1, -1): if content[i] == '=': del_count += 1 content = content[:-1] else: break if random_index is None: index = random.randint(0, len(content) - 1) else: index = 0 return encode(index) + content[:index] + encode(del_count) + content[index:] def safe_decode_b64(content): index = decode(content[0]) content = content[1:] del_count = decode(content[index]) content = content[:index] + content[index+1:] for i in range(del_count): content += '=' return str(base64.urlsafe_b64decode(content.encode("utf-8")), "utf-8") def bytes_to_hex_string(bytes): result = '' for byte in bytes: result += '%02X' % byte return result def hex_string_to_bytes(hex_string): byte_array = bytearray() for index in range(len(hex_string) // 2): temp = hex_string[2 * index:2 * index + 2] temp = bytes(temp, encoding='utf-8') temp = int(temp, base=16) byte_array.append(temp) return byte_array if __name__ == '__main__': code = encode(1000, 58) print('#58', code) print('#10', decode(code, 58)) dir_id = 122134 uid = 9812312332 print('did', dir_id, 'user_id', uid) code = did_encode(dir_id=dir_id, uid=uid) print('did_encode', code) did, user_id = did_decode(code) print('did', did, 'user_id', user_id) token = fid_encode(f_id=dir_id, uid=uid) print('fid_encode', token) did, user_id = fid_decode(token) print('did', did, 'user_id', user_id)
[ "unicodedata.numeric", "time.time_ns" ]
[((1285, 1307), 'unicodedata.numeric', 'unicodedata.numeric', (['s'], {}), '(s)\n', (1304, 1307), False, 'import unicodedata\n'), ((3331, 3345), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (3343, 3345), False, 'import time\n')]
from ..files import ObjectReader from ..streams import EndianBinaryWriter from ..helpers import ImportHelper from .. import files from ..enums import FileType, ClassIDType import os from .. import environment def save_ptr(obj, writer: EndianBinaryWriter): if isinstance(obj, PPtr): writer.write_int(obj.file_id) else: writer.write_int(0) # it's usually 0...... if obj._version < 14: writer.write_int(obj.path_id) else: writer.write_long(obj.path_id) cached_managers = dict() class PPtr: def __init__(self, reader: ObjectReader): self._version = reader.version2 self.index = -2 self.file_id = reader.read_int() self.path_id = reader.read_int() if self._version < 14 else reader.read_long() self.assets_file = reader.assets_file self._obj = None def save(self, writer: EndianBinaryWriter): save_ptr(self, writer) def get_obj(self): if self._obj != None: return self._obj manager = None if self.file_id == 0: manager = self.assets_file elif self.file_id > 0 and self.file_id - 1 < len(self.assets_file.externals): if self.index == -2: external_name = self.assets_file.externals[self.file_id - 1].name parent = self.assets_file.parent if parent is not None: if external_name in parent.files: manager = parent.files[external_name] elif external_name.upper() in parent.files: manager = parent.files[external_name.upper()] else: while not isinstance(parent, environment.Environment): parent = parent.parent if parent.path: path = parent.path files = os.listdir(path) if external_name in files: parent.load_files([os.path.join(path, external_name)]) manager = parent.files[external_name] else: if external_name not in cached_managers: typ, reader = ImportHelper.check_file_type(external_name) if typ == FileType.AssetsFile: cached_managers[external_name] = files.SerializedFile(reader) if external_name in cached_managers: manager = cached_managers[external_name] if manager and self.path_id in manager.objects: self._obj = manager.objects[self.path_id] else: self._obj = None return self._obj def __getattr__(self, key): obj = self.get_obj() if obj is None: if key == "type": return ClassIDType.UnknownType raise AttributeError(key) return getattr(obj, key) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self._obj.__class__.__repr__(self.get_obj()) if self.get_obj() else "Not Found") def __bool__(self): return True if self.get_obj() else False
[ "os.path.join", "os.listdir" ]
[((1922, 1938), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1932, 1938), False, 'import os\n'), ((2045, 2078), 'os.path.join', 'os.path.join', (['path', 'external_name'], {}), '(path, external_name)\n', (2057, 2078), False, 'import os\n')]
import os import numpy as np import torch import torch.nn as nn import matplotlib.pyplot as plt from medpy.metric import binary #use gpu if available device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class AE(nn.Module): def __init__(self, latent_size=100): super().__init__() self.init_layers(latent_size) self.apply(self.weight_init) self.loss_function=self.Loss() self.metrics=self.Metrics() self.optimizer=torch.optim.Adam(self.parameters(),lr=2e-4,weight_decay=1e-5) def init_layers(self,latent_size): self.encoder = nn.Sequential( nn.Conv2d(in_channels=4,out_channels=32,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=32,out_channels=32,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=32,out_channels=64,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=64), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=64), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=64,out_channels=128,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=128), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=128,out_channels=64,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=64), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=64,out_channels=32,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.Conv2d(in_channels=32,out_channels=latent_size,kernel_size=4,stride=2,padding=1) ) self.decoder = nn.Sequential( nn.ConvTranspose2d(in_channels=latent_size,out_channels=32,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=32,out_channels=64,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=64), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=128), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=128,out_channels=64,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=64), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=64), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=64,out_channels=32,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=3,stride=1,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1), nn.BatchNorm2d(num_features=32), nn.LeakyReLU(.2), nn.Dropout(0.5), nn.ConvTranspose2d(in_channels=32,out_channels=4,kernel_size=4,stride=2,padding=1), nn.Softmax(dim=1) ) def weight_init(self,m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): nn.init.kaiming_uniform_(m.weight) def forward(self, x): latent = self.encoder(x) reconstruction = self.decoder(latent) return reconstruction class Loss(): def __init__(self,call_id=0): self.MSELoss=nn.MSELoss() self.GDLoss=self.GDLoss() class GDLoss: def __call__(self,x,y): tp=torch.sum(x*y,dim=(0,2,3)) fp=torch.sum(x*(1-y),dim=(0,2,3)) fn=torch.sum((1-x)*y,dim=(0,2,3)) nominator=2*tp+1e-05 denominator=2*tp+fp+fn+1e-05 dice_score=-(nominator/(denominator+1e-8))[1:].mean() return dice_score def __call__(self,prediction,target,epoch=None,validation=False): contributes={} contributes["MSELoss"]=self.MSELoss(prediction,target) contributes["GDLoss"]=self.GDLoss(prediction,target) contributes["Total"]=contributes["MSELoss"]+contributes["GDLoss"] if validation: return {k:v.item() for k,v in contributes.items()} return contributes["Total"] class Metrics(): def __init__(self): self.DC=self.DC() self.HD=self.HD() class DC: def __call__(self,prediction,target): try: return binary.dc(prediction,target) except Exception: return 0 class HD: def __call__(self,prediction,target): try: return binary.hd(prediction,target) except Exception: return np.nan def __call__(self,prediction,target,validation=False): metrics={} for c,key in enumerate(["BK_","RV_","MYO_","LV_"]): ref=np.copy(target) pred=np.copy(prediction) ref=np.where(ref!=c,0,1) pred=np.where(pred!=c,0,1) metrics[key+"dc"]=self.DC(pred,ref) metrics[key+"hd"]=self.HD(pred,ref) return metrics def training_routine(self,epochs,train_loader,val_loader,ckpt_folder): if not os.path.isdir(ckpt_folder): os.mkdir(ckpt_folder) history = [] best_acc = None for epoch in epochs: #training self.train() for patient in train_loader: for batch in patient: batch=batch.to(device) self.optimizer.zero_grad() reconstruction=self.forward(batch) loss=self.loss_function(reconstruction,batch,epoch) loss.backward() self.optimizer.step() #validation self.eval() with torch.no_grad(): result = self.evaluation_routine(val_loader) #checkpoint if(best_acc==None or result['Total']<best_acc or epoch%10==0): ckpt=os.path.join(ckpt_folder,"{:03d}.pth".format(epoch)) if(best_acc==None or result['Total']<best_acc): best_acc=result['Total']; ckpt=ckpt.split(".pth")[0]+"_best.pth" torch.save({"AE": self.state_dict(),"AE_optim": self.optimizer.state_dict(),"epoch": epoch},ckpt) #report self.epoch_end(epoch, result) history.append(result) return history def evaluation_routine(self,val_loader): epoch_summary={} for patient in val_loader: gt=[];reconstruction=[] #loss terms for batch in patient: batch={"gt":batch.to(device)} batch["reconstruction"]=self.forward(batch["gt"]) gt=torch.cat([gt,batch["gt"]],dim=0) if len(gt)>0 else batch["gt"] reconstruction=torch.cat([reconstruction,batch["reconstruction"]],dim=0) if len(reconstruction)>0 else batch["reconstruction"] for k,v in self.loss_function(batch["reconstruction"],batch["gt"],validation=True).items(): if k not in epoch_summary.keys(): epoch_summary[k]=[] epoch_summary[k].append(v) #validation metrics gt=np.argmax(gt.cpu().numpy(),axis=1) gt={"ED":gt[:len(gt)//2],"ES":gt[len(gt)//2:]} reconstruction=np.argmax(reconstruction.cpu().numpy(),axis=1) reconstruction={"ED":reconstruction[:len(reconstruction)//2],"ES":reconstruction[len(reconstruction)//2:]} for phase in ["ED","ES"]: for k,v in self.metrics(reconstruction[phase],gt[phase]).items(): if k not in epoch_summary.keys(): epoch_summary[k]=[] epoch_summary[k].append(v) epoch_summary={k:np.mean(v) for k,v in epoch_summary.items()} return epoch_summary def epoch_end(self,epoch,result): print("\033[1mEpoch [{}]\033[0m".format(epoch)) header,row="","" for k,v in result.items(): header+="{:.6}\t".format(k);row+="{:.6}\t".format("{:.4f}".format(v)) print(header);print(row) def plot_history(history): losses = [x['Total'] for x in history] plt.plot(losses, '-x', label="loss") plt.xlabel('epoch') plt.ylabel('loss') plt.legend() plt.title('Losses vs. No. of epochs') plt.grid() plt.show()
[ "matplotlib.pyplot.title", "torch.nn.Dropout", "os.mkdir", "torch.cat", "numpy.mean", "torch.nn.Softmax", "torch.no_grad", "torch.nn.MSELoss", "numpy.copy", "medpy.metric.binary.dc", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.cuda.is_available", "matplotlib.pyplot.ylabel", "torch.nn.LeakyReLU", "torch.nn.init.kaiming_uniform_", "matplotlib.pyplot.grid", "torch.sum", "torch.nn.ConvTranspose2d", "matplotlib.pyplot.plot", "medpy.metric.binary.hd", "os.path.isdir", "numpy.where", "matplotlib.pyplot.xlabel" ]
[((8627, 8663), 'matplotlib.pyplot.plot', 'plt.plot', (['losses', '"""-x"""'], {'label': '"""loss"""'}), "(losses, '-x', label='loss')\n", (8635, 8663), True, 'import matplotlib.pyplot as plt\n'), ((8666, 8685), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8676, 8685), True, 'import matplotlib.pyplot as plt\n'), ((8688, 8706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (8698, 8706), True, 'import matplotlib.pyplot as plt\n'), ((8709, 8721), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8719, 8721), True, 'import matplotlib.pyplot as plt\n'), ((8724, 8761), 'matplotlib.pyplot.title', 'plt.title', (['"""Losses vs. No. of epochs"""'], {}), "('Losses vs. No. of epochs')\n", (8733, 8761), True, 'import matplotlib.pyplot as plt\n'), ((8764, 8774), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8772, 8774), True, 'import matplotlib.pyplot as plt\n'), ((8777, 8787), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8785, 8787), True, 'import matplotlib.pyplot as plt\n'), ((184, 209), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (207, 209), False, 'import torch\n'), ((599, 676), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(4)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=4, out_channels=32, kernel_size=4, stride=2, padding=1)\n', (608, 676), True, 'import torch.nn as nn\n'), ((680, 711), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (694, 711), True, 'import torch.nn as nn\n'), ((719, 736), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (731, 736), True, 'import torch.nn as nn\n'), ((743, 758), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (753, 758), True, 'import torch.nn as nn\n'), ((767, 845), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1)\n', (776, 845), True, 'import torch.nn as nn\n'), ((849, 880), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (863, 880), True, 'import torch.nn as nn\n'), ((888, 905), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (900, 905), True, 'import torch.nn as nn\n'), ((912, 927), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (922, 927), True, 'import torch.nn as nn\n'), ((936, 1014), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1)\n', (945, 1014), True, 'import torch.nn as nn\n'), ((1018, 1049), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (1032, 1049), True, 'import torch.nn as nn\n'), ((1057, 1074), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1069, 1074), True, 'import torch.nn as nn\n'), ((1081, 1096), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1091, 1096), True, 'import torch.nn as nn\n'), ((1105, 1183), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1)\n', (1114, 1183), True, 'import torch.nn as nn\n'), ((1187, 1218), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (1201, 1218), True, 'import torch.nn as nn\n'), ((1226, 1243), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1238, 1243), True, 'import torch.nn as nn\n'), ((1250, 1265), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1260, 1265), True, 'import torch.nn as nn\n'), ((1274, 1352), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=1)\n', (1283, 1352), True, 'import torch.nn as nn\n'), ((1356, 1387), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (1370, 1387), True, 'import torch.nn as nn\n'), ((1395, 1412), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1407, 1412), True, 'import torch.nn as nn\n'), ((1419, 1434), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1429, 1434), True, 'import torch.nn as nn\n'), ((1443, 1521), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)\n', (1452, 1521), True, 'import torch.nn as nn\n'), ((1525, 1556), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (1539, 1556), True, 'import torch.nn as nn\n'), ((1564, 1581), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1576, 1581), True, 'import torch.nn as nn\n'), ((1588, 1603), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1598, 1603), True, 'import torch.nn as nn\n'), ((1612, 1691), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1)\n', (1621, 1691), True, 'import torch.nn as nn\n'), ((1695, 1727), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(128)'}), '(num_features=128)\n', (1709, 1727), True, 'import torch.nn as nn\n'), ((1735, 1752), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1747, 1752), True, 'import torch.nn as nn\n'), ((1759, 1774), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1769, 1774), True, 'import torch.nn as nn\n'), ((1783, 1862), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1)\n', (1792, 1862), True, 'import torch.nn as nn\n'), ((1866, 1897), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (1880, 1897), True, 'import torch.nn as nn\n'), ((1905, 1922), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1917, 1922), True, 'import torch.nn as nn\n'), ((1929, 1944), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1939, 1944), True, 'import torch.nn as nn\n'), ((1953, 2031), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(32)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1)\n', (1962, 2031), True, 'import torch.nn as nn\n'), ((2035, 2066), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (2049, 2066), True, 'import torch.nn as nn\n'), ((2074, 2091), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2086, 2091), True, 'import torch.nn as nn\n'), ((2098, 2113), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2108, 2113), True, 'import torch.nn as nn\n'), ((2122, 2213), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': 'latent_size', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=latent_size, kernel_size=4, stride=2,\n padding=1)\n', (2131, 2213), True, 'import torch.nn as nn\n'), ((2253, 2353), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'latent_size', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=latent_size, out_channels=32, kernel_size=4,\n stride=2, padding=1)\n', (2271, 2353), True, 'import torch.nn as nn\n'), ((2353, 2384), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (2367, 2384), True, 'import torch.nn as nn\n'), ((2392, 2409), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2404, 2409), True, 'import torch.nn as nn\n'), ((2416, 2431), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2426, 2431), True, 'import torch.nn as nn\n'), ((2440, 2531), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=32, out_channels=64, kernel_size=3, stride=1,\n padding=1)\n', (2458, 2531), True, 'import torch.nn as nn\n'), ((2531, 2562), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (2545, 2562), True, 'import torch.nn as nn\n'), ((2570, 2587), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2582, 2587), True, 'import torch.nn as nn\n'), ((2594, 2609), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2604, 2609), True, 'import torch.nn as nn\n'), ((2618, 2711), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=128, kernel_size=3, stride=\n 1, padding=1)\n', (2636, 2711), True, 'import torch.nn as nn\n'), ((2710, 2742), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(128)'}), '(num_features=128)\n', (2724, 2742), True, 'import torch.nn as nn\n'), ((2750, 2767), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2762, 2767), True, 'import torch.nn as nn\n'), ((2774, 2789), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2784, 2789), True, 'import torch.nn as nn\n'), ((2798, 2891), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(128)', 'out_channels': '(64)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=128, out_channels=64, kernel_size=4, stride=\n 2, padding=1)\n', (2816, 2891), True, 'import torch.nn as nn\n'), ((2890, 2921), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (2904, 2921), True, 'import torch.nn as nn\n'), ((2929, 2946), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2941, 2946), True, 'import torch.nn as nn\n'), ((2953, 2968), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2963, 2968), True, 'import torch.nn as nn\n'), ((2977, 3068), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=64, kernel_size=3, stride=1,\n padding=1)\n', (2995, 3068), True, 'import torch.nn as nn\n'), ((3068, 3099), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (3082, 3099), True, 'import torch.nn as nn\n'), ((3107, 3124), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3119, 3124), True, 'import torch.nn as nn\n'), ((3131, 3146), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3141, 3146), True, 'import torch.nn as nn\n'), ((3155, 3246), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=64, out_channels=32, kernel_size=4, stride=2,\n padding=1)\n', (3173, 3246), True, 'import torch.nn as nn\n'), ((3246, 3277), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3260, 3277), True, 'import torch.nn as nn\n'), ((3285, 3302), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3297, 3302), True, 'import torch.nn as nn\n'), ((3309, 3324), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3319, 3324), True, 'import torch.nn as nn\n'), ((3333, 3424), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=3, stride=1,\n padding=1)\n', (3351, 3424), True, 'import torch.nn as nn\n'), ((3424, 3455), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3438, 3455), True, 'import torch.nn as nn\n'), ((3463, 3480), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3475, 3480), True, 'import torch.nn as nn\n'), ((3487, 3502), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3497, 3502), True, 'import torch.nn as nn\n'), ((3511, 3602), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2,\n padding=1)\n', (3529, 3602), True, 'import torch.nn as nn\n'), ((3602, 3633), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3616, 3633), True, 'import torch.nn as nn\n'), ((3641, 3658), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3653, 3658), True, 'import torch.nn as nn\n'), ((3665, 3680), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3675, 3680), True, 'import torch.nn as nn\n'), ((3689, 3780), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2,\n padding=1)\n', (3707, 3780), True, 'import torch.nn as nn\n'), ((3780, 3811), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3794, 3811), True, 'import torch.nn as nn\n'), ((3819, 3836), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3831, 3836), True, 'import torch.nn as nn\n'), ((3843, 3858), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3853, 3858), True, 'import torch.nn as nn\n'), ((3867, 3957), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(4)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=4, kernel_size=4, stride=2,\n padding=1)\n', (3885, 3957), True, 'import torch.nn as nn\n'), ((3957, 3974), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (3967, 3974), True, 'import torch.nn as nn\n'), ((4085, 4119), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {}), '(m.weight)\n', (4109, 4119), True, 'import torch.nn as nn\n'), ((4312, 4324), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4322, 4324), True, 'import torch.nn as nn\n'), ((5977, 6003), 'os.path.isdir', 'os.path.isdir', (['ckpt_folder'], {}), '(ckpt_folder)\n', (5990, 6003), False, 'import os\n'), ((6011, 6032), 'os.mkdir', 'os.mkdir', (['ckpt_folder'], {}), '(ckpt_folder)\n', (6019, 6032), False, 'import os\n'), ((8236, 8246), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (8243, 8246), True, 'import numpy as np\n'), ((4423, 4454), 'torch.sum', 'torch.sum', (['(x * y)'], {'dim': '(0, 2, 3)'}), '(x * y, dim=(0, 2, 3))\n', (4432, 4454), False, 'import torch\n'), ((4461, 4498), 'torch.sum', 'torch.sum', (['(x * (1 - y))'], {'dim': '(0, 2, 3)'}), '(x * (1 - y), dim=(0, 2, 3))\n', (4470, 4498), False, 'import torch\n'), ((4503, 4540), 'torch.sum', 'torch.sum', (['((1 - x) * y)'], {'dim': '(0, 2, 3)'}), '((1 - x) * y, dim=(0, 2, 3))\n', (4512, 4540), False, 'import torch\n'), ((5656, 5671), 'numpy.copy', 'np.copy', (['target'], {}), '(target)\n', (5663, 5671), True, 'import numpy as np\n'), ((5685, 5704), 'numpy.copy', 'np.copy', (['prediction'], {}), '(prediction)\n', (5692, 5704), True, 'import numpy as np\n'), ((5718, 5742), 'numpy.where', 'np.where', (['(ref != c)', '(0)', '(1)'], {}), '(ref != c, 0, 1)\n', (5726, 5742), True, 'import numpy as np\n'), ((5752, 5777), 'numpy.where', 'np.where', (['(pred != c)', '(0)', '(1)'], {}), '(pred != c, 0, 1)\n', (5760, 5777), True, 'import numpy as np\n'), ((6477, 6492), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6490, 6492), False, 'import torch\n'), ((5267, 5296), 'medpy.metric.binary.dc', 'binary.dc', (['prediction', 'target'], {}), '(prediction, target)\n', (5276, 5296), False, 'from medpy.metric import binary\n'), ((5430, 5459), 'medpy.metric.binary.hd', 'binary.hd', (['prediction', 'target'], {}), '(prediction, target)\n', (5439, 5459), False, 'from medpy.metric import binary\n'), ((7304, 7339), 'torch.cat', 'torch.cat', (["[gt, batch['gt']]"], {'dim': '(0)'}), "([gt, batch['gt']], dim=0)\n", (7313, 7339), False, 'import torch\n'), ((7391, 7450), 'torch.cat', 'torch.cat', (["[reconstruction, batch['reconstruction']]"], {'dim': '(0)'}), "([reconstruction, batch['reconstruction']], dim=0)\n", (7400, 7450), False, 'import torch\n')]
# -*- coding: utf-8 -*- # Copyright (c) 2010-2016, MIT Probabilistic Computing Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import StringIO import apsw import pytest import struct import bayeslite import bayeslite.ast as ast import bayeslite.compiler as compiler import bayeslite.core as core import bayeslite.guess as guess import bayeslite.backends.troll_rng as troll import bayeslite.parse as parse from bayeslite.exception import BQLError from bayeslite.math_util import relerr from bayeslite.backends.cgpm_backend import CGPM_Backend from bayeslite.util import cursor_value import test_core import test_csv from stochastic import stochastic def bql2sql(string, setup=None): with bayeslite.bayesdb_open(':memory:') as bdb: test_core.t1_schema(bdb) test_core.t1_data(bdb) bdb.execute(''' create population p1 for t1 ( id ignore; label nominal; age numerical; weight numerical ) ''') if setup is not None: setup(bdb) phrases = parse.parse_bql_string(string) out = compiler.Output(0, {}, ()) for phrase in phrases: assert ast.is_query(phrase) compiler.compile_query(bdb, phrase, out) out.write(';') return out.getvalue() # XXX Kludgey mess. Please reorganize. def bql2sqlparam(string): with bayeslite.bayesdb_open(':memory:') as bdb: test_core.t1_schema(bdb) test_core.t1_data(bdb) bdb.execute(''' create population p1 for t1 ( id ignore; label nominal; age numerical; weight numerical ) ''') phrases = parse.parse_bql_string(string) out0 = StringIO.StringIO() for phrase in phrases: out = None if isinstance(phrase, ast.Parametrized): bindings = (None,) * phrase.n_numpar out = compiler.Output(phrase.n_numpar, phrase.nampar_map, bindings) phrase = phrase.phrase else: out = StringIO.StringIO() assert ast.is_query(phrase) compiler.compile_query(bdb, phrase, out) # XXX Do something about the parameters. out0.write(out.getvalue()) out0.write(';') return out0.getvalue() def bql_execute(bdb, string, bindings=()): return map(tuple, bdb.execute(string, bindings)) def empty(cursor): assert cursor is not None assert cursor.description is not None assert len(cursor.description) == 0 with pytest.raises(StopIteration): cursor.next() def test_trivial_population(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) # XXX if (not) exists bdb.execute(''' create population p for t ( guess stattypes of (*); age numerical ) ''') bdb.execute('drop population p') def test_population_invalid_numerical(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) with pytest.raises(BQLError): bdb.execute(''' create population p for t ( guess stattypes of (*); gender numerical ) ''') def test_population_invalid_numerical_alterpop_addvar(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) bdb.execute(''' create population p for t ( guess stattypes of (*); ignore gender ) ''') with pytest.raises(BQLError): bdb.execute('alter population p add variable gender numerical') bdb.execute('drop population p') def test_population_invalid_numerical_alterpop_stattype(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) bdb.execute(''' create population p for t ( guess stattypes of (*); gender nominal ) ''') with pytest.raises(BQLError): bdb.execute(''' alter population p set stattype of gender to numerical ''') bdb.execute('drop population p') def test_similarity_identity(): with test_core.t1() as (bdb, population_id, _generator_id): bdb.execute('initialize 6 models for p1_cc;') rowids = bdb.sql_execute('select rowid from t1') for rowid in rowids: c = bdb.execute(''' estimate similarity of (rowid=?) to (rowid=?) in the context of age by p1 ''', (rowid[0], rowid[0])).fetchall() assert len(c) == 1 assert c[0][0] == 1 def test_predictive_relevance(): assert bql2sql(''' estimate predictive relevance of (label = 'Uganda') to existing rows (rowid < 4) and hypothetical rows with values ( ("age" = 82, "weight" = 14), ("age" = 74, label = 'Europe', "weight" = 7) ) in the context of "weight" by p1 ''') == \ 'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \ '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\ '\'[1, 2, 3]\', 3, '\ '2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);' assert bql2sql(''' estimate predictive relevance of (label = 'mumble') to existing rows (label = 'frotz' or age <= 4) in the context of "label" by p1 ''') == \ 'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \ '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'mumble\')), '\ '\'[5, 8]\', 1);' assert bql2sql(''' estimate label, predictive relevance to hypothetical rows with values ( ("age" = 82, "weight" = 14), ("age" = 74, label = 'hunf', "weight" = 7) ) in the context of "age", _rowid_ + 1 from p1 ''') == \ 'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\ '\'[]\', 2, 2, 82, 3, 14, NULL, 2, 74, 1, \'hunf\', 3, 7, NULL), '\ '("_rowid_" + 1) FROM "t1";' # No matching rows should still compile. assert bql2sql(''' estimate label, predictive relevance to existing rows (rowid < 0) in the context of "age" from p1 ''') == \ 'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\ '\'[]\', 2) FROM "t1";' # When using `BY`, require OF to be specified. with pytest.raises(BQLError): bql2sql(''' estimate predictive relevance to hypothetical rows with values ( ("age" = 82, "weight" = 14), ("age" = 74, label = 'Europe', "weight" = 7) ) in the context of "age" by p1 ''') # When using `FROM`, require OF to be unspecified. with pytest.raises(BQLError): bql2sql(''' estimate predictive relevance of (name = 'mansour') to hypothetical rows with values ( ("age" = 82, "weight" = 14) ) in the context of "age" from p1 ''') assert bql2sql(''' estimate label from p1 where (predictive relevance to existing rows (label = 'quux' and age < 5) in the context of "weight") > 1 order by predictive relevance to hypothetical rows with values ((label='zot')) in the context of "age" ''') == \ 'SELECT "label" FROM "t1" WHERE '\ '(bql_row_predictive_relevance(1, NULL, NULL, '\ '_rowid_, \'[5]\', 3) > 1) '\ 'ORDER BY bql_row_predictive_relevance(1, NULL, NULL, '\ '_rowid_, \'[]\', 2, 1, \'zot\', NULL);' @stochastic(max_runs=2, min_passes=1) def test_conditional_probability(seed): with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id): bdb.execute('drop generator p1_cc') bdb.execute('drop population p1') bdb.execute(''' create population p1 for t1 ( ignore id, label; set stattype of age to numerical; set stattype of weight to numerical ) ''') bdb.execute(''' create generator p1_cond_prob_cc for p1; ''') bdb.execute('initialize 1 model for p1_cond_prob_cc') bdb.execute('alter generator p1_cond_prob_cc ' 'ensure variables * dependent') bdb.execute('analyze p1_cond_prob_cc for 1 iteration') q0 = 'estimate probability density of age = 8 by p1' q1 = 'estimate probability density of age = 8 given () by p1' age_is_8 = bdb.execute(q0).fetchvalue() assert age_is_8 == bdb.execute(q1).fetchvalue() q2 = 'estimate probability density of age = 8 given (weight = 16)' \ ' by p1' age_is_8_given_weight_is_16 = bdb.execute(q2).fetchvalue() assert age_is_8 < age_is_8_given_weight_is_16 probs = bdb.execute( 'estimate probability density of value 8 given (weight = 16)' ' from columns of p1 where v.name != \'weight\'').fetchall() assert [(age_is_8_given_weight_is_16,)] == probs @stochastic(max_runs=2, min_passes=1) def test_joint_probability(seed): with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id): bdb.execute('initialize 10 models for p1_cc') bdb.execute('analyze p1_cc for 10 iterations') q0 = 'estimate probability density of age = 8 by p1' q1 = 'estimate probability density of (age = 8) by p1' assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue() q1 = 'estimate probability density of (age = 8) given () by p1' assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue() q2 = 'estimate probability density of age = 8 given (weight = 16)' \ ' by p1' assert bdb.execute(q0).fetchvalue() < bdb.execute(q2).fetchvalue() q0 = 'estimate probability density of age = 8 by p1' q1 = 'estimate probability density of (age = 8, weight = 16) by p1' assert bdb.execute(q1).fetchvalue() < bdb.execute(q0).fetchvalue() q2 = 'estimate probability density of (age = 8, weight = 16)' \ " given (label = 'mumble') by p1" assert bdb.execute(q1).fetchvalue() < bdb.execute(q2).fetchvalue() def test_badbql(): with test_core.t1() as (bdb, _population_id, _generator_id): with pytest.raises(ValueError): bdb.execute('') with pytest.raises(ValueError): bdb.execute(';') with pytest.raises(ValueError): bdb.execute('select 0; select 1') def test_select_trivial(): assert bql2sql('select null;') == 'SELECT NULL;' assert bql2sql("select 'x';") == "SELECT 'x';" assert bql2sql("select 'x''y';") == "SELECT 'x''y';" assert bql2sql('select "x";') == 'SELECT "x";' assert bql2sql('select "x""y";') == 'SELECT "x""y";' assert bql2sql('select 0;') == 'SELECT 0;' assert bql2sql('select 0.;') == 'SELECT 0.0;' assert bql2sql('select .0;') == 'SELECT 0.0;' assert bql2sql('select 0.0;') == 'SELECT 0.0;' assert bql2sql('select 1e0;') == 'SELECT 1.0;' assert bql2sql('select 1e+1;') == 'SELECT 10.0;' assert bql2sql('select 1e-1;') == 'SELECT 0.1;' assert bql2sql('select -1e+1;') == 'SELECT (- 10.0);' assert bql2sql('select +1e-1;') == 'SELECT (+ 0.1);' assert bql2sql('select SQRT(1-EXP(-2*value)) FROM bm_mi;') == \ 'SELECT "SQRT"((1 - "EXP"(((- 2) * "value")))) FROM "bm_mi";' assert bql2sql('select .1e0;') == 'SELECT 0.1;' assert bql2sql('select 1.e10;') == 'SELECT 10000000000.0;' assert bql2sql('select all 0;') == 'SELECT 0;' assert bql2sql('select distinct 0;') == 'SELECT DISTINCT 0;' assert bql2sql('select 0 as z;') == 'SELECT 0 AS "z";' assert bql2sql('select * from t;') == 'SELECT * FROM "t";' assert bql2sql('select t.* from t;') == 'SELECT "t".* FROM "t";' assert bql2sql('select c from t;') == 'SELECT "c" FROM "t";' assert bql2sql('select c as d from t;') == 'SELECT "c" AS "d" FROM "t";' assert bql2sql('select t.c as d from t;') == \ 'SELECT "t"."c" AS "d" FROM "t";' assert bql2sql('select t.c as d, p as q, x from t;') == \ 'SELECT "t"."c" AS "d", "p" AS "q", "x" FROM "t";' assert bql2sql('select * from t, u;') == 'SELECT * FROM "t", "u";' assert bql2sql('select * from t as u;') == 'SELECT * FROM "t" AS "u";' assert bql2sql('select * from (select 0);') == 'SELECT * FROM (SELECT 0);' assert bql2sql('select t.c from (select d as c from u) as t;') == \ 'SELECT "t"."c" FROM (SELECT "d" AS "c" FROM "u") AS "t";' assert bql2sql('select * where x;') == 'SELECT * WHERE "x";' assert bql2sql('select * from t where x;') == \ 'SELECT * FROM "t" WHERE "x";' assert bql2sql('select * group by x;') == 'SELECT * GROUP BY "x";' assert bql2sql('select * from t where x group by y;') == \ 'SELECT * FROM "t" WHERE "x" GROUP BY "y";' assert bql2sql('select * from t where x group by y, z;') == \ 'SELECT * FROM "t" WHERE "x" GROUP BY "y", "z";' assert bql2sql('select * from t where x group by y having sum(z) < 1') == \ 'SELECT * FROM "t" WHERE "x" GROUP BY "y" HAVING ("sum"("z") < 1);' assert bql2sql('select * order by x;') == 'SELECT * ORDER BY "x";' assert bql2sql('select * order by x asc;') == 'SELECT * ORDER BY "x";' assert bql2sql('select * order by x desc;') == \ 'SELECT * ORDER BY "x" DESC;' assert bql2sql('select * order by x, y;') == 'SELECT * ORDER BY "x", "y";' assert bql2sql('select * order by x desc, y;') == \ 'SELECT * ORDER BY "x" DESC, "y";' assert bql2sql('select * order by x, y asc;') == \ 'SELECT * ORDER BY "x", "y";' assert bql2sql('select * limit 32;') == 'SELECT * LIMIT 32;' assert bql2sql('select * limit 32 offset 16;') == \ 'SELECT * LIMIT 32 OFFSET 16;' assert bql2sql('select * limit 16, 32;') == 'SELECT * LIMIT 32 OFFSET 16;' assert bql2sql('select (select0);') == 'SELECT "select0";' assert bql2sql('select (select 0);') == 'SELECT (SELECT 0);' assert bql2sql('select f(f(), f(x), y);') == \ 'SELECT "f"("f"(), "f"("x"), "y");' assert bql2sql('select a and b or c or not d is e is not f like j;') == \ 'SELECT ((("a" AND "b") OR "c") OR' \ + ' (NOT ((("d" IS "e") IS NOT "f") LIKE "j")));' assert bql2sql('select a like b not like c like d escape e;') == \ 'SELECT ((("a" LIKE "b") NOT LIKE "c") LIKE "d" ESCAPE "e");' assert bql2sql('select a like b escape c glob d not glob e;') == \ 'SELECT ((("a" LIKE "b" ESCAPE "c") GLOB "d") NOT GLOB "e");' assert bql2sql('select a not glob b glob c escape d;') == \ 'SELECT (("a" NOT GLOB "b") GLOB "c" ESCAPE "d");' assert bql2sql('select a glob b escape c regexp e not regexp f;') == \ 'SELECT ((("a" GLOB "b" ESCAPE "c") REGEXP "e") NOT REGEXP "f");' assert bql2sql('select a not regexp b regexp c escape d;') == \ 'SELECT (("a" NOT REGEXP "b") REGEXP "c" ESCAPE "d");' assert bql2sql('select a regexp b escape c not regexp d escape e;') == \ 'SELECT (("a" REGEXP "b" ESCAPE "c") NOT REGEXP "d" ESCAPE "e");' assert bql2sql('select a not regexp b escape c match e not match f;') == \ 'SELECT ((("a" NOT REGEXP "b" ESCAPE "c") MATCH "e") NOT MATCH "f");' assert bql2sql('select a not match b match c escape d;') == \ 'SELECT (("a" NOT MATCH "b") MATCH "c" ESCAPE "d");' assert bql2sql('select a match b escape c not match d escape e;') == \ 'SELECT (("a" MATCH "b" ESCAPE "c") NOT MATCH "d" ESCAPE "e");' assert bql2sql('select a not match b escape c between d and e;') == \ 'SELECT (("a" NOT MATCH "b" ESCAPE "c") BETWEEN "d" AND "e");' assert bql2sql('select a between b and c and d;') == \ 'SELECT (("a" BETWEEN "b" AND "c") AND "d");' assert bql2sql('select a like b like c escape d between e and f;') == \ 'SELECT ((("a" LIKE "b") LIKE "c" ESCAPE "d") BETWEEN "e" AND "f");' assert bql2sql('select a between b and c not between d and e;') == \ 'SELECT (("a" BETWEEN "b" AND "c") NOT BETWEEN "d" AND "e");' assert bql2sql('select a not between b and c in (select f);') == \ 'SELECT (("a" NOT BETWEEN "b" AND "c") IN (SELECT "f"));' assert bql2sql('select a in (select b) and c not in (select d);') == \ 'SELECT (("a" IN (SELECT "b")) AND ("c" NOT IN (SELECT "d")));' assert bql2sql("select a in (1 + 2, '3') and b not in (select c);") == \ 'SELECT (("a" IN ((1 + 2), \'3\')) AND ("b" NOT IN (SELECT "c")));' assert bql2sql('select a in (select b) isnull notnull!=c<>d<e<=f>g;') == \ 'SELECT ((((("a" IN (SELECT "b")) ISNULL) NOTNULL) != "c") !=' \ + ' ((("d" < "e") <= "f") > "g"));' assert bql2sql('select a>b>=c<<d>>e&f|g+h-i*j/k;') == \ 'SELECT (("a" > "b") >= (((("c" << "d") >> "e") & "f") |' \ + ' (("g" + "h") - (("i" * "j") / "k"))));' assert bql2sql('select a/b%c||~~d collate e collate\'f\'||1;') == \ 'SELECT (("a" / "b") % (("c" || (((~ (~ "d")) COLLATE "e")' \ + ' COLLATE "f")) || 1));' assert bql2sql('select cast(f(x) as binary blob);') == \ 'SELECT CAST("f"("x") AS "binary" "blob");' assert bql2sql('select cast(42 as varint(73));') == \ 'SELECT CAST(42 AS "varint"(73));' assert bql2sql('select cast(f(x, y, z) as varchar(12 ,34));') == \ 'SELECT CAST("f"("x", "y", "z") AS "varchar"(12, 34));' assert bql2sql('select exists (select a) and not exists (select b);') == \ 'SELECT (EXISTS (SELECT "a") AND (NOT EXISTS (SELECT "b")));' assert bql2sql('select case when a - b then c else d end from t;') == \ 'SELECT CASE WHEN ("a" - "b") THEN "c" ELSE "d" END FROM "t";' assert bql2sql('select case f(a) when b + c then d else e end from t;') \ == \ 'SELECT CASE "f"("a") WHEN ("b" + "c") THEN "d" ELSE "e" END FROM "t";' def test_estimate_bql(): # PREDICTIVE PROBABILITY assert bql2sql('estimate predictive probability of weight from p1;') == \ 'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[3]\', \'[]\')' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of (age, weight) ' 'from p1;') == \ 'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[2, 3]\', \'[]\')' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of (age, weight) given ' '(label) from p1;') == \ 'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[2, 3]\', \'[1]\')' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of (*) from p1;') == \ 'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[1, 2, 3]\', \'[]\')' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of (*) given (age, weight) ' 'from p1;') == \ 'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[1]\', \'[2, 3]\')' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of age given (*) ' 'from p1;') == \ 'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[2]\', \'[1, 3]\')' \ ' FROM "t1";' assert bql2sql('estimate label, predictive probability of weight' ' from p1;') \ == \ 'SELECT "label", ' \ 'bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[3]\', \'[]\')' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of weight, label' ' from p1;') \ == \ 'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\ '\'[3]\', \'[]\'),' \ ' "label"' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of weight + 1' ' from p1;') == \ 'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\ '_rowid_, \'[3]\', \'[]\') + 1)' \ ' FROM "t1";' assert bql2sql('estimate predictive probability of weight given (*) + 1' ' from p1;') == \ 'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\ '_rowid_, \'[3]\', \'[1, 2]\') + 1)' \ ' FROM "t1";' # PREDICTIVE PROBABILITY parse and compilation errors. with pytest.raises(parse.BQLParseError): # Need a table. bql2sql('estimate predictive probability of weight;') with pytest.raises(parse.BQLParseError): # Need at most one generator. bql2sql('estimate predictive probability of weight' ' from p1, p1;') with pytest.raises(parse.BQLParseError): # Need a generator name, not a subquery. bql2sql('estimate predictive probability of weight' ' from (select 0);') with pytest.raises(parse.BQLParseError): # Need a column. bql2sql('estimate predictive probability from p1;') with pytest.raises(bayeslite.BQLError): # Using (*) in both targets and constraints. bql2sql('estimate predictive probability of (*) given (*) from p1;') with pytest.raises(bayeslite.BQLError): # Using (weight, *) in targets. bql2sql('estimate predictive probability of (weight, *) given (age) ' 'from p1;') with pytest.raises(bayeslite.BQLError): # Using (age, *) in constraints. bql2sql('estimate predictive probability of weight given (*, age) ' 'from p1;') with pytest.raises(bayeslite.BQLError): # Using duplicate column age. bql2sql('estimate predictive probability of age given (weight, age) ' 'from p1;') # PROBABILITY DENISTY. assert bql2sql('estimate probability density of weight = 20 from p1;') == \ 'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20) FROM "t1";' assert bql2sql('estimate probability density of weight = 20' ' given (age = 8)' ' from p1;') == \ 'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, NULL, 2, 8) FROM "t1";' assert bql2sql('estimate probability density of (weight = 20, age = 8)' ' from p1;') == \ 'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8) FROM "t1";' assert bql2sql('estimate probability density of (weight = 20, age = 8)' " given (label = 'mumble') from p1;") == \ "SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8, NULL, 1, 'mumble')" \ ' FROM "t1";' assert bql2sql('estimate probability density of weight = (c + 1)' ' from p1;') == \ 'SELECT bql_pdf_joint(1, NULL, NULL, 3, ("c" + 1)) FROM "t1";' assert bql2sql('estimate probability density of weight = f(c)' ' from p1;') == \ 'SELECT bql_pdf_joint(1, NULL, NULL, 3, "f"("c")) FROM "t1";' assert bql2sql('estimate similarity to (rowid = 5) ' 'in the context of weight from p1;') == \ 'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \ ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";' assert bql2sql( 'estimate similarity of (rowid = 12) to (rowid = 5) ' 'in the context of weight from p1;') == \ 'SELECT bql_row_similarity(1, NULL, NULL,' \ ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 12)),' \ ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";' assert bql2sql('estimate similarity to (rowid = 5) in the context of age' ' from p1') == \ 'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \ ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";' assert bql2sql( 'estimate similarity of (rowid = 5) to (height = 7 and age < 10)' ' in the context of weight from p1;') == \ 'SELECT bql_row_similarity(1, NULL, NULL,' \ ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)),' \ ' (SELECT _rowid_ FROM "t1" WHERE (("height" = 7) AND ("age" < 10))),' \ ' 3) FROM "t1";' with pytest.raises(bayeslite.BQLError): # Cannot use all variables for similarity. bql2sql( 'estimate similarity to (rowid = 5) in the context of * from p1;') assert bql2sql('estimate similarity to (rowid = 5)' ' in the context of age from p1;') == \ 'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \ ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";' assert bql2sql('estimate dependence probability of age with weight' ' from p1;') == \ 'SELECT bql_column_dependence_probability(1, NULL, NULL, 2, 3) '\ 'FROM "t1";' with pytest.raises(bayeslite.BQLError): # Need both rows fixed. bql2sql('estimate similarity to (rowid=2) in the context of r by p1') with pytest.raises(bayeslite.BQLError): # Need both rows fixed. bql2sql('estimate similarity in the context of r within p1') with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate dependence probability with age from p1;') with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate dependence probability from p1;') assert bql2sql('estimate mutual information of age with weight' + ' from p1;') == \ 'SELECT bql_column_mutual_information('\ '1, NULL, NULL, \'[2]\', \'[3]\', NULL)'\ ' FROM "t1";' assert bql2sql('estimate mutual information of age with weight' + ' using 42 samples from p1;') == \ 'SELECT bql_column_mutual_information('\ '1, NULL, NULL, \'[2]\', \'[3]\', 42)'\ ' FROM "t1";' with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate mutual information with age from p1;') with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate mutual information from p1;') with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate mutual information with age using 42 samples' ' from p1;') with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate mutual information using 42 samples from p1;') # XXX Should be SELECT, not ESTIMATE, here? assert bql2sql('estimate correlation of age with weight from p1;') == \ 'SELECT bql_column_correlation(1, NULL, NULL, 2, 3) FROM "t1";' with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate correlation with age from p1;') with pytest.raises(bayeslite.BQLError): # Need both columns fixed. bql2sql('estimate correlation from p1;') with pytest.raises(BQLError): # Variable must exist. bql2sql('estimate correlation with agee from variables of p1') def test_predict_outside_infer(): with pytest.raises(bayeslite.BQLError): # No PREDICT outside INFER. bql2sql('estimate predict age with confidence 0.9 from p1;') def test_infer_explicit_predict_confidence(): assert bql2sql('infer explicit predict age with confidence 0.9' ' from p1;') == \ 'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL) FROM "t1";' def test_infer_explicit_predict_confidence_nsamples(): assert bql2sql('infer explicit' ' predict age with confidence 0.9 using 42 samples' ' from p1;') == \ 'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42) FROM "t1";' def test_infer_explicit_verbatim_and_predict_confidence(): assert bql2sql('infer explicit rowid, age,' ' predict age confidence age_conf from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age",' \ ' bql_json_get(c2, \'confidence\') AS "age_conf"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \ ' AS c2 FROM "t1");' def test_infer_explicit_verbatim_and_predict_noconfidence(): assert bql2sql('infer explicit rowid, age,' ' predict age from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \ ' AS c2 FROM "t1");' def test_infer_explicit_verbatim_and_predict_confidence_nsamples(): assert bql2sql('infer explicit rowid, age,' ' predict age confidence age_conf using 42 samples from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age",' \ ' bql_json_get(c2, \'confidence\') AS "age_conf"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)' \ ' AS c2 FROM "t1");' def test_infer_explicit_verbatim_and_predict_noconfidence_nsamples(): assert bql2sql('infer explicit rowid, age,' ' predict age using 42 samples from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)' \ ' AS c2 FROM "t1");' def test_infer_explicit_verbatim_and_predict_confidence_as(): assert bql2sql('infer explicit rowid, age,' ' predict age as age_inf confidence age_conf from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age_inf",' \ ' bql_json_get(c2, \'confidence\') AS "age_conf"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \ ' AS c2 FROM "t1");' def test_infer_explicit_verbatim_and_predict_noconfidence_as(): assert bql2sql('infer explicit rowid, age,' ' predict age as age_inf from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age_inf"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \ ' AS c2 FROM "t1");' def test_infer_explicit_verbatim_and_predict_confidence_as_nsamples(): assert bql2sql('infer explicit rowid, age,' ' predict age as age_inf confidence age_conf using 87 samples' ' from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age_inf",' \ ' bql_json_get(c2, \'confidence\') AS "age_conf"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)' \ ' AS c2 FROM "t1");' def test_infer_explicit_verbatim_and_predict_noconfidence_as_nsamples(): assert bql2sql('infer explicit rowid, age,' ' predict age as age_inf using 87 samples' ' from p1') == \ 'SELECT c0 AS "rowid", c1 AS "age",' \ ' bql_json_get(c2, \'value\') AS "age_inf"' \ ' FROM (SELECT "rowid" AS c0, "age" AS c1,' \ ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)' \ ' AS c2 FROM "t1");' def test_infer_auto(): assert bql2sql('infer rowid, age, weight from p1') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))' \ ' AS "weight"' \ ' FROM "t1";' def test_infer_auto_nsamples(): assert bql2sql('infer rowid, age, weight using (1+2) samples from p1') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, (1 + 2)))' \ ' AS "age",' \ ' "IFNULL"("weight",'\ ' bql_predict(1, NULL, NULL, _rowid_, 3, 0, (1 + 2)))' \ ' AS "weight"' \ ' FROM "t1";' def test_infer_auto_with_confidence(): assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))' \ ' AS "age",' \ ' "IFNULL"("weight",'\ ' bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, NULL))' \ ' AS "weight"' \ ' FROM "t1";' def test_infer_auto_with_confidence_nsamples(): assert bql2sql('infer rowid, age, weight with confidence 0.9' ' using sqrt(2) samples' ' from p1') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9,' \ ' "sqrt"(2)))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,' \ ' "sqrt"(2)))' \ ' AS "weight"' \ ' FROM "t1";' def test_infer_auto_with_confidence_where(): assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1' ' where label = \'foo\'') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'\ ' NULL))' \ ' AS "weight"' \ ' FROM "t1"' \ ' WHERE ("label" = \'foo\');' def test_infer_auto_with_confidence_nsamples_where(): assert bql2sql('infer rowid, age, weight with confidence 0.9' ' using 42 samples' ' from p1' ' where label = \'foo\'') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))' \ ' AS "weight"' \ ' FROM "t1"' \ ' WHERE ("label" = \'foo\');' def test_infer_auto_with_confidence_nsamples_where_predict(): assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1' ' where ifnull(label, predict label with confidence 0.7)' ' = \'foo\'') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,' \ ' NULL))' \ ' AS "weight"' \ ' FROM "t1"' \ ' WHERE ("ifnull"("label",' \ ' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, NULL))' \ ' = \'foo\');' def test_infer_auto_with_confidence_nsamples_where_predict_nsamples(): assert bql2sql('infer rowid, age, weight with confidence 0.9' ' using 42 samples' ' from p1' ' where ifnull(label, predict label with confidence 0.7' ' using 73 samples)' ' = \'foo\'') \ == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))' \ ' AS "weight"' \ ' FROM "t1"' \ ' WHERE ("ifnull"("label",' \ ' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, 73))' \ ' = \'foo\');' def test_infer_auto_star(): assert bql2sql('infer rowid, * from p1') == \ 'SELECT "rowid" AS "rowid", "id" AS "id",' \ ' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, NULL))' \ ' AS "label",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))' \ ' AS "weight"' \ ' FROM "t1";' def test_infer_auto_star_nsamples(): assert bql2sql('infer rowid, * using 1 samples from p1') == \ 'SELECT "rowid" AS "rowid", "id" AS "id",' \ ' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, 1))' \ ' AS "label",' \ ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, 1))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, 1))' \ ' AS "weight"' \ ' FROM "t1";' def test_estimate_columns_trivial(): prefix0 = 'SELECT v.name AS name' prefix1 = ' FROM bayesdb_variable AS v' \ ' WHERE v.population_id = 1' \ ' AND v.generator_id IS NULL' prefix = prefix0 + prefix1 assert bql2sql('estimate * from columns of p1;') == \ prefix + ';' assert bql2sql('estimate * from columns of p1 where' + ' (probability density of value 42) > 0.5') == \ prefix + \ ' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 42) > 0.5);' assert bql2sql('estimate * from columns of p1' ' where (probability density of value 8)' ' > (probability density of age = 16)') == \ prefix + \ ' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 8) >' \ ' bql_pdf_joint(1, NULL, NULL, 2, 16));' assert bql2sql('estimate *, probability density of value 8 given (age = 8)' ' from columns of p1;') == \ prefix0 + \ ', bql_column_value_probability(1, NULL, NULL, v.colno, 8, 2, 8)' + \ prefix1 + ';' with pytest.raises(bayeslite.BQLError): bql2sql('estimate probability density of value 8 given (agee = 8)' ' from columns of p1') with pytest.raises(bayeslite.BQLError): # PREDICTIVE PROBABILITY makes no sense without row. bql2sql('estimate * from columns of p1 where' + ' predictive probability of x > 0;') with pytest.raises(bayeslite.BQLError): # SIMILARITY makes no sense without row. bql2sql('estimate * from columns of p1 where' + ' similarity to (rowid = x) in the context of c > 0;') assert bql2sql('estimate * from columns of p1 where' + ' dependence probability with age > 0.5;') == \ prefix + \ ' AND (bql_column_dependence_probability(1, NULL, NULL, 2, v.colno)' \ ' > 0.5);' with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1 where' + ' dependence probability of age with weight > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1' ' where dependence probability > 0.5;') assert bql2sql('estimate * from columns of p1 order by' + ' mutual information with age;') == \ prefix + \ ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2]\','\ ' \'[\' || v.colno || \']\', NULL);' assert bql2sql('estimate * from columns of p1 order by' + ' mutual information with (age, label) using 42 samples;') == \ prefix + \ ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\ ' \'[\' || v.colno || \']\', 42);' assert bql2sql('estimate * from columns of p1 order by' + ' mutual information with (age, label)' ' given (weight=12) using 42 samples;') == \ prefix + \ ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\ ' \'[\' || v.colno || \']\', 42, 3, 12);' with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1 order by' + ' mutual information of age with weight;') with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1' ' where mutual information > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1 order by' + ' mutual information of age with weight using 42 samples;') with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1 where' + ' mutual information using 42 samples > 0.5;') assert bql2sql('estimate * from columns of p1 order by' + ' correlation with age desc;') == \ prefix + ' ORDER BY bql_column_correlation(1, NULL, NULL, 2, v.colno)' \ ' DESC;' with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1 order by' + ' correlation of age with weight;') with pytest.raises(bayeslite.BQLError): # Must omit exactly one column. bql2sql('estimate * from columns of p1 where correlation > 0.5;') with pytest.raises(bayeslite.BQLError): # Makes no sense. bql2sql('estimate * from columns of p1' ' where predict age with confidence 0.9 > 30;') assert bql2sql('estimate' ' *, dependence probability with weight as depprob,' ' mutual information with weight as mutinf' ' from columns of p1' ' where depprob > 0.5 order by mutinf desc') == \ prefix0 + \ ', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \ ' AS "depprob"' \ ', bql_column_mutual_information(1, NULL, NULL, \'[3]\',' \ ' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \ + prefix1 + \ ' AND ("depprob" > 0.5)' \ ' ORDER BY "mutinf" DESC;' assert bql2sql('estimate' ' *, dependence probability with weight as depprob,' ' mutual information with (age, weight) as mutinf' ' from columns of p1' ' where depprob > 0.5 order by mutinf desc') == \ prefix0 + \ ', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \ ' AS "depprob"' \ ', bql_column_mutual_information(1, NULL, NULL, \'[2, 3]\',' \ ' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \ + prefix1 + \ ' AND ("depprob" > 0.5)' \ ' ORDER BY "mutinf" DESC;' # XXX This mixes up target and reference variables, which is OK, # because MI is symmetric, but...oops. assert bql2sql('estimate * from variables of p1' ' where probability of (mutual information with age < 0.1)' ' > 0.8') == \ prefix + \ ' AND ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \ ' FROM (SELECT mi AS "v0" FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[' || v.colno || ']'))) > 0.8);" assert bql2sql('estimate * from variables of p1' ' order by probability of (mutual information with age < 0.1)') ==\ prefix + \ ' ORDER BY (SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \ ' FROM (SELECT mi AS "v0" FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[' || v.colno || ']')));" def test_estimate_pairwise_trivial(): prefix = 'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1, ' infix = ' AS value' infix0 = ' FROM bayesdb_population AS p,' infix0 += ' bayesdb_variable AS v0,' infix0 += ' bayesdb_variable AS v1' infix0 += ' WHERE p.id = 1' infix0 += ' AND v0.population_id = p.id AND v1.population_id = p.id' infix0 += ' AND v0.generator_id IS NULL' infix0 += ' AND v1.generator_id IS NULL' infix += infix0 assert bql2sql('estimate dependence probability' ' from pairwise columns of p1;') == \ prefix + \ 'bql_column_dependence_probability(1, NULL, NULL, v0.colno,'\ ' v1.colno)' + \ infix + ';' assert bql2sql('estimate mutual information' ' from pairwise columns of p1 where' ' (probability density of age = 0) > 0.5;') == \ prefix + \ 'bql_column_mutual_information(1, NULL, NULL, '\ '\'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)' + \ infix + \ ' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);' assert bql2sql('estimate mutual information given (label=\'go\', weight)' ' from pairwise columns of p1 where' ' (probability density of age = 0) > 0.5;') == \ prefix + \ 'bql_column_mutual_information(1, NULL, NULL,'\ ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL,'\ ' 1, \'go\', 3, NULL)' + \ infix + \ ' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);' with pytest.raises(bayeslite.BQLError): # PROBABILITY DENSITY OF VALUE is 1-column. bql2sql('estimate correlation from pairwise columns of p1 where' + ' (probability density of value 0) > 0.5;') with pytest.raises(bayeslite.BQLError): # PREDICTIVE PROBABILITY OF is a row function. bql2sql('estimate dependence probability' ' from pairwise columns of p1' + ' where predictive probability of x > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate dependence probability' ' from pairwise columns of p1' ' where dependence probability of age with weight > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate mutual information from pairwise columns of p1' ' where dependence probability with weight > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate mutual information using 42 samples' ' from pairwise columns of p1' ' where dependence probability with weight > 0.5;') assert bql2sql('estimate correlation from pairwise columns of p1' ' where dependence probability > 0.5;') == \ prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \ infix + ' AND' \ ' (bql_column_dependence_probability(1, NULL, NULL, v0.colno,' \ ' v1.colno)' \ ' > 0.5);' with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate dependence probability' ' from pairwise columns of p1' ' where mutual information of age with weight > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate dependence probability' ' from pairwise columns of p1' ' where mutual information of age with weight using 42 samples' ' > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate mutual information from pairwise columns of p1' ' where mutual information with weight > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate mutual information using 42 samples' ' from pairwise columns of p1' ' where mutual information with weight using 42 samples > 0.5;') assert bql2sql('estimate correlation from pairwise columns of p1' + ' where mutual information > 0.5;') == \ prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \ infix + ' AND' + \ ' (bql_column_mutual_information(1, NULL, NULL,'\ ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL) > 0.5);' assert bql2sql('estimate correlation from pairwise columns of p1' + ' where mutual information using 42 samples > 0.5;') == \ prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \ infix + ' AND' + \ ' (bql_column_mutual_information(1, NULL, NULL,'\ ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', 42) > 0.5);' with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate dependence probability' ' from pairwise columns of p1' ' where correlation of age with weight > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate mutual information from pairwise columns of p1' ' where correlation with weight > 0.5;') with pytest.raises(bayeslite.BQLError): # Must omit both columns. bql2sql('estimate mutual information using 42 samples' ' from pairwise columns of p1' ' where correlation with weight > 0.5;') assert bql2sql('estimate correlation from pairwise columns of p1' ' where correlation > 0.5;') == \ prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \ infix + ' AND' + \ ' (bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno) > 0.5);' with pytest.raises(bayeslite.BQLError): # Makes no sense. bql2sql('estimate dependence probability' ' from pairwise columns of p1' ' where predict age with confidence 0.9 > 30;') assert bql2sql('estimate dependence probability as depprob,' ' mutual information as mutinf' ' from pairwise columns of p1' ' where depprob > 0.5 order by mutinf desc') == \ prefix + \ 'bql_column_dependence_probability(1, NULL, NULL, v0.colno, v1.colno)' \ ' AS "depprob",' \ ' bql_column_mutual_information(1, NULL, NULL,'\ ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)'\ ' AS "mutinf"' \ + infix0 + \ ' AND ("depprob" > 0.5)' \ ' ORDER BY "mutinf" DESC;' def test_estimate_pairwise_row(): prefix = 'SELECT r0._rowid_ AS rowid0, r1._rowid_ AS rowid1' infix = ' AS value FROM "t1" AS r0, "t1" AS r1' assert bql2sql('estimate similarity in the context of age' + ' from pairwise p1;') == \ prefix + ', bql_row_similarity(1, NULL, NULL,'\ ' r0._rowid_, r1._rowid_, 2)' + \ infix + ';' with pytest.raises(bayeslite.BQLError): # PREDICT is a 1-row function. bql2sql('estimate predict age with confidence 0.9 from pairwise t1;') def test_estimate_pairwise_selected_columns(): assert bql2sql('estimate dependence probability' ' from pairwise columns of p1 for label, age') == \ 'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \ ' bql_column_dependence_probability(1, NULL, NULL,' \ ' v0.colno, v1.colno)' \ ' AS value' \ ' FROM bayesdb_population AS p,' \ ' bayesdb_variable AS v0,' \ ' bayesdb_variable AS v1' \ ' WHERE p.id = 1' \ ' AND v0.population_id = p.id AND v1.population_id = p.id' \ ' AND v0.generator_id IS NULL AND v1.generator_id IS NULL' \ ' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);' assert bql2sql('estimate dependence probability' ' from pairwise columns of p1' ' for (ESTIMATE * FROM COLUMNS OF p1' ' ORDER BY name DESC LIMIT 2)') == \ 'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \ ' bql_column_dependence_probability(1, NULL, NULL, v0.colno,' \ ' v1.colno)' \ ' AS value' \ ' FROM bayesdb_population AS p,' \ ' bayesdb_variable AS v0,' \ ' bayesdb_variable AS v1' \ ' WHERE p.id = 1' \ ' AND v0.population_id = p.id AND v1.population_id = p.id' \ ' AND v0.generator_id IS NULL AND v1.generator_id IS NULL' \ ' AND v0.colno IN (3, 1) AND v1.colno IN (3, 1);' def test_select_columns_subquery(): assert bql2sql('select id, t1.(estimate * from columns of p1' ' order by name asc limit 2) from t1') == \ 'SELECT "id", "t1"."age", "t1"."label" FROM "t1";' @pytest.mark.xfail(strict=True, reason='no simulate vars from models of') def test_simulate_models_columns_subquery(): assert bql2sql('simulate weight, t1.(estimate * from columns of p1' ' order by name asc limit 2) from models of p1') == \ 'SELECT * FROM "bayesdb_temp_0";' assert bql2sql('simulate 0, weight, t1.(estimate * from columns of p1' ' order by name asc limit 2) from models of p1') == \ 'SELECT 0, "v0" AS "weight", "v1" AS "age", "v2" AS "label" FROM' \ ' (SELECT * FROM "bayesdb_temp_0");' assert bql2sql('simulate weight + 1, t1.(estimate * from columns of p1' ' order by name asc limit 2) from models of p1') == \ 'SELECT ("v0" + 1), "v1" AS "age", "v2" AS "label" FROM' \ ' (SELECT * FROM "bayesdb_temp_0");' assert bql2sql('simulate weight + 1 AS wp1,' ' t1.(estimate * from columns of p1' ' order by name asc limit 2) from models of p1') == \ 'SELECT ("v0" + 1) AS "wp1", "v1" AS "age", "v2" AS "label" FROM' \ ' (SELECT * FROM "bayesdb_temp_0");' def test_simulate_columns_subquery(): # XXX This test is a little unsatisfactory -- we do not get to see # what the variables in the result are named... assert bql2sql('simulate weight, t1.(estimate * from columns of p1' ' order by name asc limit 2) from p1 limit 10') == \ 'SELECT * FROM "bayesdb_temp_0";' with pytest.raises(parse.BQLParseError): # Compound columns not yet implemented for SIMULATE. bql2sql('simulate weight + 1, t1.(estimate * from columns of p1' ' order by name asc limit 2) from p1 limit 10') def test_simulate_models(): # Base case. assert bql2sql('simulate mutual information of age with weight' ' from models of p1') == \ 'SELECT mi FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[3]';" # Multiple target variables. assert bql2sql('simulate mutual information of (label, age) with weight' ' from models of p1') == \ 'SELECT mi FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[1, 2]'" \ " AND reference_vars = '[3]';" # Multiple reference variables. assert bql2sql('simulate mutual information of age with (label, weight)' ' from models of p1') == \ 'SELECT mi FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[1, 3]';" # Specified number of samples. assert bql2sql('simulate mutual information of age with weight' ' using 42 samples from models of p1') == \ 'SELECT mi FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[3]'" \ ' AND nsamples = 42;' # Conditional. assert bql2sql('simulate mutual information of age with weight' " given (label = 'foo') from models of p1") == \ 'SELECT mi FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[3]'" \ " AND conditions = '{\"1\": \"foo\"}';" # Modeled by a specific generator. assert bql2sql('simulate mutual information of age with weight' ' from models of p1 modeled by g1', lambda bdb: bdb.execute('create generator g1 for p1')) == \ 'SELECT mi FROM bql_mutinf' \ ' WHERE population_id = 1' \ ' AND generator_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[3]';" # Two mutual informations. assert bql2sql('simulate mutual information of age with weight AS "mi(aw)",' ' mutual information of label with weight AS "mi(lw)"' ' from models of p1') == \ 'SELECT t0."mi(aw)" AS "mi(aw)", t1."mi(lw)" AS "mi(lw)"' \ ' FROM (SELECT _rowid_, mi AS "mi(aw)" FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[3]') AS t0," \ ' (SELECT _rowid_, mi AS "mi(lw)" FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[1]'" \ " AND reference_vars = '[3]') AS t1" \ ' WHERE t0._rowid_ = t1._rowid_;' def test_probability_of_mutinf(): assert bql2sql('estimate probability of' ' (mutual information of age with weight < 0.1) > 0.5' ' within p1') == \ 'SELECT ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \ ' FROM (SELECT mi AS "v0" FROM bql_mutinf' \ ' WHERE population_id = 1' \ " AND target_vars = '[2]'" \ " AND reference_vars = '[3]'))) > 0.5);" def test_modeledby_usingmodels_trival(): def setup(bdb): bdb.execute('create generator m1 for p1 using cgpm;') assert bql2sql('estimate predictive probability of weight + 1' ' from p1 modeled by m1 using models 1-3, 5;', setup=setup) == \ 'SELECT (bql_row_column_predictive_probability(1, 1, \'[1, 2, 3, 5]\','\ ' _rowid_, \'[3]\', \'[]\') + 1)' \ ' FROM "t1";' assert bql2sql( 'infer rowid, age, weight from p1 modeled by m1 using model 7', setup=setup) == \ 'SELECT "rowid" AS "rowid",' \ ' "IFNULL"("age", bql_predict(1, 1, \'[7]\', _rowid_, 2, 0, NULL))' \ ' AS "age",' \ ' "IFNULL"("weight", bql_predict(1, 1, \'[7]\', _rowid_, 3, 0, NULL))' \ ' AS "weight"' \ ' FROM "t1";' assert bql2sql('infer explicit predict age with confidence 0.9' ' from p1 using models 0, 3-5;', setup=setup) == \ 'SELECT bql_predict(1, NULL, \'[0, 3, 4, 5]\', _rowid_, 2, 0.9, NULL)'\ ' FROM "t1";' assert bql2sql(''' estimate predictive relevance of (label = 'Uganda') to existing rows (rowid < 4) and hypothetical rows with values ( ("age" = 82, "weight" = 14), ("age" = 74, label = 'Europe', "weight" = 7) ) in the context of "weight" by p1 modeled by m1 using models 8, 10-12 ''', setup=setup) == \ 'SELECT bql_row_predictive_relevance(1, 1, \'[8, 10, 11, 12]\', ' \ '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\ '\'[1, 2, 3]\', 3, '\ '2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);' assert bql2sql(''' estimate dependence probability from pairwise columns of p1 for label, age modeled by m1 using models 1, 4, 12 ''', setup=setup) == \ 'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \ ' bql_column_dependence_probability(1, 1, \'[1, 4, 12]\',' \ ' v0.colno, v1.colno)' \ ' AS value' \ ' FROM bayesdb_population AS p,' \ ' bayesdb_variable AS v0,' \ ' bayesdb_variable AS v1' \ ' WHERE p.id = 1' \ ' AND v0.population_id = p.id AND v1.population_id = p.id' \ ' AND (v0.generator_id IS NULL OR v0.generator_id = 1)' \ ' AND (v1.generator_id IS NULL OR v1.generator_id = 1)' \ ' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);' assert bql2sql(''' estimate mutual information of age with weight from p1 modeled by m1 using model 1; ''', setup=setup) == \ 'SELECT bql_column_mutual_information('\ '1, 1, \'[1]\', \'[2]\', \'[3]\', NULL)'\ ' FROM "t1";' def test_simulate_columns_all(): with pytest.raises(parse.BQLParseError): bql2sql('simulate * from p1 limit 1') def test_trivial_commands(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): # XXX Query parameters! with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) with open(fname, 'rU') as f: with pytest.raises(ValueError): bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True, ifnotexists=True) guess.bayesdb_guess_population(bdb, 'p', 't') with pytest.raises(ValueError): guess.bayesdb_guess_population(bdb, 'p', 't') guess.bayesdb_guess_population(bdb, 'p', 't', ifnotexists=True) bdb.execute('create generator p_cc for p;') bdb.execute('initialize 2 models for p_cc') with pytest.raises(bayeslite.BQLError): bdb.execute('initialize 2 models for p_cc') bdb.execute('drop models from p_cc') bdb.execute('drop models from p_cc') bdb.execute('initialize 2 models for p_cc') with pytest.raises(bayeslite.BQLError): bdb.execute('initialize 2 models for p_cc') with pytest.raises(bayeslite.BQLError): bdb.execute('drop models 0-2 from p_cc') bdb.execute('drop models 0-1 from p_cc') with bdb.savepoint(): bdb.execute('initialize 2 models for p_cc') bdb.execute('drop models 0-1 from p_cc') with pytest.raises(bayeslite.BQLError): bdb.execute('drop models 0-1 from p_cc') bdb.execute('initialize 2 models for p_cc') bdb.execute('initialize 1 model if not exists for p_cc') bdb.execute('initialize 2 models if not exists for p_cc') population_id = core.bayesdb_get_population(bdb, 'p') generator_id = core.bayesdb_get_generator(bdb, population_id, 'p_cc') assert core.bayesdb_generator_table(bdb, generator_id) == 't' bdb.execute('alter table t rename to t') assert core.bayesdb_generator_table(bdb, generator_id) == 't' bdb.execute('alter table t rename to T') assert core.bayesdb_generator_table(bdb, generator_id) == 'T' bdb.execute('alter population p rename to p') assert core.bayesdb_population_name(bdb, population_id) == 'p' bdb.execute('alter population p rename to p2') assert core.bayesdb_population_name(bdb, population_id) == 'p2' bdb.execute('alter population p2 rename to p') assert core.bayesdb_population_name(bdb, population_id) == 'p' bdb.execute('estimate count(*) from p').fetchall() bdb.execute('alter table t rename to t') assert core.bayesdb_generator_table(bdb, generator_id) == 't' bdb.execute('alter generator p_cc rename to p0_cc') assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc' bdb.execute('alter generator p0_cc rename to zot, rename to P0_CC') assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_CC' bdb.execute('alter generator P0_cc rename to P0_cc') assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_cc' bdb.execute('alter generator p0_CC rename to p0_cc') assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc' bdb.execute('estimate count(*) from p').fetchall() with pytest.raises(bayeslite.BQLError): bdb.execute('estimate count(*) from p_cc') bdb.execute('alter generator p0_cc rename to P0_cc') bdb.execute('analyze p0_cc for 1 iteration') colno = core.bayesdb_variable_number(bdb, population_id, generator_id, 'gender') with pytest.raises(parse.BQLParseError): # Rename the table's columns, not the generator's columns. bdb.execute('alter generator p0_cc rename gender to sex') with pytest.raises(NotImplementedError): # XXX bdb.execute('alter table t rename to t0, rename gender to sex') assert core.bayesdb_variable_number( bdb, population_id, generator_id, 'sex') \ == colno bdb.execute('analyze p0_cc model 0 for 1 iteration') bdb.execute('alter generator p0_cc rename to p_cc') assert core.bayesdb_variable_number( bdb, population_id, generator_id, 'sex') \ == colno bdb.execute('select sex from t0').fetchall() with pytest.raises(AssertionError): # XXX bdb.execute('select gender from t0') assert False, 'Need to fix quoting of unknown columns!' with pytest.raises(bayeslite.BQLError): bdb.execute('estimate predict sex with confidence 0.9' ' from p').fetchall() bdb.execute('infer explicit predict sex with confidence 0.9' ' from p').fetchall() with pytest.raises(bayeslite.BQLError): bdb.execute('estimate predict gender with confidence 0.9' ' from p') with pytest.raises(bayeslite.BQLError): bdb.execute('infer explicit predict gender with confidence 0.9' ' from p') bdb.execute('alter table t0 rename sex to gender') assert core.bayesdb_variable_number( bdb, population_id, generator_id, 'gender') \ == colno bdb.execute('alter generator p0_cc rename to p_cc') # XXX bdb.execute('alter table t rename to T0') # XXX bdb.sql_execute('create table t0_temp(x)') bdb.execute('alter table T0 rename to t0') assert bdb.execute('select count(*) from t0_temp').fetchvalue() == 0 assert bdb.execute('select count(*) from t0').fetchvalue() > 0 with pytest.raises(bayeslite.BQLError): # Cannot specify models with rename. bdb.execute('alter generator p_cc models (1) rename to p_cc_fail') bdb.execute('drop table T0_TEMP') bdb.execute('analyze p_cc model 0 for 1 iteration') bdb.execute('analyze p_cc model 1 for 1 iteration') bdb.execute('analyze p_cc models 0-1 for 1 iteration') bdb.execute('analyze p_cc models 0,1 for 1 iteration') bdb.execute('analyze p_cc for 1 iteration') bdb.execute('select * from t0').fetchall() bdb.execute('select * from T0').fetchall() bdb.execute('estimate * from p').fetchall() bdb.execute('estimate * from P').fetchall() # SIMIARITY IN THE CONTEXT OF requires exactly 1 variable. with pytest.raises(bayeslite.BQLError): bdb.execute('estimate similarity in the context of * ' 'from pairwise p').fetchall() bdb.execute('estimate similarity in the context of age ' 'from pairwise p').fetchall() bdb.execute('alter population p rename to p2') assert core.bayesdb_population_name(bdb, population_id) == 'p2' bdb.execute('estimate similarity to (rowid=1) in the context of rank ' 'from p2').fetchall() bdb.execute('select value from' ' (estimate correlation from pairwise columns of p2)').fetchall() bdb.execute('infer explicit predict age with confidence 0.9' ' from p2').fetchall() bdb.execute('infer explicit predict AGE with confidence 0.9' ' from P2').fetchall() bdb.execute('infer explicit predict aGe with confidence 0.9' ' from P2').fetchall() with pytest.raises(bayeslite.BQLError): bdb.execute('estimate predict agee with confidence 0.9 from p2') with pytest.raises(bayeslite.BQLError): bdb.execute('infer explicit predict agee with confidence 0.9' ' from p2') guess.bayesdb_guess_population(bdb, 'pe', 't0', overrides=[ ('age', 'numerical'), ('rank', 'numerical'), ]) bdb.execute('create generator pe_cc for pe;') with pytest.raises(bayeslite.BQLError): # No models to analyze. bdb.execute('analyze pe_cc for 1 iteration') bdb.execute('initialize 1 model if not exists for pe_cc') bdb.execute('analyze pe_cc for 1 iteration') bdb.execute('estimate correlation' ' from pairwise columns of pe').fetchall() with pytest.raises(bayeslite.BQLError): bdb.execute('initialize 4 models if not exists for t') with pytest.raises(bayeslite.BQLError): bdb.execute('analyze t0 for 1 iteration') with pytest.raises(bayeslite.BQLError): bdb.execute('estimate * from t') with pytest.raises(bayeslite.BQLError): bdb.execute('estimate * from columns of t') with pytest.raises(bayeslite.BQLError): bdb.execute('estimate correlation from pairwise columns of t') with pytest.raises(bayeslite.BQLError): bdb.execute('estimate similarity in the context of age ' 'from pairwise t') bdb.execute('initialize 6 models if not exists for p_cc') bdb.execute('analyze p_cc for 1 iteration') def test_trivial_deadline(): with test_core.t1() as (bdb, _population_id, _generator_id): bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 1 second') def test_parametrized(): assert bql2sqlparam('select * from t where id = ?') == \ 'SELECT * FROM "t" WHERE ("id" = ?1);' assert bql2sqlparam('select * from t where id = :foo') == \ 'SELECT * FROM "t" WHERE ("id" = ?1);' assert bql2sqlparam('select * from t where id = $foo') == \ 'SELECT * FROM "t" WHERE ("id" = ?1);' assert bql2sqlparam('select * from t where id = @foo') == \ 'SELECT * FROM "t" WHERE ("id" = ?1);' assert bql2sqlparam('select * from t where id = ?123') == \ 'SELECT * FROM "t" WHERE ("id" = ?1);' assert bql2sqlparam('select * from t where a = $foo and b = ?1;') == \ 'SELECT * FROM "t" WHERE (("a" = ?1) AND ("b" = ?1));' assert bql2sqlparam('select * from t' + ' where a = ?123 and b = :foo and c = ?124') == \ 'SELECT * FROM "t" WHERE' + \ ' ((("a" = ?1) AND ("b" = ?2)) AND ("c" = ?2));' with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) assert bql_execute(bdb, 'select count(*) from t') == [(7,)] assert bql_execute(bdb, 'select count(distinct division) from t') == \ [(6,)] assert bql_execute(bdb, 'select * from t where height > ?', (70,)) == \ [ (41, 'M', 65600, 72, 'marketing', 4), (30, 'M', 70000, 73, 'sales', 4), (30, 'F', 81000, 73, 'engineering', 3), ] assert bql_execute(bdb, 'select * from t where height > ?123', (0,)*122 + (70,)) == \ [ (41, 'M', 65600, 72, 'marketing', 4), (30, 'M', 70000, 73, 'sales', 4), (30, 'F', 81000, 73, 'engineering', 3), ] assert bql_execute(bdb, 'select age from t where division = :division', {':division': 'sales'}) == \ [(34,), (30,)] assert bql_execute(bdb, 'select division from t' + ' where age < @age and rank > ?;', (40, 4)) == \ [('accounting',)] assert bql_execute(bdb, 'select division from t' + ' where age < @age and rank > :rank;', {':RANK': 4, '@aGe': 40}) == \ [('accounting',)] with pytest.raises(ValueError): bdb.execute('select * from t where age < ? and rank > :r', {':r': 4}) def traced_execute(query, *args): bql = [] def trace(string, _bindings): bql.append(' '.join(string.split())) bdb.trace(trace) with bdb.savepoint(): bdb.execute(query, *args) bdb.untrace(trace) return bql def sqltraced_execute(query, *args): sql = [] def trace(string, _bindings): sql.append(' '.join(string.split())) bdb.sql_trace(trace) with bdb.savepoint(): bdb.execute(query, *args) bdb.sql_untrace(trace) return sql guess.bayesdb_guess_population(bdb, 'p', 't') bdb.execute('create generator p_cc for p;') bdb.execute('initialize 1 model for p_cc;') assert traced_execute('estimate similarity to (rowid = 1)' ' in the context of (estimate * from columns of p limit 1)' ' from p;') == [ 'estimate similarity to (rowid = 1)' \ ' in the context of (estimate * from columns of p limit 1)' \ ' from p;', ] assert sqltraced_execute('estimate similarity to (rowid = 1)' ' in the context of (estimate * from columns of p limit 1)' ' from p;') == [ 'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?', 'SELECT id FROM bayesdb_population WHERE name = ?', 'SELECT tabname FROM bayesdb_population WHERE id = ?', 'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?', 'SELECT id FROM bayesdb_population WHERE name = ?', 'SELECT v.name AS name FROM bayesdb_variable AS v' ' WHERE v.population_id = 1' ' AND v.generator_id IS NULL' ' LIMIT 1', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT tabname FROM bayesdb_population' ' WHERE id = ?', 'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' ' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"', 'SELECT id FROM bayesdb_generator WHERE population_id = ?', 'SELECT backend FROM bayesdb_generator WHERE id = ?', 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual' ' WHERE generator_id = ? AND table_rowid = ?', 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual ' 'WHERE generator_id = ? AND table_rowid = ?', 'SELECT engine_stamp FROM bayesdb_cgpm_generator ' 'WHERE generator_id = ?' ] assert sqltraced_execute('estimate similarity to (rowid = 1)' ' in the context of (estimate * from columns of p limit ?)' ' from p;', (1,)) == [ 'SELECT COUNT(*) FROM bayesdb_population' ' WHERE name = ?', 'SELECT id FROM bayesdb_population' ' WHERE name = ?', 'SELECT tabname FROM bayesdb_population WHERE id = ?', 'SELECT COUNT(*) FROM bayesdb_population' ' WHERE name = ?', 'SELECT id FROM bayesdb_population' ' WHERE name = ?', # ESTIMATE * FROM COLUMNS OF: 'SELECT v.name AS name' ' FROM bayesdb_variable AS v' ' WHERE v.population_id = 1' ' AND v.generator_id IS NULL' ' LIMIT ?1', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT tabname FROM bayesdb_population WHERE id = ?', # ESTIMATE SIMILARITY TO (rowid=1): 'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' ' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"', 'SELECT id FROM bayesdb_generator WHERE population_id = ?', 'SELECT backend FROM bayesdb_generator WHERE id = ?', 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual' ' WHERE generator_id = ? AND table_rowid = ?', 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual' ' WHERE generator_id = ? AND table_rowid = ?', 'SELECT engine_stamp FROM bayesdb_cgpm_generator' ' WHERE generator_id = ?' ] assert sqltraced_execute( 'create temp table if not exists sim as ' 'simulate age, RANK, division ' 'from p given gender = \'F\' limit 4') == [ 'PRAGMA table_info("sim")', 'PRAGMA table_info("bayesdb_temp_0")', 'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?', 'SELECT id FROM bayesdb_population WHERE name = ?', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT CAST(4 AS INTEGER), \'F\'', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT tabname FROM bayesdb_population WHERE id = ?', 'SELECT MAX(_rowid_) FROM "t"', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT id FROM bayesdb_generator' ' WHERE population_id = ?', 'SELECT backend FROM bayesdb_generator WHERE id = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT tabname FROM bayesdb_population WHERE id = ?', 'SELECT 1 FROM "t" WHERE oid = ?', 'SELECT 1 FROM bayesdb_cgpm_individual' ' WHERE generator_id = ? AND table_rowid = ? LIMIT 1', 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual' ' WHERE generator_id = ? AND table_rowid = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT code FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND value = ?', 'SELECT engine_stamp FROM bayesdb_cgpm_generator' ' WHERE generator_id = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'CREATE TEMP TABLE "bayesdb_temp_0"' ' ("age","RANK","division")', 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")' ' VALUES (?,?,?)', 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")' ' VALUES (?,?,?)', 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")' ' VALUES (?,?,?)', 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")' ' VALUES (?,?,?)', 'CREATE TEMP TABLE IF NOT EXISTS "sim" AS' ' SELECT * FROM "bayesdb_temp_0"', 'DROP TABLE "bayesdb_temp_0"' ] assert sqltraced_execute( 'select * from (simulate age from p ' 'given gender = \'F\' limit 4)') == [ 'PRAGMA table_info("bayesdb_temp_1")', 'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?', 'SELECT id FROM bayesdb_population WHERE name = ?', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT CAST(4 AS INTEGER), \'F\'', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT COUNT(*) FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT colno FROM bayesdb_variable' ' WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?)' ' AND name = ?', 'SELECT tabname FROM bayesdb_population WHERE id = ?', 'SELECT MAX(_rowid_) FROM "t"', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT token FROM bayesdb_rowid_tokens', 'SELECT id FROM bayesdb_generator WHERE population_id = ?', 'SELECT backend FROM bayesdb_generator WHERE id = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT tabname FROM bayesdb_population WHERE id = ?', 'SELECT 1 FROM "t" WHERE oid = ?', 'SELECT 1 FROM bayesdb_cgpm_individual' ' WHERE generator_id = ? AND table_rowid = ? LIMIT 1', 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual' ' WHERE generator_id = ? AND table_rowid = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT code FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND value = ?', 'SELECT engine_stamp FROM bayesdb_cgpm_generator' ' WHERE generator_id = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?' ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?', 'SELECT value FROM bayesdb_cgpm_category' ' WHERE generator_id = ? AND colno = ? AND code = ?', 'CREATE TEMP TABLE "bayesdb_temp_1" ("age")', 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)', 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)', 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)', 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)', 'SELECT * FROM (SELECT * FROM "bayesdb_temp_1")', 'DROP TABLE "bayesdb_temp_1"', ] bdb.execute(''' create population q for t ( age NUMERICAL; gender NOMINAL; -- Not binary! salary NUMERICAL; height NUMERICAL; division NOMINAL; rank NOMINAL; ) ''') bdb.execute('create generator q_cc for q;') bdb.execute('initialize 1 model for q_cc;') assert sqltraced_execute('analyze q_cc for 1 iteration;') == [ 'SELECT COUNT(*) FROM bayesdb_generator WHERE name = ?', 'SELECT id FROM bayesdb_generator WHERE name = ?', 'SELECT backend FROM bayesdb_generator WHERE id = ?', 'SELECT engine_json, engine_stamp FROM bayesdb_cgpm_generator' ' WHERE generator_id = ?', 'SELECT population_id FROM bayesdb_generator WHERE id = ?', 'SELECT engine_stamp FROM bayesdb_cgpm_generator' ' WHERE generator_id = ?', 'UPDATE bayesdb_cgpm_generator' ' SET engine_json = :engine_json, engine_stamp = :engine_stamp' ' WHERE generator_id = :generator_id'] def test_create_table_ifnotexists_as_simulate(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) # If not exists table tests guess.bayesdb_guess_population(bdb, 'p', 't', overrides=[('age', 'numerical')]) bdb.execute('create generator p_cc for p;') bdb.execute('initialize 1 model for p_cc') bdb.execute('analyze p_cc for 1 iteration') bdb.execute(''' create table if not exists u as simulate age from p limit 10 ''') bdb.execute("drop table u") bdb.execute(''' create table if not exists w as simulate age from p given division='sales' limit 10 ''') bdb.execute("drop table w") bdb.execute("create table u as simulate age from p limit 10") x = bdb.execute("select count (*) from u").fetchvalue() bdb.execute(''' create table if not exists u as simulate age from p limit 10 ''') bdb.execute(''' create table if not exists u as simulate age from p given division='sales' limit 10 ''') assert x == bdb.execute("select count (*) from u").fetchvalue() def test_createtab(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): with pytest.raises(apsw.SQLError): bdb.execute('drop table t') bdb.execute('drop table if exists t') with pytest.raises(bayeslite.BQLError): bdb.execute('drop population p') bdb.execute('drop population if exists p') with pytest.raises(bayeslite.BQLError): bdb.execute('drop generator p_cc') bdb.execute('drop generator if exists p_cc') with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) with bdb.savepoint(): # Savepoint because we don't actually want the new data to # be inserted. with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True, ifnotexists=True) guess.bayesdb_guess_population(bdb, 'p', 't', overrides=[('age', 'numerical')]) bdb.execute('create generator p_cc for p;') with pytest.raises(bayeslite.BQLError): # Redefining population. bdb.execute('create population p for t (age numerical)') with pytest.raises(bayeslite.BQLError): # Redefining generator. bdb.execute('create generator p_cc for p;') # Make sure ignore columns work. # # XXX Also check key columns. guess.bayesdb_guess_population(bdb, 'p0', 't', overrides=[('age', 'ignore')]) bdb.execute('drop population p0') population_id = core.bayesdb_get_population(bdb, 'p') colno = core.bayesdb_variable_number(bdb, population_id, None, 'age') assert core.bayesdb_variable_stattype( bdb, population_id, None, colno) == 'numerical' bdb.execute('initialize 1 model for p_cc') with pytest.raises(bayeslite.BQLError): bdb.execute('drop table t') with pytest.raises(bayeslite.BQLError): bdb.execute('drop population p') bdb.execute('drop generator p_cc') with pytest.raises(bayeslite.BQLError): bdb.execute('drop generator p_cc') with pytest.raises(bayeslite.BQLError): bdb.execute('drop table t') bdb.execute('drop generator if exists p_cc') bdb.execute('drop population p') bdb.execute('drop population if exists p') bdb.execute('drop table t') bdb.execute('drop table if exists t') with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) guess.bayesdb_guess_population(bdb, 'p', 't') bdb.execute("create table u as select * from t where gender = 'F'") assert bql_execute(bdb, 'select * from u') == [ (23, 'F', 81000, 67, 'data science', 3), (36, 'F', 96000, 70, 'management', 2), (30, 'F', 81000, 73, 'engineering', 3), ] with pytest.raises(bayeslite.BQLError): bdb.execute("create table u as select * from t where gender = 'F'") bdb.execute('drop table u') with pytest.raises(apsw.SQLError): bql_execute(bdb, 'select * from u') bdb.execute("create temp table u as" " select * from t where gender = 'F'") assert bql_execute(bdb, 'select * from u') == [ (23, 'F', 81000, 67, 'data science', 3), (36, 'F', 96000, 70, 'management', 2), (30, 'F', 81000, 73, 'engineering', 3), ] # XXX Test to make sure TEMP is passed through, and the table # doesn't persist on disk. def test_alterpop_addvar(): with bayeslite.bayesdb_open() as bdb: bayeslite.bayesdb_read_csv( bdb, 't', StringIO.StringIO(test_csv.csv_data), header=True, create=True) bdb.execute(''' create population p for t with schema( age numerical; gender nominal; salary numerical; height ignore; division ignore; rank ignore; ) ''') population_id = core.bayesdb_get_population(bdb, 'p') bdb.execute('create generator m for p;') # Fail when variable does not exist in base table. with pytest.raises(bayeslite.BQLError): bdb.execute('alter population p add variable quux;') # Fail when variable already in population. with pytest.raises(bayeslite.BQLError): bdb.execute('alter population p add variable age numerical;') # Fail when given invalid statistical type. with pytest.raises(bayeslite.BQLError): bdb.execute('alter population p add variable heigh numr;') # Alter pop with stattype. assert not core.bayesdb_has_variable(bdb, population_id, None, 'height') bdb.execute('alter population p add variable height numerical;') assert core.bayesdb_has_variable(bdb, population_id, None, 'height') # Alter pop multiple without stattype. assert not core.bayesdb_has_variable(bdb, population_id, None, 'rank') assert not core.bayesdb_has_variable( bdb, population_id, None, 'division') bdb.execute(''' alter population p add variable rank, add variable division; ''') assert core.bayesdb_has_variable(bdb, population_id, None, 'rank') assert core.bayesdb_has_variable(bdb, population_id, None, 'division') # Add a new column weight to the base table. bdb.sql_execute('alter table t add column weight real;') # Fail when no values in new column. with pytest.raises(bayeslite.BQLError): bdb.execute('alter population p add variable weight numerical;') assert not core.bayesdb_has_variable(bdb, population_id, None, 'weight') # Update a single value and update the population. bdb.sql_execute('update t set weight = 1 where oid = 1;') bdb.execute('alter population p add variable weight numerical;') assert core.bayesdb_has_variable(bdb, population_id, None, 'weight') def test_txn(): with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname): # Make sure rollback and commit fail outside a transaction. with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('ROLLBACK') with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('COMMIT') # Open a transaction which we'll roll back. bdb.execute('BEGIN') try: # Make sure transactions don't nest. (Use savepoints.) with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('BEGIN') finally: bdb.execute('ROLLBACK') # Make sure rollback and commit still fail outside a transaction. with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('ROLLBACK') with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('COMMIT') # Open a transaction which we'll commit. bdb.execute('BEGIN') try: with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('BEGIN') finally: bdb.execute('COMMIT') with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('ROLLBACK') with pytest.raises(bayeslite.BayesDBTxnError): bdb.execute('COMMIT') # Make sure ROLLBACK undoes the effects of the transaction. bdb.execute('BEGIN') try: with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) bdb.execute('SELECT * FROM t').fetchall() guess.bayesdb_guess_population(bdb, 'p', 't') bdb.execute('ESTIMATE * FROM p').fetchall() finally: bdb.execute('ROLLBACK') with pytest.raises(apsw.SQLError): bdb.execute('SELECT * FROM t') with pytest.raises(bayeslite.BQLError): bdb.execute('ESTIMATE * FROM p') # Make sure CREATE and DROP both work in the transaction. bdb.execute('BEGIN') try: with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) bdb.execute('SELECT * FROM t').fetchall() guess.bayesdb_guess_population(bdb, 'p', 't') bdb.execute('ESTIMATE * FROM p').fetchall() with pytest.raises(bayeslite.BQLError): bdb.execute('DROP TABLE t') bdb.execute('DROP POPULATION p') with pytest.raises(bayeslite.BQLError): bdb.execute('ESTIMATE * FROM p') bdb.execute('DROP TABLE t') with pytest.raises(apsw.SQLError): bdb.execute('SELECT * FROM t') finally: bdb.execute('ROLLBACK') with pytest.raises(bayeslite.BQLError): bdb.execute('ESTIMATE * FROM p') with pytest.raises(apsw.SQLError): bdb.execute('SELECT * FROM t') # Make sure CREATE and DROP work even if we commit. bdb.execute('BEGIN') try: with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) bdb.execute('SELECT * FROM t').fetchall() guess.bayesdb_guess_population(bdb, 'p', 't') bdb.execute('ESTIMATE * FROM p').fetchall() with pytest.raises(bayeslite.BQLError): bdb.execute('DROP TABLE t') bdb.execute('DROP POPULATION p') with pytest.raises(bayeslite.BQLError): bdb.execute('ESTIMATE * FROM p') bdb.execute('DROP TABLE t') with pytest.raises(apsw.SQLError): bdb.execute('SELECT * FROM t') finally: bdb.execute('COMMIT') with pytest.raises(bayeslite.BQLError): bdb.execute('ESTIMATE * FROM p') with pytest.raises(apsw.SQLError): bdb.execute('SELECT * FROM t') # Make sure CREATE persists if we commit. bdb.execute('BEGIN') try: with open(fname, 'rU') as f: bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True) bdb.execute('SELECT * FROM t').fetchall() guess.bayesdb_guess_population(bdb, 'p', 't') bdb.execute('ESTIMATE * FROM p').fetchall() finally: bdb.execute('COMMIT') bdb.execute('SELECT * FROM t').fetchall() bdb.execute('ESTIMATE * FROM p').fetchall() # Make sure bdb.transaction works, rolls back on exception, # and handles nesting correctly in the context of savepoints. try: with bdb.transaction(): bdb.sql_execute('create table quagga(x)') raise StopIteration except StopIteration: pass with pytest.raises(apsw.SQLError): bdb.execute('select * from quagga') with bdb.transaction(): with bdb.savepoint(): with bdb.savepoint(): pass with bdb.savepoint(): with pytest.raises(bayeslite.BayesDBTxnError): with bdb.transaction(): pass # XXX To do: Make sure other effects (e.g., analysis) get # rolled back by ROLLBACK. def test_predprob_null(): backend = CGPM_Backend({}, multiprocess=False) with test_core.bayesdb(backend=backend) as bdb: bdb.sql_execute(''' create table foo ( id integer primary key not null, x numeric, y numeric, z numeric ) ''') bdb.sql_execute("insert into foo values (1, 1, 'strange', 3)") bdb.sql_execute("insert into foo values (2, 1.2, 'strange', 1)") bdb.sql_execute("insert into foo values (3, 0.8, 'strange', 3)") bdb.sql_execute("insert into foo values (4, NULL, 'strange', 9)") bdb.sql_execute("insert into foo values (5, 73, 'up', 11)") bdb.sql_execute("insert into foo values (6, 80, 'up', -1)") bdb.sql_execute("insert into foo values (7, 60, 'up', NULL)") bdb.sql_execute("insert into foo values (8, 67, NULL, NULL)") bdb.sql_execute("insert into foo values (9, 3.1415926, 'down', 1)") bdb.sql_execute("insert into foo values (10, 1.4142135, 'down', 0)") bdb.sql_execute("insert into foo values (11, 2.7182818, 'down', -1)") bdb.sql_execute("insert into foo values (12, NULL, 'down', 10)") bdb.execute(''' create population pfoo for foo ( id ignore; x numerical; y nominal; z numerical; ) ''') bdb.execute('create generator pfoo_cc for pfoo using cgpm;') bdb.execute('initialize 1 model for pfoo_cc') bdb.execute('analyze pfoo_cc for 1 iteration') # Null value => null predictive probability. assert bdb.execute('estimate predictive probability of x' ' from pfoo where id = 4;').fetchall() == \ [(None,)] # Nonnull value => nonnull predictive probability. x = bdb.execute('estimate predictive probability of x' ' from pfoo where id = 5').fetchall() assert len(x) == 1 assert len(x[0]) == 1 assert isinstance(x[0][0], (int, float)) # All null values => null predictive probability. assert bdb.execute('estimate predictive probability of (y, z)' ' from pfoo where id = 8;').fetchall() == \ [(None,)] # Some nonnull values => nonnull predictive probability. x = bdb.execute('estimate predictive probability of (x, z)' ' from pfoo where id = 8;').fetchall() assert len(x) == 1 assert len(x[0]) == 1 assert isinstance(x[0][0], (int, float)) # All NULL constraints => same result regardless of given clause. c0 = bdb.execute('estimate predictive probability of x' ' from pfoo where id = 8;') v0 = cursor_value(c0) assert v0 is not None c1 = bdb.execute('estimate predictive probability of x given (y, z)' ' from pfoo where id = 8;') v1 = cursor_value(c1) assert relerr(v0, v1) < 0.0001 def test_guess_all(): with test_core.bayesdb() as bdb: bdb.sql_execute('create table foo (x numeric, y numeric, z numeric)') bdb.sql_execute('insert into foo values (1, 2, 3)') bdb.sql_execute('insert into foo values (4, 5, 6)') # XXX GUESS(*) guess.bayesdb_guess_population(bdb, 'pfoo', 'foo') def test_misc_errors(): with test_core.t1() as (bdb, _population_id, _generator_id): with pytest.raises(bayeslite.BQLError): bdb.execute('create table t1 as SELECT 1 FROM t1' # t1 already exists as a table. ' limit 1') with pytest.raises(bayeslite.BQLError): # t1 already exists as a table. bdb.execute('create table t1 as simulate weight from p1' ' limit 1') with pytest.raises(bayeslite.BQLError): # t1x does not exist as a population. bdb.execute('create table t1_sim as simulate weight from t1x' ' limit 1') with pytest.raises(bayeslite.BQLError): # p1 does not have a variable waught. bdb.execute('create table t1_sim as simulate waught from p1' ' limit 1') with pytest.raises(bayeslite.BQLError): # p1 does not have a variable agee. bdb.execute('create table t1_sim as simulate weight from p1' ' given agee = 42 limit 1') with bdb.savepoint(): bdb.sql_execute('create table t2(x)') with pytest.raises(bayeslite.BQLError): # t1 already exists as a table. bdb.execute('alter table t2 rename to t1') with pytest.raises(NotImplementedError): # Renaming columns is not yet implemented. bdb.execute('alter table t1 rename weight to mass') with pytest.raises(bayeslite.BQLError): # xcat does not exist as a backend. bdb.execute('create generator p1_xc for p1 using xcat()') with pytest.raises(bayeslite.BQLError): # p1 already exists as a population. bdb.execute('create generator p1_cc for p1;') with pytest.raises(bayeslite.BQLError): # multinomial is not a known statistical type. bdb.execute(''' create population q1 for t1( ignore id, label, weight; weight multinomial ) ''') with pytest.raises(bayeslite.BQLError): # p1_xc does not exist as a generator. bdb.execute('alter generator p1_xc rename to p1_xcat') with bdb.savepoint(): bdb.execute('create generator p1_xc for p1;') with pytest.raises(bayeslite.BQLError): # p1_xc already exists as a generator. bdb.execute('alter generator p1_cc rename to p1_xc') with pytest.raises(bayeslite.BQLParseError): # WAIT is not allowed. bdb.execute('analyze p1_cc for 1 iteration wait') with bdb.savepoint(): bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 1 iteration') bdb.execute('initialize 1 model for p1_xc') bdb.execute('analyze p1_xc for 1 iteration') with pytest.raises(apsw.SQLError): bdb.execute('select' ' nonexistent((simulate age from p1 limit 1));') with pytest.raises(ValueError): bdb.execute('select :x', {'y': 42}) with pytest.raises(ValueError): bdb.execute('select :x', {'x': 53, 'y': 42}) with pytest.raises(ValueError): bdb.execute('select ?, ?', (1,)) with pytest.raises(ValueError): bdb.execute('select ?', (1, 2)) with pytest.raises(TypeError): bdb.execute('select ?', 42) with pytest.raises(NotImplementedError): bdb.execute('infer explicit predict age confidence ac, *' ' from p1') with pytest.raises(NotImplementedError): bdb.execute('infer explicit predict age confidence ac,' ' t1.(select age from t1 limit 1) from p1') with pytest.raises(bayeslite.BQLError): try: bdb.execute('estimate similarity to (rowid=1)' ' in the context of agee from p1') except bayeslite.BQLError as e: assert 'No such columns in population:' in str(e) raise def test_nested_simulate(): with test_core.t1() as (bdb, _population_id, _generator_id): bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 1 iteration') bdb.execute('select (simulate age from p1 limit 1),' ' (simulate weight from p1 limit 1)').fetchall() assert bdb.temp_table_name() == 'bayesdb_temp_2' assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_0') assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_1') bdb.execute('simulate weight from p1' ' given age = (simulate age from p1 limit 1)' ' limit 1').fetchall() # Make sure unwinding doesn't raise an exception. Calling # __del__ directly, rather than via del(), has two effects: # # (a) It actually raises any exceptions in the method, unlike # del(), which suppresses them. # # (b) It may cause a subsequent __del__ to fail and raise an # exception, so that a subsequent del(), including an implicit # one at the end of a scope, may print a message to stderr. # # Effect (a) is what we are actually trying to test. Effect # (b) is a harmless consequence as far as pytest is concerned, # as long as the test otherwise passes. bdb.execute('simulate weight from p1' ' given age = (simulate age from p1 limit 1)' ' limit 1').__del__() def test_checkpoint__ci_slow(): with test_core.t1() as (bdb, population_id, generator_id): bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 10 iterations checkpoint 1 iteration') # No checkpoint by seconds. with pytest.raises(NotImplementedError): bdb.execute('analyze p1_cc for 5 seconds checkpoint 1 second') bdb.execute('drop models from p1_cc') bdb.execute('initialize 1 model for p1_cc') # No checkpoint by seconds. with pytest.raises(NotImplementedError): bdb.execute('analyze p1_cc for 5 iterations checkpoint 1 second') bdb.execute('drop models from p1_cc') bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 1 iteration checkpoint 2 iterations') def test_infer_confidence__ci_slow(): with test_core.t1() as (bdb, _population_id, _generator_id): bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 1 iteration') bdb.execute('infer explicit rowid, rowid as another_rowid, 4,' ' age, predict age as age_inf confidence age_conf' ' from p1').fetchall() def test_infer_as_estimate(): with test_core.t1() as (bdb, _population_id, _generator_id): bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 1 iteration') bdb.execute('infer explicit predictive probability of age' ' from p1').fetchall() def test_infer_error(): with test_core.t1() as (bdb, _population_id, _generator_id): bdb.execute('initialize 1 model for p1_cc') bdb.execute('infer explicit predict age confidence age_conf' ' from p1').fetchall() with pytest.raises(bayeslite.BQLError): bdb.execute('infer explicit predict agee confidence age_conf' ' from p1').fetchall() def test_estimate_by(): with test_core.t1() as (bdb, _population_id, _generator_id): bdb.execute('initialize 1 model for p1_cc') bdb.execute('analyze p1_cc for 1 iteration') with pytest.raises(bayeslite.BQLError): bdb.execute('estimate predictive probability of age' ' by p1') with pytest.raises(bayeslite.BQLError): bdb.execute('estimate similarity to (rowid=1) ' 'in the context of age by p1') def check(x, bindings=None): assert len(bdb.execute(x, bindings=bindings).fetchall()) == 1 check('estimate probability density of age = 42 by p1') check('estimate dependence probability of age with weight by p1') check('estimate mutual information of age with weight by p1') check('estimate correlation of age with weight by p1') check('estimate correlation pvalue of age with weight by p1') rowid = bdb.execute('select min(rowid) from t1').fetchall()[0][0] check(''' estimate similarity of (rowid=?) to (rowid=?) in the context of weight by p1 ''', (rowid, rowid,)) def test_empty_cursor(): with bayeslite.bayesdb_open() as bdb: assert bdb.execute('SELECT 0').connection == bdb empty(bdb.execute('BEGIN')) empty(bdb.execute('COMMIT')) empty(bdb.sql_execute('CREATE TABLE t(x, y, z)')) empty(bdb.sql_execute('INSERT INTO t VALUES(1,2,3)')) empty(bdb.sql_execute('INSERT INTO t VALUES(4,5,6)')) empty(bdb.sql_execute('INSERT INTO t VALUES(7,8,9)')) empty(bdb.execute('CREATE POPULATION p FOR t ' '(IGNORE z,y; x NOMINAL)')) empty(bdb.execute('CREATE GENERATOR p_cc FOR p;')) empty(bdb.execute('INITIALIZE 1 MODEL FOR p_cc')) empty(bdb.execute('DROP GENERATOR p_cc')) empty(bdb.execute('DROP POPULATION p')) empty(bdb.execute('DROP TABLE t')) def test_create_generator_ifnotexists(): # XXX Test other backends too, because they have a role in ensuring that # this works. Their create_generator will still be called. # # [TRC 20160627: The above comment appears to be no longer true -- # if it was ever true.] for using_clause in ('cgpm()',): with bayeslite.bayesdb_open() as bdb: bdb.sql_execute('CREATE TABLE t(x, y, z)') bdb.sql_execute('INSERT INTO t VALUES(1,2,3)') bdb.execute(''' CREATE POPULATION p FOR t ( x NUMERICAL; y NUMERICAL; z NOMINAL; ) ''') for _i in (0, 1): bdb.execute('CREATE GENERATOR IF NOT EXISTS p_cc FOR p USING ' + using_clause) try: bdb.execute('CREATE GENERATOR p_cc FOR p USING ' + using_clause) assert False # Should have said it exists. except bayeslite.BQLError: pass def test_bql_rand(): with bayeslite.bayesdb_open() as bdb: bdb.sql_execute('CREATE TABLE frobotz(x)') for _ in range(10): bdb.sql_execute('INSERT INTO frobotz VALUES(2)') cursor = bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;') rands = cursor.fetchall() # These are "the" random numbers (internal PRNG is seeded to 0) ans = [(0.28348770982811367,), (0.4789774612650598,), (0.07824908989551316,), (0.6091223239372148,), (0.03906608409906187,), (0.3690599096081546,), (0.8223420512129717,), (0.7777771914916722,), (0.061856771629497986,), (0.6492586781908201,)] assert rands == ans def test_bql_rand2(): seed = struct.pack('<QQQQ', 0, 0, 0, 3) with bayeslite.bayesdb_open(seed=seed) as bdb: bdb.sql_execute('CREATE TABLE frobotz(x)') for _ in range(10): bdb.sql_execute('INSERT INTO frobotz VALUES(2)') cursor = bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;') rands = cursor.fetchall() ans = [(0.8351877951287725,), (0.9735099617243271,), (0.026142315910925418,), (0.09380653289687524,), (0.1097050387582088,), (0.33154896906379605,), (0.4579314980719317,), (0.09072802203491703,), (0.5276180968829105,), (0.9993280772797679,)] assert rands == ans class MockTracerOneQuery(bayeslite.IBayesDBTracer): def __init__(self, q, qid): self.q = q self.qid = qid self.start_calls = 0 self.ready_calls = 0 self.error_calls = 0 self.finished_calls = 0 self.abandoned_calls = 0 def start(self, qid, query, bindings): assert qid == self.qid assert query == self.q assert bindings == () self.start_calls += 1 def ready(self, qid, _cursor): assert qid == self.qid self.ready_calls += 1 def error(self, qid, _e): assert qid == self.qid self.error_calls += 1 def finished(self, qid): assert qid == self.qid self.finished_calls += 1 def abandoned(self, qid): assert qid == self.qid self.abandoned_calls += 1 def test_tracing_smoke(): with test_core.t1() as (bdb, _population_id, _generator_id): q = 'SELECT * FROM t1' tracer = MockTracerOneQuery(q, 1) bdb.trace(tracer) cursor = bdb.execute(q) assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 0 assert tracer.finished_calls == 0 assert tracer.abandoned_calls == 0 cursor.fetchall() assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 0 assert tracer.finished_calls == 1 assert tracer.abandoned_calls == 0 del cursor assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 0 assert tracer.finished_calls == 1 assert tracer.abandoned_calls == 1 bdb.untrace(tracer) # XXX Make sure the whole cursor API works. q = 'SELECT 42' tracer = MockTracerOneQuery(q, 2) bdb.trace(tracer) cursor = bdb.execute(q) assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 0 assert tracer.finished_calls == 0 assert tracer.abandoned_calls == 0 assert cursor.fetchvalue() == 42 assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 0 assert tracer.finished_calls == 1 assert tracer.abandoned_calls == 0 del cursor assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 0 assert tracer.finished_calls == 1 assert tracer.abandoned_calls == 1 def test_tracing_error_smoke(): with test_core.t1() as (bdb, _population_id, _generator_id): q = 'SELECT * FROM wrong' tracer = MockTracerOneQuery(q, 1) bdb.trace(tracer) with pytest.raises(apsw.SQLError): bdb.execute(q) assert tracer.start_calls == 1 assert tracer.ready_calls == 0 assert tracer.error_calls == 1 assert tracer.finished_calls == 0 assert tracer.abandoned_calls == 0 class Boom(Exception): pass class ErroneousBackend(troll.TrollBackend): def __init__(self): self.call_ct = 0 def name(self): return 'erroneous' def logpdf_joint(self, *_args, **_kwargs): if self.call_ct > 10: # Wait to avoid raising during sqlite's prefetch raise Boom() self.call_ct += 1 return 0 def test_tracing_execution_error_smoke(): with test_core.t1() as (bdb, _population_id, _generator_id): bayeslite.bayesdb_register_backend(bdb, ErroneousBackend()) bdb.execute('DROP GENERATOR p1_cc') bdb.execute('CREATE GENERATOR p1_err FOR p1 USING erroneous()') q = 'ESTIMATE PREDICTIVE PROBABILITY OF age FROM p1' tracer = MockTracerOneQuery(q, 1) bdb.trace(tracer) cursor = bdb.execute(q) assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 0 assert tracer.finished_calls == 0 assert tracer.abandoned_calls == 0 with pytest.raises(Boom): cursor.fetchall() assert tracer.start_calls == 1 assert tracer.ready_calls == 1 assert tracer.error_calls == 1 assert tracer.finished_calls == 0 assert tracer.abandoned_calls == 0 def test_pdf_var(): with test_core.t1() as (bdb, population_id, _generator_id): bdb.execute('initialize 6 models for p1_cc;') c = bdb.execute( 'estimate probability density of label = label from p1') c.fetchall() assert bql2sql( 'estimate probability density of label = label from p1') == \ 'SELECT bql_pdf_joint(1, NULL, NULL, 1, "label") FROM "t1";'
[ "bayeslite.core.bayesdb_generator_table", "bayeslite.backends.cgpm_backend.CGPM_Backend", "test_csv.bayesdb_csv_file", "bayeslite.bayesdb_open", "bayeslite.bayesdb_read_csv", "bayeslite.compiler.compile_query", "bayeslite.guess.bayesdb_guess_population", "StringIO.StringIO", "test_core.t1_data", "test_core.t1", "struct.pack", "pytest.raises", "bayeslite.compiler.Output", "bayeslite.core.bayesdb_get_population", "bayeslite.core.bayesdb_has_variable", "bayeslite.ast.is_query", "bayeslite.parse.parse_bql_string", "bayeslite.core.bayesdb_population_name", "bayeslite.math_util.relerr", "bayeslite.core.bayesdb_generator_name", "pytest.mark.xfail", "bayeslite.core.bayesdb_variable_number", "test_core.bayesdb", "bayeslite.util.cursor_value", "bayeslite.core.bayesdb_variable_stattype", "test_core.t1_schema", "stochastic.stochastic", "bayeslite.core.bayesdb_has_table", "bayeslite.core.bayesdb_get_generator" ]
[((9123, 9159), 'stochastic.stochastic', 'stochastic', ([], {'max_runs': '(2)', 'min_passes': '(1)'}), '(max_runs=2, min_passes=1)\n', (9133, 9159), False, 'from stochastic import stochastic\n'), ((10593, 10629), 'stochastic.stochastic', 'stochastic', ([], {'max_runs': '(2)', 'min_passes': '(1)'}), '(max_runs=2, min_passes=1)\n', (10603, 10629), False, 'from stochastic import stochastic\n'), ((54232, 54304), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'reason': '"""no simulate vars from models of"""'}), "(strict=True, reason='no simulate vars from models of')\n", (54249, 54304), False, 'import pytest\n'), ((107099, 107135), 'bayeslite.backends.cgpm_backend.CGPM_Backend', 'CGPM_Backend', (['{}'], {'multiprocess': '(False)'}), '({}, multiprocess=False)\n', (107111, 107135), False, 'from bayeslite.backends.cgpm_backend import CGPM_Backend\n'), ((121702, 121734), 'struct.pack', 'struct.pack', (['"""<QQQQ"""', '(0)', '(0)', '(0)', '(3)'], {}), "('<QQQQ', 0, 0, 0, 3)\n", (121713, 121734), False, 'import struct\n'), ((1224, 1258), 'bayeslite.bayesdb_open', 'bayeslite.bayesdb_open', (['""":memory:"""'], {}), "(':memory:')\n", (1246, 1258), False, 'import bayeslite\n'), ((1275, 1299), 'test_core.t1_schema', 'test_core.t1_schema', (['bdb'], {}), '(bdb)\n', (1294, 1299), False, 'import test_core\n'), ((1308, 1330), 'test_core.t1_data', 'test_core.t1_data', (['bdb'], {}), '(bdb)\n', (1325, 1330), False, 'import test_core\n'), ((1617, 1647), 'bayeslite.parse.parse_bql_string', 'parse.parse_bql_string', (['string'], {}), '(string)\n', (1639, 1647), True, 'import bayeslite.parse as parse\n'), ((1662, 1688), 'bayeslite.compiler.Output', 'compiler.Output', (['(0)', '{}', '()'], {}), '(0, {}, ())\n', (1677, 1688), True, 'import bayeslite.compiler as compiler\n'), ((1946, 1980), 'bayeslite.bayesdb_open', 'bayeslite.bayesdb_open', (['""":memory:"""'], {}), "(':memory:')\n", (1968, 1980), False, 'import bayeslite\n'), ((1997, 2021), 'test_core.t1_schema', 'test_core.t1_schema', (['bdb'], {}), '(bdb)\n', (2016, 2021), False, 'import test_core\n'), ((2030, 2052), 'test_core.t1_data', 'test_core.t1_data', (['bdb'], {}), '(bdb)\n', (2047, 2052), False, 'import test_core\n'), ((2286, 2316), 'bayeslite.parse.parse_bql_string', 'parse.parse_bql_string', (['string'], {}), '(string)\n', (2308, 2316), True, 'import bayeslite.parse as parse\n'), ((2332, 2351), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (2349, 2351), False, 'import StringIO\n'), ((3197, 3225), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (3210, 3225), False, 'import pytest\n'), ((3290, 3334), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (3315, 3334), False, 'import test_csv\n'), ((3750, 3794), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (3775, 3794), False, 'import test_csv\n'), ((4220, 4264), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (4245, 4264), False, 'import test_csv\n'), ((4782, 4826), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (4807, 4826), False, 'import test_csv\n'), ((5378, 5392), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (5390, 5392), False, 'import test_core\n'), ((7779, 7802), 'pytest.raises', 'pytest.raises', (['BQLError'], {}), '(BQLError)\n', (7792, 7802), False, 'import pytest\n'), ((8184, 8207), 'pytest.raises', 'pytest.raises', (['BQLError'], {}), '(BQLError)\n', (8197, 8207), False, 'import pytest\n'), ((9209, 9232), 'test_core.t1', 'test_core.t1', ([], {'seed': 'seed'}), '(seed=seed)\n', (9221, 9232), False, 'import test_core\n'), ((10673, 10696), 'test_core.t1', 'test_core.t1', ([], {'seed': 'seed'}), '(seed=seed)\n', (10685, 10696), False, 'import test_core\n'), ((11802, 11816), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (11814, 11816), False, 'import test_core\n'), ((22180, 22214), 'pytest.raises', 'pytest.raises', (['parse.BQLParseError'], {}), '(parse.BQLParseError)\n', (22193, 22214), False, 'import pytest\n'), ((22311, 22345), 'pytest.raises', 'pytest.raises', (['parse.BQLParseError'], {}), '(parse.BQLParseError)\n', (22324, 22345), False, 'import pytest\n'), ((22483, 22517), 'pytest.raises', 'pytest.raises', (['parse.BQLParseError'], {}), '(parse.BQLParseError)\n', (22496, 22517), False, 'import pytest\n'), ((22670, 22704), 'pytest.raises', 'pytest.raises', (['parse.BQLParseError'], {}), '(parse.BQLParseError)\n', (22683, 22704), False, 'import pytest\n'), ((22800, 22833), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (22813, 22833), False, 'import pytest\n'), ((22974, 23007), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (22987, 23007), False, 'import pytest\n'), ((23160, 23193), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (23173, 23193), False, 'import pytest\n'), ((23345, 23378), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (23358, 23378), False, 'import pytest\n'), ((25838, 25871), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (25851, 25871), False, 'import pytest\n'), ((26469, 26502), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (26482, 26502), False, 'import pytest\n'), ((26623, 26656), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (26636, 26656), False, 'import pytest\n'), ((26768, 26801), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (26781, 26801), False, 'import pytest\n'), ((26916, 26949), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (26929, 26949), False, 'import pytest\n'), ((27512, 27545), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (27525, 27545), False, 'import pytest\n'), ((27656, 27689), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (27669, 27689), False, 'import pytest\n'), ((27791, 27824), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (27804, 27824), False, 'import pytest\n'), ((27967, 28000), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (27980, 28000), False, 'import pytest\n'), ((28315, 28348), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (28328, 28348), False, 'import pytest\n'), ((28452, 28485), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (28465, 28485), False, 'import pytest\n'), ((28580, 28603), 'pytest.raises', 'pytest.raises', (['BQLError'], {}), '(BQLError)\n', (28593, 28603), False, 'import pytest\n'), ((28751, 28784), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (28764, 28784), False, 'import pytest\n'), ((39609, 39642), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (39622, 39642), False, 'import pytest\n'), ((39763, 39796), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (39776, 39796), False, 'import pytest\n'), ((39973, 40006), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (39986, 40006), False, 'import pytest\n'), ((40429, 40462), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (40442, 40462), False, 'import pytest\n'), ((40634, 40667), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (40647, 40667), False, 'import pytest\n'), ((41665, 41698), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (41678, 41698), False, 'import pytest\n'), ((41863, 41896), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (41876, 41896), False, 'import pytest\n'), ((42043, 42076), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (42056, 42076), False, 'import pytest\n'), ((42258, 42291), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (42271, 42291), False, 'import pytest\n'), ((42669, 42702), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (42682, 42702), False, 'import pytest\n'), ((42860, 42893), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (42873, 42893), False, 'import pytest\n'), ((43018, 43051), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (43031, 43051), False, 'import pytest\n'), ((46956, 46989), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (46969, 46989), False, 'import pytest\n'), ((47183, 47216), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (47196, 47216), False, 'import pytest\n'), ((47434, 47467), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (47447, 47467), False, 'import pytest\n'), ((47676, 47709), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (47689, 47709), False, 'import pytest\n'), ((47892, 47925), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (47905, 47925), False, 'import pytest\n'), ((48500, 48533), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (48513, 48533), False, 'import pytest\n'), ((48738, 48771), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (48751, 48771), False, 'import pytest\n'), ((49012, 49045), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (49025, 49045), False, 'import pytest\n'), ((49224, 49257), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (49237, 49257), False, 'import pytest\n'), ((50244, 50277), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (50257, 50277), False, 'import pytest\n'), ((50475, 50508), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (50488, 50508), False, 'import pytest\n'), ((50680, 50713), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (50693, 50713), False, 'import pytest\n'), ((51219, 51252), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (51232, 51252), False, 'import pytest\n'), ((52408, 52441), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (52421, 52441), False, 'import pytest\n'), ((55681, 55715), 'pytest.raises', 'pytest.raises', (['parse.BQLParseError'], {}), '(parse.BQLParseError)\n', (55694, 55715), False, 'import pytest\n'), ((62196, 62230), 'pytest.raises', 'pytest.raises', (['parse.BQLParseError'], {}), '(parse.BQLParseError)\n', (62209, 62230), False, 'import pytest\n'), ((62317, 62361), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (62342, 62361), False, 'import test_csv\n'), ((62866, 62911), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (62896, 62911), True, 'import bayeslite.guess as guess\n'), ((63018, 63081), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {'ifnotexists': '(True)'}), "(bdb, 'p', 't', ifnotexists=True)\n", (63048, 63081), True, 'import bayeslite.guess as guess\n'), ((64133, 64170), 'bayeslite.core.bayesdb_get_population', 'core.bayesdb_get_population', (['bdb', '"""p"""'], {}), "(bdb, 'p')\n", (64160, 64170), True, 'import bayeslite.core as core\n'), ((64194, 64248), 'bayeslite.core.bayesdb_get_generator', 'core.bayesdb_get_generator', (['bdb', 'population_id', '"""p_cc"""'], {}), "(bdb, population_id, 'p_cc')\n", (64220, 64248), True, 'import bayeslite.core as core\n'), ((65955, 66027), 'bayeslite.core.bayesdb_variable_number', 'core.bayesdb_variable_number', (['bdb', 'population_id', 'generator_id', '"""gender"""'], {}), "(bdb, population_id, generator_id, 'gender')\n", (65983, 66027), True, 'import bayeslite.core as core\n'), ((70199, 70307), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""pe"""', '"""t0"""'], {'overrides': "[('age', 'numerical'), ('rank', 'numerical')]"}), "(bdb, 'pe', 't0', overrides=[('age',\n 'numerical'), ('rank', 'numerical')])\n", (70229, 70307), True, 'import bayeslite.guess as guess\n'), ((71621, 71635), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (71633, 71635), False, 'import test_core\n'), ((72705, 72749), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (72730, 72749), False, 'import test_csv\n'), ((74926, 74971), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (74956, 74971), True, 'import bayeslite.guess as guess\n'), ((94030, 94074), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (94055, 94074), False, 'import test_csv\n'), ((95446, 95490), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (95471, 95490), False, 'import test_csv\n'), ((96341, 96420), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {'overrides': "[('age', 'numerical')]"}), "(bdb, 'p', 't', overrides=[('age', 'numerical')])\n", (96371, 96420), True, 'import bayeslite.guess as guess\n'), ((96876, 96953), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p0"""', '"""t"""'], {'overrides': "[('age', 'ignore')]"}), "(bdb, 'p0', 't', overrides=[('age', 'ignore')])\n", (96906, 96953), True, 'import bayeslite.guess as guess\n'), ((97032, 97069), 'bayeslite.core.bayesdb_get_population', 'core.bayesdb_get_population', (['bdb', '"""p"""'], {}), "(bdb, 'p')\n", (97059, 97069), True, 'import bayeslite.core as core\n'), ((97086, 97147), 'bayeslite.core.bayesdb_variable_number', 'core.bayesdb_variable_number', (['bdb', 'population_id', 'None', '"""age"""'], {}), "(bdb, population_id, None, 'age')\n", (97114, 97147), True, 'import bayeslite.core as core\n'), ((98063, 98108), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (98093, 98108), True, 'import bayeslite.guess as guess\n'), ((99123, 99147), 'bayeslite.bayesdb_open', 'bayeslite.bayesdb_open', ([], {}), '()\n', (99145, 99147), False, 'import bayeslite\n'), ((99639, 99676), 'bayeslite.core.bayesdb_get_population', 'core.bayesdb_get_population', (['bdb', '"""p"""'], {}), "(bdb, 'p')\n", (99666, 99676), True, 'import bayeslite.core as core\n'), ((100447, 100508), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""height"""'], {}), "(bdb, population_id, None, 'height')\n", (100472, 100508), True, 'import bayeslite.core as core\n'), ((100888, 100947), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""rank"""'], {}), "(bdb, population_id, None, 'rank')\n", (100913, 100947), True, 'import bayeslite.core as core\n'), ((100963, 101026), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""division"""'], {}), "(bdb, population_id, None, 'division')\n", (100988, 101026), True, 'import bayeslite.core as core\n'), ((101609, 101670), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""weight"""'], {}), "(bdb, population_id, None, 'weight')\n", (101634, 101670), True, 'import bayeslite.core as core\n'), ((101697, 101741), 'test_csv.bayesdb_csv_file', 'test_csv.bayesdb_csv_file', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (101722, 101741), False, 'import test_csv\n'), ((107145, 107179), 'test_core.bayesdb', 'test_core.bayesdb', ([], {'backend': 'backend'}), '(backend=backend)\n', (107162, 107179), False, 'import test_core\n'), ((109839, 109855), 'bayeslite.util.cursor_value', 'cursor_value', (['c0'], {}), '(c0)\n', (109851, 109855), False, 'from bayeslite.util import cursor_value\n'), ((110020, 110036), 'bayeslite.util.cursor_value', 'cursor_value', (['c1'], {}), '(c1)\n', (110032, 110036), False, 'from bayeslite.util import cursor_value\n'), ((110108, 110127), 'test_core.bayesdb', 'test_core.bayesdb', ([], {}), '()\n', (110125, 110127), False, 'import test_core\n'), ((110365, 110415), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""pfoo"""', '"""foo"""'], {}), "(bdb, 'pfoo', 'foo')\n", (110395, 110415), True, 'import bayeslite.guess as guess\n'), ((110450, 110464), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (110462, 110464), False, 'import test_core\n'), ((114616, 114630), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (114628, 114630), False, 'import test_core\n'), ((116076, 116090), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (116088, 116090), False, 'import test_core\n'), ((116904, 116918), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (116916, 116918), False, 'import test_core\n'), ((117274, 117288), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (117286, 117288), False, 'import test_core\n'), ((117571, 117585), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (117583, 117585), False, 'import test_core\n'), ((117978, 117992), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (117990, 117992), False, 'import test_core\n'), ((119143, 119167), 'bayeslite.bayesdb_open', 'bayeslite.bayesdb_open', ([], {}), '()\n', (119165, 119167), False, 'import bayeslite\n'), ((120993, 121017), 'bayeslite.bayesdb_open', 'bayeslite.bayesdb_open', ([], {}), '()\n', (121015, 121017), False, 'import bayeslite\n'), ((121744, 121777), 'bayeslite.bayesdb_open', 'bayeslite.bayesdb_open', ([], {'seed': 'seed'}), '(seed=seed)\n', (121766, 121777), False, 'import bayeslite\n'), ((123211, 123225), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (123223, 123225), False, 'import test_core\n'), ((124961, 124975), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (124973, 124975), False, 'import test_core\n'), ((125810, 125824), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (125822, 125824), False, 'import test_core\n'), ((126709, 126723), 'test_core.t1', 'test_core.t1', ([], {}), '()\n', (126721, 126723), False, 'import test_core\n'), ((1739, 1759), 'bayeslite.ast.is_query', 'ast.is_query', (['phrase'], {}), '(phrase)\n', (1751, 1759), True, 'import bayeslite.ast as ast\n'), ((1772, 1812), 'bayeslite.compiler.compile_query', 'compiler.compile_query', (['bdb', 'phrase', 'out'], {}), '(bdb, phrase, out)\n', (1794, 1812), True, 'import bayeslite.compiler as compiler\n'), ((2734, 2754), 'bayeslite.ast.is_query', 'ast.is_query', (['phrase'], {}), '(phrase)\n', (2746, 2754), True, 'import bayeslite.ast as ast\n'), ((2767, 2807), 'bayeslite.compiler.compile_query', 'compiler.compile_query', (['bdb', 'phrase', 'out'], {}), '(bdb, phrase, out)\n', (2789, 2807), True, 'import bayeslite.compiler as compiler\n'), ((3401, 3466), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (3427, 3466), False, 'import bayeslite\n'), ((3861, 3926), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (3887, 3926), False, 'import bayeslite\n'), ((3940, 3963), 'pytest.raises', 'pytest.raises', (['BQLError'], {}), '(BQLError)\n', (3953, 3963), False, 'import pytest\n'), ((4331, 4396), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (4357, 4396), False, 'import bayeslite\n'), ((4571, 4594), 'pytest.raises', 'pytest.raises', (['BQLError'], {}), '(BQLError)\n', (4584, 4594), False, 'import pytest\n'), ((4893, 4958), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (4919, 4958), False, 'import bayeslite\n'), ((5154, 5177), 'pytest.raises', 'pytest.raises', (['BQLError'], {}), '(BQLError)\n', (5167, 5177), False, 'import pytest\n'), ((11871, 11896), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11884, 11896), False, 'import pytest\n'), ((11939, 11964), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11952, 11964), False, 'import pytest\n'), ((12008, 12033), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12021, 12033), False, 'import pytest\n'), ((62460, 62525), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (62486, 62525), False, 'import bayeslite\n'), ((62758, 62845), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)', 'ifnotexists': '(True)'}), "(bdb, 't', f, header=True, create=True,\n ifnotexists=True)\n", (62784, 62845), False, 'import bayeslite\n'), ((62925, 62950), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (62938, 62950), False, 'import pytest\n'), ((62964, 63009), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (62994, 63009), True, 'import bayeslite.guess as guess\n'), ((63199, 63232), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (63212, 63232), False, 'import pytest\n'), ((63445, 63478), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (63458, 63478), False, 'import pytest\n'), ((63549, 63582), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (63562, 63582), False, 'import pytest\n'), ((63838, 63871), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (63851, 63871), False, 'import pytest\n'), ((64264, 64311), 'bayeslite.core.bayesdb_generator_table', 'core.bayesdb_generator_table', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (64292, 64311), True, 'import bayeslite.core as core\n'), ((64383, 64430), 'bayeslite.core.bayesdb_generator_table', 'core.bayesdb_generator_table', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (64411, 64430), True, 'import bayeslite.core as core\n'), ((64502, 64549), 'bayeslite.core.bayesdb_generator_table', 'core.bayesdb_generator_table', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (64530, 64549), True, 'import bayeslite.core as core\n'), ((64626, 64674), 'bayeslite.core.bayesdb_population_name', 'core.bayesdb_population_name', (['bdb', 'population_id'], {}), '(bdb, population_id)\n', (64654, 64674), True, 'import bayeslite.core as core\n'), ((64752, 64800), 'bayeslite.core.bayesdb_population_name', 'core.bayesdb_population_name', (['bdb', 'population_id'], {}), '(bdb, population_id)\n', (64780, 64800), True, 'import bayeslite.core as core\n'), ((64879, 64927), 'bayeslite.core.bayesdb_population_name', 'core.bayesdb_population_name', (['bdb', 'population_id'], {}), '(bdb, population_id)\n', (64907, 64927), True, 'import bayeslite.core as core\n'), ((65058, 65105), 'bayeslite.core.bayesdb_generator_table', 'core.bayesdb_generator_table', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (65086, 65105), True, 'import bayeslite.core as core\n'), ((65188, 65234), 'bayeslite.core.bayesdb_generator_name', 'core.bayesdb_generator_name', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (65215, 65234), True, 'import bayeslite.core as core\n'), ((65337, 65383), 'bayeslite.core.bayesdb_generator_name', 'core.bayesdb_generator_name', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (65364, 65383), True, 'import bayeslite.core as core\n'), ((65471, 65517), 'bayeslite.core.bayesdb_generator_name', 'core.bayesdb_generator_name', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (65498, 65517), True, 'import bayeslite.core as core\n'), ((65605, 65651), 'bayeslite.core.bayesdb_generator_name', 'core.bayesdb_generator_name', (['bdb', 'generator_id'], {}), '(bdb, generator_id)\n', (65632, 65651), True, 'import bayeslite.core as core\n'), ((65735, 65768), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (65748, 65768), False, 'import pytest\n'), ((66053, 66087), 'pytest.raises', 'pytest.raises', (['parse.BQLParseError'], {}), '(parse.BQLParseError)\n', (66066, 66087), False, 'import pytest\n'), ((66243, 66277), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (66256, 66277), False, 'import pytest\n'), ((68202, 68235), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (68215, 68235), False, 'import pytest\n'), ((68991, 69024), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (69004, 69024), False, 'import pytest\n'), ((69316, 69364), 'bayeslite.core.bayesdb_population_name', 'core.bayesdb_population_name', (['bdb', 'population_id'], {}), '(bdb, population_id)\n', (69344, 69364), True, 'import bayeslite.core as core\n'), ((69929, 69962), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (69942, 69962), False, 'import pytest\n'), ((70054, 70087), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (70067, 70087), False, 'import pytest\n'), ((70430, 70463), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (70443, 70463), False, 'import pytest\n'), ((70788, 70821), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (70801, 70821), False, 'import pytest\n'), ((70903, 70936), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (70916, 70936), False, 'import pytest\n'), ((71005, 71038), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (71018, 71038), False, 'import pytest\n'), ((71098, 71131), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (71111, 71131), False, 'import pytest\n'), ((71202, 71235), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (71215, 71235), False, 'import pytest\n'), ((71325, 71358), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (71338, 71358), False, 'import pytest\n'), ((72816, 72881), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (72842, 72881), False, 'import bayeslite\n'), ((74148, 74173), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (74161, 74173), False, 'import pytest\n'), ((94141, 94206), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (94167, 94206), False, 'import bayeslite\n'), ((94259, 94338), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {'overrides': "[('age', 'numerical')]"}), "(bdb, 'p', 't', overrides=[('age', 'numerical')])\n", (94289, 94338), True, 'import bayeslite.guess as guess\n'), ((95521, 95549), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (95534, 95549), False, 'import pytest\n'), ((95650, 95683), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (95663, 95683), False, 'import pytest\n'), ((95794, 95827), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (95807, 95827), False, 'import pytest\n'), ((95978, 96043), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (96004, 96043), False, 'import bayeslite\n'), ((96498, 96531), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (96511, 96531), False, 'import pytest\n'), ((96652, 96685), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (96665, 96685), False, 'import pytest\n'), ((97163, 97226), 'bayeslite.core.bayesdb_variable_stattype', 'core.bayesdb_variable_stattype', (['bdb', 'population_id', 'None', 'colno'], {}), '(bdb, population_id, None, colno)\n', (97193, 97226), True, 'import bayeslite.core as core\n'), ((97319, 97352), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (97332, 97352), False, 'import pytest\n'), ((97407, 97440), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (97420, 97440), False, 'import pytest\n'), ((97543, 97576), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (97556, 97576), False, 'import pytest\n'), ((97638, 97671), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (97651, 97671), False, 'import pytest\n'), ((97989, 98054), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (98015, 98054), False, 'import bayeslite\n'), ((98420, 98453), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (98433, 98453), False, 'import pytest\n'), ((98584, 98612), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (98597, 98612), False, 'import pytest\n'), ((99214, 99250), 'StringIO.StringIO', 'StringIO.StringIO', (['test_csv.csv_data'], {}), '(test_csv.csv_data)\n', (99231, 99250), False, 'import StringIO\n'), ((99798, 99831), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (99811, 99831), False, 'import pytest\n'), ((99963, 99996), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (99976, 99996), False, 'import pytest\n'), ((100137, 100170), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (100150, 100170), False, 'import pytest\n'), ((100297, 100358), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""height"""'], {}), "(bdb, population_id, None, 'height')\n", (100322, 100358), True, 'import bayeslite.core as core\n'), ((100575, 100634), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""rank"""'], {}), "(bdb, population_id, None, 'rank')\n", (100600, 100634), True, 'import bayeslite.core as core\n'), ((100654, 100717), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""division"""'], {}), "(bdb, population_id, None, 'division')\n", (100679, 100717), True, 'import bayeslite.core as core\n'), ((101203, 101236), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (101216, 101236), False, 'import pytest\n'), ((101334, 101395), 'bayeslite.core.bayesdb_has_variable', 'core.bayesdb_has_variable', (['bdb', 'population_id', 'None', '"""weight"""'], {}), "(bdb, population_id, None, 'weight')\n", (101359, 101395), True, 'import bayeslite.core as core\n'), ((101840, 101880), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (101853, 101880), False, 'import pytest\n'), ((101931, 101971), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (101944, 101971), False, 'import pytest\n'), ((102407, 102447), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (102420, 102447), False, 'import pytest\n'), ((102498, 102538), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (102511, 102538), False, 'import pytest\n'), ((102827, 102867), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (102840, 102867), False, 'import pytest\n'), ((102918, 102958), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (102931, 102958), False, 'import pytest\n'), ((103314, 103359), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (103344, 103359), True, 'import bayeslite.guess as guess\n'), ((103482, 103510), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (103495, 103510), False, 'import pytest\n'), ((103568, 103601), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (103581, 103601), False, 'import pytest\n'), ((103966, 104011), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (103996, 104011), True, 'import bayeslite.guess as guess\n'), ((104510, 104543), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (104523, 104543), False, 'import pytest\n'), ((104603, 104631), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (104616, 104631), False, 'import pytest\n'), ((104988, 105033), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (105018, 105033), True, 'import bayeslite.guess as guess\n'), ((105530, 105563), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (105543, 105563), False, 'import pytest\n'), ((105623, 105651), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (105636, 105651), False, 'import pytest\n'), ((105998, 106043), 'bayeslite.guess.bayesdb_guess_population', 'guess.bayesdb_guess_population', (['bdb', '"""p"""', '"""t"""'], {}), "(bdb, 'p', 't')\n", (106028, 106043), True, 'import bayeslite.guess as guess\n'), ((106595, 106623), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (106608, 106623), False, 'import pytest\n'), ((110052, 110066), 'bayeslite.math_util.relerr', 'relerr', (['v0', 'v1'], {}), '(v0, v1)\n', (110058, 110066), False, 'from bayeslite.math_util import relerr\n'), ((110519, 110552), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (110532, 110552), False, 'import pytest\n'), ((110701, 110734), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (110714, 110734), False, 'import pytest\n'), ((110890, 110923), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (110903, 110923), False, 'import pytest\n'), ((111090, 111123), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (111103, 111123), False, 'import pytest\n'), ((111289, 111322), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (111302, 111322), False, 'import pytest\n'), ((111741, 111775), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (111754, 111775), False, 'import pytest\n'), ((111909, 111942), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (111922, 111942), False, 'import pytest\n'), ((112075, 112108), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (112088, 112108), False, 'import pytest\n'), ((112230, 112263), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (112243, 112263), False, 'import pytest\n'), ((112530, 112563), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (112543, 112563), False, 'import pytest\n'), ((112960, 112998), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLParseError'], {}), '(bayeslite.BQLParseError)\n', (112973, 112998), False, 'import pytest\n'), ((113519, 113544), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (113532, 113544), False, 'import pytest\n'), ((113607, 113632), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (113620, 113632), False, 'import pytest\n'), ((113704, 113729), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (113717, 113729), False, 'import pytest\n'), ((113789, 113814), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (113802, 113814), False, 'import pytest\n'), ((113873, 113897), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (113886, 113897), False, 'import pytest\n'), ((113952, 113986), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (113965, 113986), False, 'import pytest\n'), ((114099, 114133), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (114112, 114133), False, 'import pytest\n'), ((114276, 114309), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (114289, 114309), False, 'import pytest\n'), ((114975, 115020), 'bayeslite.core.bayesdb_has_table', 'core.bayesdb_has_table', (['bdb', '"""bayesdb_temp_0"""'], {}), "(bdb, 'bayesdb_temp_0')\n", (114997, 115020), True, 'import bayeslite.core as core\n'), ((115040, 115085), 'bayeslite.core.bayesdb_has_table', 'core.bayesdb_has_table', (['bdb', '"""bayesdb_temp_1"""'], {}), "(bdb, 'bayesdb_temp_1')\n", (115062, 115085), True, 'import bayeslite.core as core\n'), ((116309, 116343), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (116322, 116343), False, 'import pytest\n'), ((116567, 116601), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (116580, 116601), False, 'import pytest\n'), ((117796, 117829), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (117809, 117829), False, 'import pytest\n'), ((118152, 118185), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (118165, 118185), False, 'import pytest\n'), ((118291, 118324), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (118304, 118324), False, 'import pytest\n'), ((120240, 120264), 'bayeslite.bayesdb_open', 'bayeslite.bayesdb_open', ([], {}), '()\n', (120262, 120264), False, 'import bayeslite\n'), ((125132, 125160), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (125145, 125160), False, 'import pytest\n'), ((126426, 126445), 'pytest.raises', 'pytest.raises', (['Boom'], {}), '(Boom)\n', (126439, 126445), False, 'import pytest\n'), ((2534, 2595), 'bayeslite.compiler.Output', 'compiler.Output', (['phrase.n_numpar', 'phrase.nampar_map', 'bindings'], {}), '(phrase.n_numpar, phrase.nampar_map, bindings)\n', (2549, 2595), True, 'import bayeslite.compiler as compiler\n'), ((2695, 2714), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (2712, 2714), False, 'import StringIO\n'), ((62580, 62605), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (62593, 62605), False, 'import pytest\n'), ((62623, 62688), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (62649, 62688), False, 'import bayeslite\n'), ((66380, 66449), 'bayeslite.core.bayesdb_variable_number', 'core.bayesdb_variable_number', (['bdb', 'population_id', 'generator_id', '"""sex"""'], {}), "(bdb, population_id, generator_id, 'sex')\n", (66408, 66449), True, 'import bayeslite.core as core\n'), ((66646, 66715), 'bayeslite.core.bayesdb_variable_number', 'core.bayesdb_variable_number', (['bdb', 'population_id', 'generator_id', '"""sex"""'], {}), "(bdb, population_id, generator_id, 'sex')\n", (66674, 66715), True, 'import bayeslite.core as core\n'), ((66838, 66867), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (66851, 66867), False, 'import pytest\n'), ((67017, 67050), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (67030, 67050), False, 'import pytest\n'), ((67293, 67326), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (67306, 67326), False, 'import pytest\n'), ((67450, 67483), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (67463, 67483), False, 'import pytest\n'), ((67678, 67750), 'bayeslite.core.bayesdb_variable_number', 'core.bayesdb_variable_number', (['bdb', 'population_id', 'generator_id', '"""gender"""'], {}), "(bdb, population_id, generator_id, 'gender')\n", (67706, 67750), True, 'import bayeslite.core as core\n'), ((96229, 96316), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)', 'ifnotexists': '(True)'}), "(bdb, 't', f, header=True, create=True,\n ifnotexists=True)\n", (96255, 96316), False, 'import bayeslite\n'), ((102187, 102227), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (102200, 102227), False, 'import pytest\n'), ((102683, 102723), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (102696, 102723), False, 'import pytest\n'), ((103162, 103227), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (103188, 103227), False, 'import bayeslite\n'), ((103814, 103879), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (103840, 103879), False, 'import bayeslite\n'), ((104085, 104118), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (104098, 104118), False, 'import pytest\n'), ((104226, 104259), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (104239, 104259), False, 'import pytest\n'), ((104367, 104395), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (104380, 104395), False, 'import pytest\n'), ((104836, 104901), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (104862, 104901), False, 'import bayeslite\n'), ((105107, 105140), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (105120, 105140), False, 'import pytest\n'), ((105248, 105281), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (105261, 105281), False, 'import pytest\n'), ((105389, 105417), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (105402, 105417), False, 'import pytest\n'), ((105846, 105911), 'bayeslite.bayesdb_read_csv', 'bayeslite.bayesdb_read_csv', (['bdb', '"""t"""', 'f'], {'header': '(True)', 'create': '(True)'}), "(bdb, 't', f, header=True, create=True)\n", (105872, 105911), False, 'import bayeslite\n'), ((106849, 106889), 'pytest.raises', 'pytest.raises', (['bayeslite.BayesDBTxnError'], {}), '(bayeslite.BayesDBTxnError)\n', (106862, 106889), False, 'import pytest\n'), ((111586, 111619), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (111599, 111619), False, 'import pytest\n'), ((112788, 112821), 'pytest.raises', 'pytest.raises', (['bayeslite.BQLError'], {}), '(bayeslite.BQLError)\n', (112801, 112821), False, 'import pytest\n'), ((113370, 113398), 'pytest.raises', 'pytest.raises', (['apsw.SQLError'], {}), '(apsw.SQLError)\n', (113383, 113398), False, 'import pytest\n')]
# -*- coding: utf-8 -*- """ Created on Fri Aug 30 20:15:18 2019 @author: autol """ #%% from plotxy import plot_gd_xy,iters_gd_plot,plot_gd_contour from initdata import init_data,init_data1,data_b,init_data_house from func import gradient_descent_f from varclass import VarSetX from sklearn.model_selection import ParameterGrid import matplotlib.pyplot as plt import numpy as np #%% Example n=20 w = np.ones(2);w X,y=init_data1(n,45,w,b=0);X # eta = 1e-2 #X,y=init_data_house(n,45,w);X # 1e-7 X_b = data_b(X);X_b y #%% B_b = np.linalg.inv(X_b.T.dot(X_b)) @ (X_b.T.dot(y));B_b B = np.linalg.inv(X.T.dot(X)) @ (X.T.dot(y));B #%% #w = np.array([-2.5,-2.5]);w #w = np.array([0.,0.]);w A = 2./len(y)*X.T.dot(X) # ŋ=1 # 海森矩阵 J = lambda w: np.mean((X.dot(w)-y)**2) # 目标函数 gJ = lambda w: 2./len(y)*X.T.dot(X.dot(w)-y) # 梯度函数 #A = X.T@X # ŋ=1/n #J = lambda w: w.dot(A).dot(w) #gJ = lambda w: A.dot(w) pgrid =list(ParameterGrid(dict(sgd=[0,1], isStep=[0], # ρ=[.5,5,10], # n_b=[2,5], # ŋ_a=[1], # ŋ_a 要大于1 method=['mm21','mm22','mm23','mm24','mm25'], #method=['mm31','mm32','mm33','mm34','mm30'], #method=['mm40','mm41','mm42','mm43','mm44','mm45','mm46'], #method=['mm51','mm52','mm53','mm54','mm55'], #method=['mm10'], #method=['mm90','mm91','mm92','mm93','mm94',], ))) skwargs = dict(A=A,ŋ=.1,ŋ_a=1,tol=0.05, ε=.001,λ=.1,α=.5,γ=0.5,β1=.9,β2=.999) wws=[];ess=[];rets=[] for pg in pgrid: w0 = w.copy()-np.random.uniform(1,3,2) #任意起点 kwargs=dict(X=X.copy(),y=y.copy(), gJ=gJ,J=J,w=w0,) kwargs.update(skwargs) ; kwargs.update(pg) ; var = VarSetX(kwargs) ret = gradient_descent_f(var,n_iters=20,skipConv=0, **kwargs) ww = np.stack(ret['wh'][:,1]) es = ret['wh'][:,2] wws.append(ww); ess.append(es); rets.append(ret) print(ww,es) #%% x = np.zeros(len(w));x x = np.vstack([x, np.amax(X,axis=0)]);x x_b = data_b(x) yh = x.dot(B); yh fig, ax = plt.subplots(figsize = (8,8)) ax.plot(X[:,0],y,'o') ax.plot(x[:,0],yh,color='b',linewidth=5) ws = [ww[int(i)] for i in np.linspace(0,len(ww)-1,10)] for wx in ws: yh = x.dot(wx);yh # 画渐近的基准线 ax.plot(x[:,0],yh,color='r') ax.set_xlabel('x') ax.set_ylabel('y') #%% plot_gd_contour(J,wws,ess,pgrid,skwargs,B) #%% paras = skwargs.copy() paras.pop('A') iters_gd_plot(rets,var,pgrid,paras=paras, **kwargs)
[ "numpy.stack", "numpy.random.uniform", "varclass.VarSetX", "plotxy.plot_gd_contour", "numpy.ones", "initdata.data_b", "plotxy.iters_gd_plot", "numpy.amax", "initdata.init_data1", "func.gradient_descent_f", "matplotlib.pyplot.subplots" ]
[((402, 412), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (409, 412), True, 'import numpy as np\n'), ((419, 444), 'initdata.init_data1', 'init_data1', (['n', '(45)', 'w'], {'b': '(0)'}), '(n, 45, w, b=0)\n', (429, 444), False, 'from initdata import init_data, init_data1, data_b, init_data_house\n'), ((502, 511), 'initdata.data_b', 'data_b', (['X'], {}), '(X)\n', (508, 511), False, 'from initdata import init_data, init_data1, data_b, init_data_house\n'), ((2015, 2024), 'initdata.data_b', 'data_b', (['x'], {}), '(x)\n', (2021, 2024), False, 'from initdata import init_data, init_data1, data_b, init_data_house\n'), ((2054, 2082), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2066, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2328, 2375), 'plotxy.plot_gd_contour', 'plot_gd_contour', (['J', 'wws', 'ess', 'pgrid', 'skwargs', 'B'], {}), '(J, wws, ess, pgrid, skwargs, B)\n', (2343, 2375), False, 'from plotxy import plot_gd_xy, iters_gd_plot, plot_gd_contour\n'), ((2414, 2468), 'plotxy.iters_gd_plot', 'iters_gd_plot', (['rets', 'var', 'pgrid'], {'paras': 'paras'}), '(rets, var, pgrid, paras=paras, **kwargs)\n', (2427, 2468), False, 'from plotxy import plot_gd_xy, iters_gd_plot, plot_gd_contour\n'), ((1700, 1715), 'varclass.VarSetX', 'VarSetX', (['kwargs'], {}), '(kwargs)\n', (1707, 1715), False, 'from varclass import VarSetX\n'), ((1726, 1783), 'func.gradient_descent_f', 'gradient_descent_f', (['var'], {'n_iters': '(20)', 'skipConv': '(0)'}), '(var, n_iters=20, skipConv=0, **kwargs)\n', (1744, 1783), False, 'from func import gradient_descent_f\n'), ((1821, 1846), 'numpy.stack', 'np.stack', (["ret['wh'][:, 1]"], {}), "(ret['wh'][:, 1])\n", (1829, 1846), True, 'import numpy as np\n'), ((1541, 1567), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1558, 1567), True, 'import numpy as np\n'), ((1987, 2005), 'numpy.amax', 'np.amax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1994, 2005), True, 'import numpy as np\n')]
# ServiceSchema.py from __future__ import print_function from __future__ import absolute_import from optparse import OptionParser, OptionValueError import os import platform as plat import sys if sys.version_info >= (3, 8) and plat.system().lower() == "windows": # pylint: disable=no-member with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')): import blpapi else: import blpapi REFERENCE_DATA_RESPONSE = blpapi.Name("ReferenceDataResponse") ELEMENT_DATATYPE_NAMES = { blpapi.DataType.BOOL: "BOOL", blpapi.DataType.CHAR: "CHAR", blpapi.DataType.BYTE: "BYTE", blpapi.DataType.INT32: "INT32", blpapi.DataType.INT64: "INT64", blpapi.DataType.FLOAT32: "FLOAT32", blpapi.DataType.FLOAT64: "FLOAT64", blpapi.DataType.STRING: "STRING", blpapi.DataType.BYTEARRAY: "BYTEARRAY", blpapi.DataType.DATE: "DATE", blpapi.DataType.TIME: "TIME", blpapi.DataType.DECIMAL: "DECIMAL", blpapi.DataType.DATETIME: "DATETIME", blpapi.DataType.ENUMERATION: "ENUMERATION", blpapi.DataType.SEQUENCE: "SEQUENCE", blpapi.DataType.CHOICE: "CHOICE", blpapi.DataType.CORRELATION_ID: "CORRELATION_ID" } SCHEMA_STATUS_NAMES = { blpapi.SchemaStatus.ACTIVE: "ACTIVE", blpapi.SchemaStatus.DEPRECATED: "DEPRECATED", blpapi.SchemaStatus.INACTIVE: "INACTIVE", blpapi.SchemaStatus.PENDING_DEPRECATION: "PENDING" } def authOptionCallback(_option, _opt, value, parser): """Parse authorization options from user input""" vals = value.split('=', 1) if value == "user": authUser = blpapi.AuthUser.createWithLogonName() authOptions = blpapi.AuthOptions.createWithUser(authUser) elif value == "none": authOptions = None elif vals[0] == "app" and len(vals) == 2: appName = vals[1] authOptions = blpapi.AuthOptions.createWithApp(appName) elif vals[0] == "userapp" and len(vals) == 2: appName = vals[1] authUser = blpapi.AuthUser.createWithLogonName() authOptions = blpapi.AuthOptions\ .createWithUserAndApp(authUser, appName) elif vals[0] == "dir" and len(vals) == 2: activeDirectoryProperty = vals[1] authUser = blpapi.AuthUser\ .createWithActiveDirectoryProperty(activeDirectoryProperty) authOptions = blpapi.AuthOptions.createWithUser(authUser) elif vals[0] == "manual": parts = [] if len(vals) == 2: parts = vals[1].split(',') if len(parts) != 3: raise OptionValueError("Invalid auth option {}".format(value)) appName, ip, userId = parts authUser = blpapi.AuthUser.createWithManualOptions(userId, ip) authOptions = blpapi.AuthOptions.createWithUserAndApp(authUser, appName) else: raise OptionValueError("Invalid auth option '{}'".format(value)) parser.values.auth = {'option' : authOptions} def parseCmdLine(): parser = OptionParser() parser.add_option("-a", "--host", dest="host", help="HOST address to connect to", metavar="HOST", default="localhost") parser.add_option("-p", "--port", dest="port", type="int", help="PORT to connect to (%default)", metavar="PORT", default=8194) parser.add_option("-s", "--service", default="//blp/apiflds", help="SERVICE to print the schema of " "('//blp/apiflds' by default)") parser.add_option("--auth", dest="auth", help="authentication option: " "user|none|app=<app>|userapp=<app>|dir=<property>" "|manual=<app,ip,user>" " (default: user)\n" "'none' is applicable to Desktop API product " "that requires Bloomberg Professional service " "to be installed locally.", metavar="option", action="callback", callback=authOptionCallback, type="string", default={"option" : blpapi.AuthOptions.createWithUser( blpapi.AuthUser.createWithLogonName())}) (options, _) = parser.parse_args() return options def printMessage(msg): print("[{0}]: {1}".format(", ".join(map(str, msg.correlationIds())), msg)) def getIndent(level): return "" if level == 0 else " ".ljust(level * 2) # Print enumeration (constant list) def printEnumeration(cl, level): indent = getIndent(level + 1) print(indent + " {0} {1} {2} \"{3}\" possible values:".format( cl.name(), SCHEMA_STATUS_NAMES[cl.status()], ELEMENT_DATATYPE_NAMES[cl.datatype()], cl.description())) # Enumerate and print all constant list's values (constants) for i in cl: print(indent + " {0} {1} {2} \"{3}\" = {4!s}".format( i.name(), SCHEMA_STATUS_NAMES[i.status()], ELEMENT_DATATYPE_NAMES[i.datatype()], i.description(), i.getValue())) # Recursively print element definition def printElementDefinition(ed, level=0): indent = getIndent(level) maxValues = ed.maxValues() if maxValues == blpapi.SchemaElementDefinition.UNBOUNDED: valuesRange = "[{0}, INF)".format(ed.minValues()) else: valuesRange = "[{0}, {1}]".format(ed.minValues(), maxValues) # Get and print alternate element names alternateNames = ed.alternateNames() if alternateNames: alternateNames = "[{0}]".format(",".join(map(str, alternateNames))) else: alternateNames = "" print(indent + "* {0} {1} {2} {3} \"{4}\"".format( ed.name(), SCHEMA_STATUS_NAMES[ed.status()], valuesRange, alternateNames, ed.description())) # Get and print related type definition td = ed.typeDefinition() print(indent + " {0} {1} {2} {3}{4}{5}\"{6}\"".format( td.name(), SCHEMA_STATUS_NAMES[td.status()], ELEMENT_DATATYPE_NAMES[td.datatype()], "complex " if td.isComplexType() else "", "simple " if td.isSimpleType() else "", "enum " if td.isEnumerationType() else "", td.description())) # Get and print all possible values for enumeration type enumeration = td.enumeration() if not enumeration is None: printEnumeration(enumeration, level) if td.numElementDefinitions(): print(indent + " Elements[{0}]:".format( td.numElementDefinitions())) # Enumerate and print all sub-element definitions for i in td.elementDefinitions(): printElementDefinition(i, level + 1) def printOperation(operation, _service): print("{0} \"{1}\" Request:".format( operation.name(), operation.description())) # Print operation's request definition printElementDefinition(operation.requestDefinition(), 1) print("Responses[{0}]:".format(operation.numResponseDefinitions())) # Enumerate and print all operation's response definitions for r in operation.responseDefinitions(): printElementDefinition(r, 1) print() def main(): options = parseCmdLine() # Fill SessionOptions sessionOptions = blpapi.SessionOptions() sessionOptions.setServerHost(options.host) sessionOptions.setServerPort(options.port) sessionOptions.setSessionIdentityOptions(options.auth['option']) # Create a Session session = blpapi.Session(sessionOptions) # Start a Session if not session.start(): raise Exception("Can't start session.") try: print("Session started.") # Open service to get reference data from if not session.openService(options.service): raise Exception("Can't open '{0}' service.".format( options.service)) # Obtain previously opened service service = session.getService(options.service) print("Service {0}:".format(options.service)) print("Service event definitions[{0}]:".format( service.numEventDefinitions())) # Enumerate and print all service's event definitions for ed in service.eventDefinitions(): printElementDefinition(ed) print() print("Operations[{0}]:".format(service.numOperations())) # Enumerate and print all service's operations for operation in service.operations(): printOperation(operation, service) finally: # Stop the session session.stop() if __name__ == "__main__": print("ServiceSchema") try: main() except KeyboardInterrupt: print("Ctrl+C pressed. Stopping...") __copyright__ = """ Copyright 2012. Bloomberg Finance L.P. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """
[ "blpapi.AuthUser.createWithManualOptions", "optparse.OptionParser", "blpapi.AuthOptions.createWithUserAndApp", "blpapi.AuthOptions.createWithUser", "blpapi.AuthOptions.createWithApp", "blpapi.SessionOptions", "blpapi.Name", "platform.system", "blpapi.Session", "blpapi.AuthUser.createWithActiveDirectoryProperty", "os.getenv", "blpapi.AuthUser.createWithLogonName" ]
[((429, 465), 'blpapi.Name', 'blpapi.Name', (['"""ReferenceDataResponse"""'], {}), "('ReferenceDataResponse')\n", (440, 465), False, 'import blpapi\n'), ((2930, 2944), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (2942, 2944), False, 'from optparse import OptionParser, OptionValueError\n'), ((7587, 7610), 'blpapi.SessionOptions', 'blpapi.SessionOptions', ([], {}), '()\n', (7608, 7610), False, 'import blpapi\n'), ((7812, 7842), 'blpapi.Session', 'blpapi.Session', (['sessionOptions'], {}), '(sessionOptions)\n', (7826, 7842), False, 'import blpapi\n'), ((1570, 1607), 'blpapi.AuthUser.createWithLogonName', 'blpapi.AuthUser.createWithLogonName', ([], {}), '()\n', (1605, 1607), False, 'import blpapi\n'), ((1630, 1673), 'blpapi.AuthOptions.createWithUser', 'blpapi.AuthOptions.createWithUser', (['authUser'], {}), '(authUser)\n', (1663, 1673), False, 'import blpapi\n'), ((327, 353), 'os.getenv', 'os.getenv', (['"""BLPAPI_LIBDIR"""'], {}), "('BLPAPI_LIBDIR')\n", (336, 353), False, 'import os\n'), ((229, 242), 'platform.system', 'plat.system', ([], {}), '()\n', (240, 242), True, 'import platform as plat\n'), ((1821, 1862), 'blpapi.AuthOptions.createWithApp', 'blpapi.AuthOptions.createWithApp', (['appName'], {}), '(appName)\n', (1853, 1862), False, 'import blpapi\n'), ((1958, 1995), 'blpapi.AuthUser.createWithLogonName', 'blpapi.AuthUser.createWithLogonName', ([], {}), '()\n', (1993, 1995), False, 'import blpapi\n'), ((2018, 2076), 'blpapi.AuthOptions.createWithUserAndApp', 'blpapi.AuthOptions.createWithUserAndApp', (['authUser', 'appName'], {}), '(authUser, appName)\n', (2057, 2076), False, 'import blpapi\n'), ((4482, 4519), 'blpapi.AuthUser.createWithLogonName', 'blpapi.AuthUser.createWithLogonName', ([], {}), '()\n', (4517, 4519), False, 'import blpapi\n'), ((2198, 2272), 'blpapi.AuthUser.createWithActiveDirectoryProperty', 'blpapi.AuthUser.createWithActiveDirectoryProperty', (['activeDirectoryProperty'], {}), '(activeDirectoryProperty)\n', (2247, 2272), False, 'import blpapi\n'), ((2309, 2352), 'blpapi.AuthOptions.createWithUser', 'blpapi.AuthOptions.createWithUser', (['authUser'], {}), '(authUser)\n', (2342, 2352), False, 'import blpapi\n'), ((2629, 2680), 'blpapi.AuthUser.createWithManualOptions', 'blpapi.AuthUser.createWithManualOptions', (['userId', 'ip'], {}), '(userId, ip)\n', (2668, 2680), False, 'import blpapi\n'), ((2703, 2761), 'blpapi.AuthOptions.createWithUserAndApp', 'blpapi.AuthOptions.createWithUserAndApp', (['authUser', 'appName'], {}), '(authUser, appName)\n', (2742, 2761), False, 'import blpapi\n')]
from oraclecxcommerce.modules import ProfilesModule import pytest def test_instantiate_profile_module_class_should_return_not_implemented_error(): with pytest.raises(NotImplementedError): occ = ProfilesModule()
[ "oraclecxcommerce.modules.ProfilesModule", "pytest.raises" ]
[((158, 192), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (171, 192), False, 'import pytest\n'), ((208, 224), 'oraclecxcommerce.modules.ProfilesModule', 'ProfilesModule', ([], {}), '()\n', (222, 224), False, 'from oraclecxcommerce.modules import ProfilesModule\n')]
from aws_cdk import aws_iam, aws_sqs, core from common.common_stack import CommonStack from common.region_aware_stack import RegionAwareStack class SqsStack(RegionAwareStack): def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None: super().__init__(scope, id, **kwargs) self._supported_in_region = self.is_service_supported_in_region() # Test simply asserts the existence of a queue aws_sqs.Queue(self, "integ_test_sqs_queue") queue_policy = aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=["sqs:GetQueueAttributes"], resources=[f"arn:aws:sqs:{self.region}:{self.account}:*"], ) common_stack.add_to_common_role_policies(self, policy_to_add=queue_policy) all_resources_policy = aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=["sqs:ListQueues"], resources=["*"], ) common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)
[ "aws_cdk.aws_iam.PolicyStatement", "aws_cdk.aws_sqs.Queue" ]
[((464, 507), 'aws_cdk.aws_sqs.Queue', 'aws_sqs.Queue', (['self', '"""integ_test_sqs_queue"""'], {}), "(self, 'integ_test_sqs_queue')\n", (477, 507), False, 'from aws_cdk import aws_iam, aws_sqs, core\n'), ((532, 689), 'aws_cdk.aws_iam.PolicyStatement', 'aws_iam.PolicyStatement', ([], {'effect': 'aws_iam.Effect.ALLOW', 'actions': "['sqs:GetQueueAttributes']", 'resources': "[f'arn:aws:sqs:{self.region}:{self.account}:*']"}), "(effect=aws_iam.Effect.ALLOW, actions=[\n 'sqs:GetQueueAttributes'], resources=[\n f'arn:aws:sqs:{self.region}:{self.account}:*'])\n", (555, 689), False, 'from aws_cdk import aws_iam, aws_sqs, core\n'), ((842, 944), 'aws_cdk.aws_iam.PolicyStatement', 'aws_iam.PolicyStatement', ([], {'effect': 'aws_iam.Effect.ALLOW', 'actions': "['sqs:ListQueues']", 'resources': "['*']"}), "(effect=aws_iam.Effect.ALLOW, actions=[\n 'sqs:ListQueues'], resources=['*'])\n", (865, 944), False, 'from aws_cdk import aws_iam, aws_sqs, core\n')]
########################################################## # pytorch-kaldi v.0.1 # <NAME>, <NAME> # Mila, University of Montreal # October 2018 # # Description: This script generates kaldi ark files containing raw features. # The file list must be a file containing "snt_id file.wav". # Note that only wav files are supported here (sphere or other format are not supported) ########################################################## import scipy.io.wavfile import math import numpy as np import os from data_io import read_vec_int_ark, write_mat # Run it for all the data chunks (e.g., train, dev, test) => uncomment lab_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/exp/dnn4_pretrain-dbn_dnn_ali_test" lab_opts = "ali-to-pdf" out_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test" wav_lst = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/test/wav.lst" scp_file_out = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test/feats_raw.scp" # lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_dev' # lab_opts='ali-to-pdf' # out_folder='raw_TIMIT_200ms/dev' # wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/dev/wav_lst.scp' # scp_file_out='quick_test/data/dev/feats_raw.scp' # lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_test' # lab_opts='ali-to-pdf' # out_folder='raw_TIMIT_200ms/test' # wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/test/wav_lst.scp' # scp_file_out='quick_test/data/test/feats_raw.scp' sig_fs = 16000 # Hz sig_wlen = 200 # ms lab_fs = 16000 # Hz lab_wlen = 25 # ms lab_wshift = 10 # ms sig_wlen_samp = int((sig_fs * sig_wlen) / 1000) lab_wlen_samp = int((lab_fs * lab_wlen) / 1000) lab_wshift_samp = int((lab_fs * lab_wshift) / 1000) # Create the output folder try: os.stat(out_folder) except: os.makedirs(out_folder) # Creare the scp file scp_file = open(scp_file_out, "w") # reading the labels lab = { k: v for k, v in read_vec_int_ark( "gunzip -c " + lab_folder + "/ali*.gz | " + lab_opts + " " + lab_folder + "/final.mdl ark:- ark:-|", out_folder ) } # reading the list file with open(wav_lst) as f: sig_lst = f.readlines() sig_lst = [x.strip() for x in sig_lst] for sig_file in sig_lst: sig_id = sig_file.split(" ")[0] sig_path = sig_file.split(" ")[1] [fs, signal] = scipy.io.wavfile.read(sig_path) signal = signal.astype(float) / 32768 signal = signal / np.max(np.abs(signal)) cnt_fr = 0 beg_samp = 0 frame_all = [] while beg_samp + lab_wlen_samp < signal.shape[0]: sample_fr = np.zeros(sig_wlen_samp) central_sample_lab = int(((beg_samp + lab_wlen_samp / 2) - 1)) central_fr_index = int(((sig_wlen_samp / 2) - 1)) beg_signal_fr = int(central_sample_lab - (sig_wlen_samp / 2)) end_signal_fr = int(central_sample_lab + (sig_wlen_samp / 2)) if beg_signal_fr >= 0 and end_signal_fr <= signal.shape[0]: sample_fr = signal[beg_signal_fr:end_signal_fr] else: if beg_signal_fr < 0: n_left_samples = central_sample_lab sample_fr[central_fr_index - n_left_samples + 1 :] = signal[0:end_signal_fr] if end_signal_fr > signal.shape[0]: n_right_samples = signal.shape[0] - central_sample_lab sample_fr[0 : central_fr_index + n_right_samples + 1] = signal[beg_signal_fr:] frame_all.append(sample_fr) cnt_fr = cnt_fr + 1 beg_samp = beg_samp + lab_wshift_samp frame_all = np.asarray(frame_all) # Save the matrix into a kaldi ark out_file = out_folder + "/" + sig_id + ".ark" write_mat(out_folder, out_file, frame_all, key=sig_id) print(sig_id) scp_file.write(sig_id + " " + out_folder + "/" + sig_id + ".ark:" + str(len(sig_id) + 1) + "\n") N_fr_comp = 1 + math.floor((signal.shape[0] - 400) / 160) # print("%s %i %i "%(lab[sig_id].shape[0],N_fr_comp,cnt_fr)) scp_file.close()
[ "data_io.write_mat", "numpy.abs", "os.makedirs", "os.stat", "data_io.read_vec_int_ark", "numpy.asarray", "numpy.zeros", "math.floor" ]
[((1797, 1816), 'os.stat', 'os.stat', (['out_folder'], {}), '(out_folder)\n', (1804, 1816), False, 'import os\n'), ((3554, 3575), 'numpy.asarray', 'np.asarray', (['frame_all'], {}), '(frame_all)\n', (3564, 3575), True, 'import numpy as np\n'), ((3670, 3724), 'data_io.write_mat', 'write_mat', (['out_folder', 'out_file', 'frame_all'], {'key': 'sig_id'}), '(out_folder, out_file, frame_all, key=sig_id)\n', (3679, 3724), False, 'from data_io import read_vec_int_ark, write_mat\n'), ((1829, 1852), 'os.makedirs', 'os.makedirs', (['out_folder'], {}), '(out_folder)\n', (1840, 1852), False, 'import os\n'), ((1967, 2100), 'data_io.read_vec_int_ark', 'read_vec_int_ark', (["('gunzip -c ' + lab_folder + '/ali*.gz | ' + lab_opts + ' ' + lab_folder +\n '/final.mdl ark:- ark:-|')", 'out_folder'], {}), "('gunzip -c ' + lab_folder + '/ali*.gz | ' + lab_opts + ' ' +\n lab_folder + '/final.mdl ark:- ark:-|', out_folder)\n", (1983, 2100), False, 'from data_io import read_vec_int_ark, write_mat\n'), ((2596, 2619), 'numpy.zeros', 'np.zeros', (['sig_wlen_samp'], {}), '(sig_wlen_samp)\n', (2604, 2619), True, 'import numpy as np\n'), ((3865, 3906), 'math.floor', 'math.floor', (['((signal.shape[0] - 400) / 160)'], {}), '((signal.shape[0] - 400) / 160)\n', (3875, 3906), False, 'import math\n'), ((2453, 2467), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (2459, 2467), True, 'import numpy as np\n')]
#!/usr/bin/env python3 import re from .codes import codes class Emoji: def __init__(self, const): if len(const) == 1: self.__fromUnicode(const) elif const[0] == ":": self.__fromAlias(const) else: self.__fromEscape(const) self.aliases = codes[self.escape] self.alias = self.aliases[0] self.char = bytes("\\u"+self.escape, "ascii").decode("unicode-escape")[0] self.is_supported = hex(ord(self.char))[2:] == self.escape def __fromUnicode(self, char): escape = hex(ord(char))[2:] if escape in codes: self.escape = escape else: raise ValueError def __fromAlias(self, alias): for k, v in codes.items(): if alias in v: self.escape = k break else: raise ValueError def __fromEscape(self, escape): if escape in codes.keys(): self.escape = escape else: raise ValueError def replaceAliases(text, trailingSpaces=0, force=False): """ Replaces all supported emoji-cheat-sheet aliases in a text with the corresponding emoji. """ def replAlias(m): alias = ":"+m.group(1)+":" if not Emoji(alias).is_supported and not force: return alias try: return Emoji(alias).char + trailingSpaces * " " except ValueError: return alias return re.sub(":([^s:]?[\w-]+):", replAlias, text) def replaceEmoji(text, trailingSpaces=0): """ Replaces all emojis with their primary emoji-cheat-sheet alias. """ i = 0 while i < len(text): escape = hex(ord(text[i]))[2:] if escape in codes.keys(): text = text.replace(text[i] + trailingSpaces*" ", Emoji(escape).alias) i += len(Emoji(escape).alias) else: i += 1 return text
[ "re.sub" ]
[((1293, 1337), 're.sub', 're.sub', (['""":([^s:]?[\\\\w-]+):"""', 'replAlias', 'text'], {}), "(':([^s:]?[\\\\w-]+):', replAlias, text)\n", (1299, 1337), False, 'import re\n')]
# -*- coding:utf-8 -*- # __author__ = '<NAME>' # Link Model from flask_boilerplate.extensions import db # 表前缀 prefix = 'flask_boilerplate' class Link(db.Model): __tablename__ = '%s_link' % prefix id = db.Column(db.Integer, primary_key=True) sitename = db.Column(db.VARCHAR(30), nullable=False, default='') siteurl = db.Column(db.VARCHAR(75), nullable=False, default='') description = db.Column(db.VARCHAR(255), nullable=False, default='') hide = db.Column(db.Enum('n', 'y'), nullable=False, default='n') taxis = db.Column(db.Integer, nullable=False, default=0) def __repr__(self): return '<Link %r>' % (self.sitename)
[ "flask_boilerplate.extensions.db.Enum", "flask_boilerplate.extensions.db.Column", "flask_boilerplate.extensions.db.VARCHAR" ]
[((214, 253), 'flask_boilerplate.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (223, 253), False, 'from flask_boilerplate.extensions import db\n'), ((545, 593), 'flask_boilerplate.extensions.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)', 'default': '(0)'}), '(db.Integer, nullable=False, default=0)\n', (554, 593), False, 'from flask_boilerplate.extensions import db\n'), ((279, 293), 'flask_boilerplate.extensions.db.VARCHAR', 'db.VARCHAR', (['(30)'], {}), '(30)\n', (289, 293), False, 'from flask_boilerplate.extensions import db\n'), ((347, 361), 'flask_boilerplate.extensions.db.VARCHAR', 'db.VARCHAR', (['(75)'], {}), '(75)\n', (357, 361), False, 'from flask_boilerplate.extensions import db\n'), ((419, 434), 'flask_boilerplate.extensions.db.VARCHAR', 'db.VARCHAR', (['(255)'], {}), '(255)\n', (429, 434), False, 'from flask_boilerplate.extensions import db\n'), ((485, 502), 'flask_boilerplate.extensions.db.Enum', 'db.Enum', (['"""n"""', '"""y"""'], {}), "('n', 'y')\n", (492, 502), False, 'from flask_boilerplate.extensions import db\n')]
""" Download civil war ships and their complements from dbpedia """ from os import path import json from SPARQLWrapper import SPARQLWrapper, JSON sparql = SPARQLWrapper("http://dbpedia.org/sparql") sparql.setQuery(""" select distinct ?ship, ?complement where { { {?ship dcterms:subject category:Ships_of_the_Union_Navy} UNION {?ship dcterms:subject [skos:broader category:Ships_of_the_Confederate_States_Navy]} } ?ship dbpprop:shipComplement ?complement FILTER (datatype(?complement) = xsd:integer) } """) sparql.setReturnFormat(JSON) results = sparql.query().convert() data = [] for x in results['results']['bindings']: data.append({'ship': x['ship']['value'], 'complement': x['complement']['value']}) with open("ships.csv", "w") as f: writer = csv.DictWriter(f, ('ship', 'complement')) writer.writeheader() writer.writerows(data)
[ "SPARQLWrapper.SPARQLWrapper" ]
[((157, 199), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://dbpedia.org/sparql"""'], {}), "('http://dbpedia.org/sparql')\n", (170, 199), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n')]
import autograd.numpy as np import autograd import os from autograd import grad from autograd import jacobian from mpl_toolkits.mplot3d import axes3d import matplotlib.pyplot as plt from matplotlib import cm from scipy.linalg import pinv import argparse parser = argparse.ArgumentParser() parser.add_argument("--function", type=int, default=1, help="choose from three low dimensional example functions, 1-3") opt = parser.parse_args() function = opt.function # GDA def gda(z_0, alpha=0.05, num_iter=100): z = [z_0] grad_fn = grad(target) for i in range(num_iter): g = grad_fn(z[-1]) z1 = z[-1] + g*np.array([-1,1])*alpha z.append(z1) z = np.array(z) return z # Extra Gradient def eg(z_0, alpha=0.05, num_iter=100): z = [z_0] grad_fn = grad(target) for i in range(num_iter): g = grad_fn(z[-1]) z1 = z[-1] + g*np.array([-1,1])*alpha g = grad_fn(z1) z2 = z[-1] + g*np.array([-1,1])*alpha z.append(z2) z = np.array(z) return z # Optimistic Gradient def ogda(z_0, alpha=0.05, num_iter=100): z = [z_0,z_0] grads = [] grad_fn = grad(target) for i in range(num_iter): g = grad_fn(z[-1]) gg = grad_fn(z[-2]) z1 = z[-1] + 2*g*np.array([-1,1])*alpha - gg*np.array([-1,1])*alpha z.append(z1) z = np.array(z) return z # Consensus Optimization def co(z_0, alpha=0.01, gamma=0.1, num_iter=100): z = [z_0] grads = [] grad_fn = grad(target) hessian = jacobian(grad_fn) for i in range(num_iter): g = grad_fn(z[-1]) H = hessian(z[-1]) #print(np.matmul(H,g), z[-1]) v = g*np.array([1,-1]) + gamma*np.matmul(H,g) z1 = z[-1] - alpha*v z.append(z1) z = np.array(z) return z # Symplectic gradient adjustment def sga(z_0, alpha=0.05, lamb=0.1, num_iter = 100): z = [z_0] grad_fn = grad(target) hessian = jacobian(grad_fn) for i in range(num_iter): g = grad_fn(z[-1]) w = g * np.array([1,-1]) H = hessian(z[-1]) HH = np.array([[1, -lamb*H[0,1]],[lamb*H[0,1],1]]) v = HH @ w z1 = z[-1] - alpha*v z.append(z1) z = np.array(z) return z # Follow the ridge def follow(z_0, alpha=0.05, num_iter = 100): z = [z_0] grad_fn = grad(target) hessian = jacobian(grad_fn) for i in range(num_iter): g = grad_fn(z[-1]) H = hessian(z[-1]) v = np.array([g[0], -g[1]-H[0,1]*np.squeeze(pinv(H[1:,1:]))*g[0]]) z1 = z[-1] - alpha*v z.append(z1) z = np.array(z) return z def f1(z): x = z[0] y = z[1] f = -3*x*x-y*y+4*x*y return f def f2(z): x = z[0] y = z[1] f = 3*x*x+y*y+4*x*y return f def f3(z): x = z[0] y = z[1] f = (0.4*x*x-0.1*(y-3*x+0.05*x*x*x)**2-0.01*y*y*y*y)*np.exp(-0.01*(x*x+y*y)) return f # Select target function if function==1: target = f1 # (0,0) is local minimax and global minimax z_0 = np.array([5., 7.]) # Set initial point plot_width = 12 # Set range of the plot root_dir = 'results/f1.pdf' elif function==2: target = f2 # (0,0) is not local minimax and not global minimax z_0 = np.array([6., 5.]) plot_width = 12 root_dir = 'results/f2.pdf' elif function==3: target = f3 # (0,0) is local minimax z_0 = np.array([7., 5.]) plot_width = 8 root_dir = 'results/f3.pdf' # Run all algorithms on target zfr=follow(z_0, num_iter = 1000, alpha = 0.05) zgda=gda(z_0, num_iter = 1000, alpha = 0.05) zogda=ogda(z_0, num_iter = 1000, alpha = 0.05) zeg=eg(z_0, num_iter = 1000, alpha = 0.05) zco=co(z_0, num_iter = 1000, alpha = 0.05, gamma=0.1) zsga=sga(z_0, num_iter = 1000, alpha = 0.01, lamb=1.0) # Plot trajectory with contour plt.rcParams.update({'font.size': 14}) def_colors=(plt.rcParams['axes.prop_cycle'].by_key()['color']) #plot_width=12 plt.figure(figsize=(5,5)) axes = plt.gca() axes.set_xlim([-plot_width,plot_width]) axes.set_ylim([-plot_width,plot_width]) x1 = np.arange(-plot_width,plot_width,0.1) y1 = np.arange(-plot_width,plot_width,0.1) X,Y = np.meshgrid(x1,y1) Z = np.zeros_like(X) for i in range(len(x1)): for j in range(len(y1)): Z[j][i] = target(np.array([x1[i] ,y1[j]])) plt.contourf(X,Y,Z,30,cmap=plt.cm.gray) lw = 2 hw = 0.7 line6,=plt.plot(zfr[:,0],zfr[:,1],'-',color='r',linewidth=lw,zorder=10) line1,=plt.plot(zgda[:,0],zgda[:,1],'--',linewidth=lw,color=def_colors[9],zorder=2) line2,=plt.plot(zogda[:,0],zogda[:,1],'--',linewidth=lw,color=def_colors[1]) line3,=plt.plot(zeg[:,0],zeg[:,1],'--',linewidth=lw,color=def_colors[2]) line4,=plt.plot(zsga[:,0],zsga[:,1],'--',color=def_colors[0],linewidth=lw) line5,=plt.plot(zco[:,0],zco[:,1],'--',color='xkcd:violet',linewidth=lw) init=plt.plot(zfr[0,0],zfr[0,1],'^',zorder=20,ms=12.0,color='r') plt.legend((line6,line1, line2, line3, line4, line5), ('FR','GDA', 'OGDA', 'EG', 'SGA', 'CO'), loc=4) os.makedirs('results/', exist_ok=True) plt.savefig(root_dir, dpi=300) #plt.show()
[ "autograd.numpy.arange", "argparse.ArgumentParser", "matplotlib.pyplot.plot", "autograd.numpy.zeros_like", "os.makedirs", "matplotlib.pyplot.legend", "autograd.numpy.meshgrid", "autograd.numpy.array", "autograd.grad", "matplotlib.pyplot.figure", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.contourf", "autograd.numpy.exp", "matplotlib.pyplot.gca", "autograd.numpy.matmul", "autograd.jacobian", "scipy.linalg.pinv", "matplotlib.pyplot.savefig" ]
[((264, 289), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (287, 289), False, 'import argparse\n'), ((3820, 3858), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (3839, 3858), True, 'import matplotlib.pyplot as plt\n'), ((3939, 3965), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (3949, 3965), True, 'import matplotlib.pyplot as plt\n'), ((3972, 3981), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3979, 3981), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4107), 'autograd.numpy.arange', 'np.arange', (['(-plot_width)', 'plot_width', '(0.1)'], {}), '(-plot_width, plot_width, 0.1)\n', (4077, 4107), True, 'import autograd.numpy as np\n'), ((4111, 4150), 'autograd.numpy.arange', 'np.arange', (['(-plot_width)', 'plot_width', '(0.1)'], {}), '(-plot_width, plot_width, 0.1)\n', (4120, 4150), True, 'import autograd.numpy as np\n'), ((4155, 4174), 'autograd.numpy.meshgrid', 'np.meshgrid', (['x1', 'y1'], {}), '(x1, y1)\n', (4166, 4174), True, 'import autograd.numpy as np\n'), ((4178, 4194), 'autograd.numpy.zeros_like', 'np.zeros_like', (['X'], {}), '(X)\n', (4191, 4194), True, 'import autograd.numpy as np\n'), ((4300, 4343), 'matplotlib.pyplot.contourf', 'plt.contourf', (['X', 'Y', 'Z', '(30)'], {'cmap': 'plt.cm.gray'}), '(X, Y, Z, 30, cmap=plt.cm.gray)\n', (4312, 4343), True, 'import matplotlib.pyplot as plt\n'), ((4364, 4435), 'matplotlib.pyplot.plot', 'plt.plot', (['zfr[:, 0]', 'zfr[:, 1]', '"""-"""'], {'color': '"""r"""', 'linewidth': 'lw', 'zorder': '(10)'}), "(zfr[:, 0], zfr[:, 1], '-', color='r', linewidth=lw, zorder=10)\n", (4372, 4435), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4523), 'matplotlib.pyplot.plot', 'plt.plot', (['zgda[:, 0]', 'zgda[:, 1]', '"""--"""'], {'linewidth': 'lw', 'color': 'def_colors[9]', 'zorder': '(2)'}), "(zgda[:, 0], zgda[:, 1], '--', linewidth=lw, color=def_colors[9],\n zorder=2)\n", (4444, 4523), True, 'import matplotlib.pyplot as plt\n'), ((4520, 4595), 'matplotlib.pyplot.plot', 'plt.plot', (['zogda[:, 0]', 'zogda[:, 1]', '"""--"""'], {'linewidth': 'lw', 'color': 'def_colors[1]'}), "(zogda[:, 0], zogda[:, 1], '--', linewidth=lw, color=def_colors[1])\n", (4528, 4595), True, 'import matplotlib.pyplot as plt\n'), ((4597, 4668), 'matplotlib.pyplot.plot', 'plt.plot', (['zeg[:, 0]', 'zeg[:, 1]', '"""--"""'], {'linewidth': 'lw', 'color': 'def_colors[2]'}), "(zeg[:, 0], zeg[:, 1], '--', linewidth=lw, color=def_colors[2])\n", (4605, 4668), True, 'import matplotlib.pyplot as plt\n'), ((4670, 4743), 'matplotlib.pyplot.plot', 'plt.plot', (['zsga[:, 0]', 'zsga[:, 1]', '"""--"""'], {'color': 'def_colors[0]', 'linewidth': 'lw'}), "(zsga[:, 0], zsga[:, 1], '--', color=def_colors[0], linewidth=lw)\n", (4678, 4743), True, 'import matplotlib.pyplot as plt\n'), ((4745, 4816), 'matplotlib.pyplot.plot', 'plt.plot', (['zco[:, 0]', 'zco[:, 1]', '"""--"""'], {'color': '"""xkcd:violet"""', 'linewidth': 'lw'}), "(zco[:, 0], zco[:, 1], '--', color='xkcd:violet', linewidth=lw)\n", (4753, 4816), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4882), 'matplotlib.pyplot.plot', 'plt.plot', (['zfr[0, 0]', 'zfr[0, 1]', '"""^"""'], {'zorder': '(20)', 'ms': '(12.0)', 'color': '"""r"""'}), "(zfr[0, 0], zfr[0, 1], '^', zorder=20, ms=12.0, color='r')\n", (4824, 4882), True, 'import matplotlib.pyplot as plt\n'), ((4876, 4983), 'matplotlib.pyplot.legend', 'plt.legend', (['(line6, line1, line2, line3, line4, line5)', "('FR', 'GDA', 'OGDA', 'EG', 'SGA', 'CO')"], {'loc': '(4)'}), "((line6, line1, line2, line3, line4, line5), ('FR', 'GDA', 'OGDA',\n 'EG', 'SGA', 'CO'), loc=4)\n", (4886, 4983), True, 'import matplotlib.pyplot as plt\n'), ((4980, 5018), 'os.makedirs', 'os.makedirs', (['"""results/"""'], {'exist_ok': '(True)'}), "('results/', exist_ok=True)\n", (4991, 5018), False, 'import os\n'), ((5019, 5049), 'matplotlib.pyplot.savefig', 'plt.savefig', (['root_dir'], {'dpi': '(300)'}), '(root_dir, dpi=300)\n', (5030, 5049), True, 'import matplotlib.pyplot as plt\n'), ((535, 547), 'autograd.grad', 'grad', (['target'], {}), '(target)\n', (539, 547), False, 'from autograd import grad\n'), ((680, 691), 'autograd.numpy.array', 'np.array', (['z'], {}), '(z)\n', (688, 691), True, 'import autograd.numpy as np\n'), ((790, 802), 'autograd.grad', 'grad', (['target'], {}), '(target)\n', (794, 802), False, 'from autograd import grad\n'), ((1005, 1016), 'autograd.numpy.array', 'np.array', (['z'], {}), '(z)\n', (1013, 1016), True, 'import autograd.numpy as np\n'), ((1141, 1153), 'autograd.grad', 'grad', (['target'], {}), '(target)\n', (1145, 1153), False, 'from autograd import grad\n'), ((1344, 1355), 'autograd.numpy.array', 'np.array', (['z'], {}), '(z)\n', (1352, 1355), True, 'import autograd.numpy as np\n'), ((1488, 1500), 'autograd.grad', 'grad', (['target'], {}), '(target)\n', (1492, 1500), False, 'from autograd import grad\n'), ((1515, 1532), 'autograd.jacobian', 'jacobian', (['grad_fn'], {}), '(grad_fn)\n', (1523, 1532), False, 'from autograd import jacobian\n'), ((1767, 1778), 'autograd.numpy.array', 'np.array', (['z'], {}), '(z)\n', (1775, 1778), True, 'import autograd.numpy as np\n'), ((1906, 1918), 'autograd.grad', 'grad', (['target'], {}), '(target)\n', (1910, 1918), False, 'from autograd import grad\n'), ((1933, 1950), 'autograd.jacobian', 'jacobian', (['grad_fn'], {}), '(grad_fn)\n', (1941, 1950), False, 'from autograd import jacobian\n'), ((2204, 2215), 'autograd.numpy.array', 'np.array', (['z'], {}), '(z)\n', (2212, 2215), True, 'import autograd.numpy as np\n'), ((2322, 2334), 'autograd.grad', 'grad', (['target'], {}), '(target)\n', (2326, 2334), False, 'from autograd import grad\n'), ((2349, 2366), 'autograd.jacobian', 'jacobian', (['grad_fn'], {}), '(grad_fn)\n', (2357, 2366), False, 'from autograd import jacobian\n'), ((2584, 2595), 'autograd.numpy.array', 'np.array', (['z'], {}), '(z)\n', (2592, 2595), True, 'import autograd.numpy as np\n'), ((3017, 3037), 'autograd.numpy.array', 'np.array', (['[5.0, 7.0]'], {}), '([5.0, 7.0])\n', (3025, 3037), True, 'import autograd.numpy as np\n'), ((2081, 2134), 'autograd.numpy.array', 'np.array', (['[[1, -lamb * H[0, 1]], [lamb * H[0, 1], 1]]'], {}), '([[1, -lamb * H[0, 1]], [lamb * H[0, 1], 1]])\n', (2089, 2134), True, 'import autograd.numpy as np\n'), ((2855, 2886), 'autograd.numpy.exp', 'np.exp', (['(-0.01 * (x * x + y * y))'], {}), '(-0.01 * (x * x + y * y))\n', (2861, 2886), True, 'import autograd.numpy as np\n'), ((3245, 3265), 'autograd.numpy.array', 'np.array', (['[6.0, 5.0]'], {}), '([6.0, 5.0])\n', (3253, 3265), True, 'import autograd.numpy as np\n'), ((2024, 2041), 'autograd.numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (2032, 2041), True, 'import autograd.numpy as np\n'), ((3393, 3413), 'autograd.numpy.array', 'np.array', (['[7.0, 5.0]'], {}), '([7.0, 5.0])\n', (3401, 3413), True, 'import autograd.numpy as np\n'), ((4274, 4298), 'autograd.numpy.array', 'np.array', (['[x1[i], y1[j]]'], {}), '([x1[i], y1[j]])\n', (4282, 4298), True, 'import autograd.numpy as np\n'), ((1669, 1686), 'autograd.numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (1677, 1686), True, 'import autograd.numpy as np\n'), ((1694, 1709), 'autograd.numpy.matmul', 'np.matmul', (['H', 'g'], {}), '(H, g)\n', (1703, 1709), True, 'import autograd.numpy as np\n'), ((628, 645), 'autograd.numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (636, 645), True, 'import autograd.numpy as np\n'), ((883, 900), 'autograd.numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (891, 900), True, 'import autograd.numpy as np\n'), ((953, 970), 'autograd.numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (961, 970), True, 'import autograd.numpy as np\n'), ((1292, 1309), 'autograd.numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1300, 1309), True, 'import autograd.numpy as np\n'), ((1264, 1281), 'autograd.numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1272, 1281), True, 'import autograd.numpy as np\n'), ((2503, 2518), 'scipy.linalg.pinv', 'pinv', (['H[1:, 1:]'], {}), '(H[1:, 1:])\n', (2507, 2518), False, 'from scipy.linalg import pinv\n')]
import re import math from tld import get_tld from Levenshtein import distance from .suspicious import keywords, tlds def entropy(string: str) -> float: """ Calculates the Shannon entropy of a string Original code: https://github.com/x0rz/phishing_catcher/blob/master/catch_phishing.py """ prob = [float(string.count(c)) / len(string) for c in dict.fromkeys(list(string))] ent = -sum([p * math.log(p) / math.log(2.0) for p in prob]) return ent def score_domain(domain: str) -> int: """Score `domain`. The highest score, the most probable `domain` is a phishing site. Args: domain (str): the domain to check. Returns: int: the score of `domain`. #https://github.com/x0rz/phishing_catcher/blob/master/catch_phishing.py """ score = 0 for t in tlds: if domain.endswith(t): score += 20 # Remove initial '*.' for wildcard certificates bug if domain.startswith("*."): domain = domain[2:] # Removing TLD to catch inner TLD in subdomain (ie. paypal.com.domain.com) try: res = get_tld(domain, as_object=True, fail_silently=True, fix_protocol=True) domain = ".".join([res.subdomain, res.domain]) except: # noqa: B110 pass words_in_domain = re.split("\W+", domain) # Remove initial '*.' for wildcard certificates bug if domain.startswith("*."): domain = domain[2:] # ie. detect fake .com (ie. *.com-account-management.info) if words_in_domain[0] in ["com", "net", "org"]: score += 10 # Testing keywords for word in keywords.items(): if word[0] in domain: score += word[1] # Higher entropy is kind of suspicious score += int(round(entropy(domain) * 10)) # Testing Levenshtein distance for strong keywords (>= 70 points) (ie. paypol) for key in [k for (k, s) in keywords.items() if s >= 70]: # Removing too generic keywords (ie. mail.domain.com) for word in [w for w in words_in_domain if w not in ["email", "mail", "cloud"]]: if distance(str(word), str(key)) == 1: score += 70 # Lots of '-' (ie. www.paypal-datacenter.com-acccount-alert.com) if "xn--" not in domain and domain.count("-") >= 4: score += domain.count("-") * 3 # Deeply nested subdomains (ie. www.paypal.com.security.accountupdate.gq) if domain.count(".") >= 3: score += domain.count(".") * 3 return score
[ "math.log", "re.split", "tld.get_tld" ]
[((1289, 1313), 're.split', 're.split', (['"""\\\\W+"""', 'domain'], {}), "('\\\\W+', domain)\n", (1297, 1313), False, 'import re\n'), ((1101, 1171), 'tld.get_tld', 'get_tld', (['domain'], {'as_object': '(True)', 'fail_silently': '(True)', 'fix_protocol': '(True)'}), '(domain, as_object=True, fail_silently=True, fix_protocol=True)\n', (1108, 1171), False, 'from tld import get_tld\n'), ((429, 442), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (437, 442), False, 'import math\n'), ((415, 426), 'math.log', 'math.log', (['p'], {}), '(p)\n', (423, 426), False, 'import math\n')]
import csv import sqlite3 from tkinter import * from tkinter import filedialog """Tool to compare two reports and provide specific information from matching lines""" class MatchTool: UNPLACED_RSL_TEXT = [ "Copy Required Report", "Ad Copy Status Report", "Unplaced Spots", "Required Spots", ] def __init__(self, master): self.master = master master.geometry("400x300") master.title("Discrepancy Match Tool") self.top_frame = Frame(master) self.bottom_frame = Frame(master, width=400) self.novar_button_var = IntVar() self.novar_button_var.set(0) self.novar_button = Checkbutton(self.top_frame, variable=self.novar_button_var, command=self.enableNovar) self.eclipse_button_var = IntVar() self.eclipse_button_var.set(0) self.eclipse_button = Checkbutton(self.top_frame, variable=self.eclipse_button_var, command=self.enableEclipse) self.missing_button_var = IntVar() self.missing_button_var.set(0) self.missing_button = Checkbutton(self.top_frame, state=DISABLED, variable=self.missing_button_var, command=self.missingCopy) self.unplaced_button_var = IntVar() self.unplaced_button_var.set(0) self.unplaced_button = Checkbutton(self.top_frame, state=DISABLED, variable=self.unplaced_button_var, command=self.unplacedRSL) self.novar_label = Label(self.top_frame, text="Novar") self.eclipse_label = Label(self.top_frame, text="Eclipse/XG") self.missing_label = Label(self.top_frame, text="Missing Copy") self.unplaced_label_text = StringVar() self.unplaced_label_text.set("Unplaced or Required Spots") self.unplaced_label = Label(self.top_frame, textvariable=self.unplaced_label_text, width=22, anchor=constants.W) self.load_discrep = Button(self.bottom_frame, text="Load Discrepancy Report", width=25, command=self.loadDiscrep) self.load_discrep_file_name_text = StringVar() self.load_discrep_file_name = Label(self.bottom_frame, textvariable=self.load_discrep_file_name_text) self.submit = Button(self.bottom_frame, text="Submit") self.load_unplaced_text = StringVar() self.load_unplaced_text.set("Load Report") self.load_unplaced = Button(self.bottom_frame, textvariable=self.load_unplaced_text, width=25, command=self.loadReports) self.load_unplaced_file_name_text = StringVar() self.load_unplaced_file_name = Label(self.bottom_frame, textvariable=self.load_unplaced_file_name_text) #Layout self.top_frame.grid() self.bottom_frame.grid(row=1) self.novar_button.grid() self.eclipse_button.grid(row=1) self.missing_button.grid(row=2) self.unplaced_button.grid(row=3) self.novar_label.grid(row=0, column=1, sticky=W) self.eclipse_label.grid(row=1, column=1, sticky=W) self.missing_label.grid(row=2, column=1, sticky=W) self.unplaced_label.grid(row=3, column=1, sticky=W) self.load_discrep.grid(row=0, pady=3, ipadx=5) self.load_discrep_file_name.grid(row=1, pady=3, ipadx=5) self.load_unplaced.grid(row=2, pady=3, ipadx=5) self.load_unplaced_file_name.grid(row=3, pady=3, ipadx=5) #Functions def enableNovar(self): """Activates the Missing Copy and Unplaced Spots checkboxes, and disables the Novar checkbox""" if self.novar_button_var.get() == 1: self.eclipse_button["state"] = DISABLED self.missing_button["state"] = ACTIVE self.unplaced_button["state"] = ACTIVE self.unplaced_label_text.set(self.UNPLACED_RSL_TEXT[3]) else: self.eclipse_button["state"] = ACTIVE self.missing_button["state"] = DISABLED self.unplaced_button["state"] = DISABLED self.unplaced_label_text.set("Unplaced or Required Spots") def enableEclipse(self): """Activates the Missing Copy and Required Spots checkboxes, and disables the Eclipse checkbox""" if self.eclipse_button_var.get() == 1: self.novar_button["state"] = DISABLED self.missing_button["state"] = ACTIVE self.unplaced_button["state"] = ACTIVE self.unplaced_label_text.set(self.UNPLACED_RSL_TEXT[2]) else: self.novar_button["state"] = ACTIVE self.missing_button["state"] = DISABLED self.unplaced_button["state"] = DISABLED self.unplaced_label_text.set("Unplaced or Required Spots") def missingCopy(self): """Changes the value of missing_button_var to 1, changes text of unplaced_text, shows Submit button""" if self.missing_button_var.get() == 1: self.unplaced_button["state"] = DISABLED self.submit.grid(row=4, pady=5) if self.novar_button_var.get() == 1: self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[1]) elif self.eclipse_button_var.get() == 1: self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[0]) else: self.unplaced_button["state"] = ACTIVE self.load_unplaced_text.set("Load Report") self.submit.grid_forget() def unplacedRSL(self): """changes the value of unplaced_button_var to 1, changes text of unplaced_text, shows Submit button""" if self.unplaced_button_var.get() == 1: self.missing_button["state"] = DISABLED self.submit.grid(row=4, pady=5) if self.novar_button_var.get() == 1: self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[3]) elif self.eclipse_button_var.get() == 1: self.load_unplaced_text.set("Load " + self.UNPLACED_RSL_TEXT[2]) else: self.missing_button["state"] = ACTIVE self.load_unplaced_text.set("Load Report") self.submit.grid_forget() def unplacedEdit(self, loaded_file): """Opens the CSV file and edits the date and time for the Unplaced Spots report""" with open(loaded_file) as csv_file: unplaced_reader = csv.reader(csv_file, delimiter=',') unplaced_list = [row for row in unplaced_reader] unplaced_list.pop(0) unplaced_list[0].extend(['Date', 'Time']) for i in range(1, len(unplaced_list)): date_time = unplaced_list[i][1].split(' ') unplaced_list[i].append(date_time[0]) time_of_day = int(date_time[1][:date_time[1].index(":")]) if time_of_day < 13: date_time[1] = date_time[1] + " AM" else: time_of_day = time_of_day - 12 date_time[1] = str(time_of_day) + date_time[1][date_time[1].index(":"):] + " PM" unplaced_list[i].append(date_time[1]) return unplaced_list def rslEdit(self, loaded_file): """Edits the RSL report's Date and Time""" with open(loaded_file) as csv_file: rsl_reader = csv.reader(csv_file, delimiter=',') rsl_list = [row for row in rsl_reader] rsl_list[0].extend(['Date', 'Time']) for i in range(1, len(rsl_list)): date = rsl_list[i][16] date = date[:date.index("-")] new_date = date.split('/') #add 20 to the beginning of the year new_date[2] = "20" + new_date[2] date = new_date[0] + '/' + new_date[1] + '/' + new_date[2] time = rsl_list[i][17] time = time[:time.index("-")] rsl_list[i].append(date) rsl_list[i].append(time) for x in range(1, len(rsl_list)): digits = rsl_list[x][31] digits = int(digits[:digits.index(":")]) if digits < 10: rsl_list[x][31] = rsl_list[x][31][1:] + " AM" elif digits < 13: rsl_list[x][31] = rsl_list[x][31] + " AM" else: digits = digits - 12 rsl_list[x][31] = str(digits) + rsl_list[x][31][rsl_list[x][31].index(":"):] + " PM" return rsl_list def copyRequiredEdit(self, loaded_file): """Removes the first row from the Copy Required Report""" with open(loaded_file) as csv_file: cr_reader = csv.reader(csv_file, delimiter=',') cr_list = [row for row in cr_reader] cr_list.pop(0) return cr_list # def discrepEdit(self, loaded_file): # """Splits up the contract ID's into a list""" # with open(loaded_file) as csv_file: # discrep_reader = csv.reader(csv_file, delimiter=',') # discrep_list = [row for row in discrep_reader] # for i in range(1, len(discrep_list)): # discrep_list[i][11] = discrep_list[i][11].split(';') # return discrep_list def discrepancyDB(self, discrepancy): """creates SQL database from the discrepancy report""" with sqlite3.connect("DiscrepMatch.db") as connection: c = connection.cursor() discrep = csv.reader(open(discrepancy, "rU")) c.execute("DROP TABLE if exists discrepancy1") c.execute("""CREATE TABLE discrepancy1(Discrepancy TEXT, Reservation TEXT, Event TEXT, Episode TEXT, DateOf TEXT, Start TEXT, Market TEXT, Zone TEXT, Network TEXT, ClientID INT, ClientName TEXT, ContractID TEXT, Rate TEXT, AE TEXT, Modified TEXT, ModifiedBy TEXT)""") c.executemany("""INSERT INTO discrepancy1(Discrepancy, Reservation, Event, Episode, DateOf, Start, Market, Zone, Network, ClientID, ClientName, ContractID, Rate, AE, Modified, ModifiedBy) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", discrep) #AdCopyStatus (No Edits needed) def adCopyDB(self, ad_copy): """creates SQL database from the ad copy status report""" with sqlite3.connect("DiscrepMatch.db") as connection: c = connection.cursor() adCopyStatus = csv.reader(open(ad_copy, "rU")) c.execute("DROP TABLE if exists AdCopyStatus") c.execute("""CREATE TABLE AdCopyStatus(ClientID INT, ClientName TEXT, AdCopyID INT, CutName TEXT, CutStart TEXT, CutStop TEXT, Reason TEXT)""") c.executemany("""INSERT INTO AdCopyStatus(ClientID, ClientName, AdCopyID, CutName, CutStart, CutStop, Reason) values (?, ?, ?, ?, ?, ?, ?)""", adCopyStatus) #Copy Required (Edit Required) def copyRequiredDB(self, copy_required): """creates SQL database from the copy required report""" with sqlite3.connect("DiscrepMatch.db") as connection: c = connection.cursor() copyRequired = copy_required c.execute("DROP TABLE if exists copyrequired") c.execute("""CREATE TABLE copyrequired(ClientID TEXT, ClientName TEXT, Rotation INT, RotDesc INT, SalesID INT, AE TEXT, SalOffID TEXT, SalOff TEXT, OrderNum TEXT, Networks TEXT, Regions TEXT, TotalRev TEXT, AvgPrty INT, DateNeed TEXT, Issue TEXT)""") c.executemany("""INSERT INTO copyrequired(ClientID, ClientName, Rotation, RotDesc, SalesID, AE, SalOffID, SalOff, OrderNum, Networks, Regions, TotalRev, AvgPrty, DateNeed, Issue) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", copyRequired) #RSL (Edit Required) def rslDB(self, rsl_report): """creates SQL database from the RSL report""" with sqlite3.connect("DiscrepMatch.db") as connection: c = connection.cursor() rsl = rsl_report c.execute("DROP TABLE if exists RSL") c.execute("""CREATE TABLE RSL(AE TEXT, Priority INT, ClientID INT, Client TEXT, ConID INT, LineNum INT, Zone TEXT, Network TEXT, DaysAuth TEXT, Mon INT, Tue INT, Wed INT, Thu INT, Fri INT, Sat INT, Sun INT, OldDates TEXT, Daypart TEXT, CGName TEXT, Total INT, Normal INT, Sched INT, Aired INT, ToDO INT, FinalWeek TEXT, Length INT, Program TEXT, Cost INT, LostRev INT, RD INT, NewDate TEXT, NewTime TEXT)""") c.executemany("""INSERT INTO RSL(AE, Priority, ClientID, Client, ConID, LineNum, Zone, Network, DaysAuth, Mon, Tue, Wed, Thu, Fri, Sat, Sun, OldDates, Daypart, CGName, Total, Normal, Sched, Aired, ToDO, FinalWeek, Length, Program, Cost, LostRev, RD, NewDate, NewTime) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", rsl) #Unplaced (Edit Required) def unplacedDB(self, unplaced_report): """Creates SQL database from the unplaced spot report""" with sqlite3.connect("DiscrepMatch.db") as connection: c = connection.cursor() unplaced = unplaced_report c.execute("DROP TABLE if exists unplacedSpots") c.execute("""CREATE TABLE unplacedSpots(OrderNum INT, OldDate TEXT, SpotName TEXT, Length INT, Description TEXT, Network TEXT, ClientID INT, Client TEXT, Phone TEXT, Initials TEXT, Rotation INT, Active TEXT, UCType TEXT, Retail INT, InvType TEXT, Billing TEXT, Market TEXT, Zone TEXT, Priority INT, Buy1 INT, BuyType TEXT, SpotsWeek INT, SpotsLine INT, MonAct TEXT, MonQua INT, TueAct TEXT, TueQua INT, WedAct TEXT, WedQua INT, ThuAct TEXT, ThuQua INT, FriAct TEXT, FriQua INT, SatAct TEXT, SatQua INT, SunAct TEXT, SunQua INT, Buy2 INT, Exception TEXT, Daypart TEXT, Entity TEXT, LineType TEXT, LineNum INT, OfficeID TEXT, Description2 TEXT, Name TEXT, OfficeName TEXT, Exception2 TEXT, Uniform TEXT, LineNum2 INT, "Group" INT, EndDate TEXT, Orbits TEXT, NewDate TEXT, NewTime TEXT)""") c.executemany("""INSERT INTO unplacedSpots(OrderNum, OldDate, SpotName, Length, Description, Network, ClientID, Client, Phone, Initials, Rotation, Active, UCType, Retail, InvType, Billing, Market, Zone, Priority, Buy1, BuyType, SpotsWeek, Spotsline, MonAct, MonQua, TueAct, TueQua, WedAct, WedQua, ThuAct, ThuQua, FriAct, FriQua, SatAct, SatQua, SunAct, SunQua, Buy2, Exception, Daypart, Entity, LineType, LineNum, OfficeID, Description2, Name, OfficeName, Exception2, Uniform, LineNum2, "Group", EndDate, Orbits, NewDate, NewTime) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", unplaced) def loadDiscrep(self): """Opens file directory for user to load report in xls format""" discrepReport = filedialog.askopenfilename( filetypes=[("CSV File", "*.csv"), ("All Files", "*.*")] ) if not discrepReport: return else: self.load_discrep_file_name_text.set("Discrepancy Report loaded successfully") #discrepReport = self.discrepEdit(discrepReport) self.discrepancyDB(discrepReport) def loadReports(self): """Opens file directory for user to load file, file type depends on prior selections""" #Copy Required (Eclipse/Missing Copy) if self.eclipse_button_var.get() == 1 and self.missing_button_var.get() == 1: copyRequired = filedialog.askopenfilename( filetypes=[("CSV File", "*.csv"), ("All Files", "*.*")] ) if not copyRequired: return else: self.load_unplaced_file_name_text.set("Copy Required loaded successfully") copyRequired = self.copyRequiredEdit(copyRequired) self.copyRequiredDB(copyRequired) #AdCopyStatus (Novar/Missing Copy) elif self.novar_button_var.get() == 1 and self.missing_button_var.get() == 1: adCopyStatus = filedialog.askopenfilename( filetypes=[("CSV File", "*.csv"), ("All Files", "*.*")] ) if not adCopyStatus: return else: self.load_unplaced_file_name_text.set("AdCopyStatus Report loaded successfully") self.adCopyDB(adCopyStatus) #Unplaced Spots (Eclipse/Unplaced) elif self.eclipse_button_var.get() == 1 and self.unplaced_button_var.get() == 1: unplacedSpots = filedialog.askopenfilename( filetypes=[("CSV File", "*.csv"), ("All Files", "*.*")] ) if not unplacedSpots: return else: self.load_unplaced_file_name_text.set("Unplaced Spots Report loaded successfully") unplacedSpots = self.unplacedEdit(unplacedSpots) self.unplacedDB(unplacedSpots) #RSL (Novar/Unplaced) elif self.novar_button_var.get() == 1 and self.unplaced_button_var.get() == 1: requiredSpots = filedialog.askopenfilename( filetypes=[("CSV File", "*.csv"), ("All Files", "*.*")] ) if not requiredSpots: return else: self.load_unplaced_file_name_text.set("Required Spots loaded successfully") requiredSpots = self.rslEdit(requiredSpots) self.rslDB(requiredSpots) # Add functionality for the Submit button: finds the matches between the two db's opened up and returns them as CSV # Should I use :memory: or actual db's? # Will :memory: work once the function is over? Won't it close the db being used? # How can I write back to a CSV? # Format the tool better #Remove checks if button gets disabled root = Tk() interface = MatchTool(root) root.mainloop()
[ "sqlite3.connect", "csv.reader", "tkinter.filedialog.askopenfilename" ]
[((13046, 13133), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'filetypes': "[('CSV File', '*.csv'), ('All Files', '*.*')]"}), "(filetypes=[('CSV File', '*.csv'), ('All Files',\n '*.*')])\n", (13072, 13133), False, 'from tkinter import filedialog\n'), ((5493, 5528), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (5503, 5528), False, 'import csv\n'), ((6240, 6275), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (6250, 6275), False, 'import csv\n'), ((7294, 7329), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (7304, 7329), False, 'import csv\n'), ((7875, 7909), 'sqlite3.connect', 'sqlite3.connect', (['"""DiscrepMatch.db"""'], {}), "('DiscrepMatch.db')\n", (7890, 7909), False, 'import sqlite3\n'), ((8725, 8759), 'sqlite3.connect', 'sqlite3.connect', (['"""DiscrepMatch.db"""'], {}), "('DiscrepMatch.db')\n", (8740, 8759), False, 'import sqlite3\n'), ((9349, 9383), 'sqlite3.connect', 'sqlite3.connect', (['"""DiscrepMatch.db"""'], {}), "('DiscrepMatch.db')\n", (9364, 9383), False, 'import sqlite3\n'), ((10132, 10166), 'sqlite3.connect', 'sqlite3.connect', (['"""DiscrepMatch.db"""'], {}), "('DiscrepMatch.db')\n", (10147, 10166), False, 'import sqlite3\n'), ((11215, 11249), 'sqlite3.connect', 'sqlite3.connect', (['"""DiscrepMatch.db"""'], {}), "('DiscrepMatch.db')\n", (11230, 11249), False, 'import sqlite3\n'), ((13604, 13691), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'filetypes': "[('CSV File', '*.csv'), ('All Files', '*.*')]"}), "(filetypes=[('CSV File', '*.csv'), ('All Files',\n '*.*')])\n", (13630, 13691), False, 'from tkinter import filedialog\n'), ((14049, 14136), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'filetypes': "[('CSV File', '*.csv'), ('All Files', '*.*')]"}), "(filetypes=[('CSV File', '*.csv'), ('All Files',\n '*.*')])\n", (14075, 14136), False, 'from tkinter import filedialog\n'), ((14444, 14531), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'filetypes': "[('CSV File', '*.csv'), ('All Files', '*.*')]"}), "(filetypes=[('CSV File', '*.csv'), ('All Files',\n '*.*')])\n", (14470, 14531), False, 'from tkinter import filedialog\n'), ((14882, 14969), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'filetypes': "[('CSV File', '*.csv'), ('All Files', '*.*')]"}), "(filetypes=[('CSV File', '*.csv'), ('All Files',\n '*.*')])\n", (14908, 14969), False, 'from tkinter import filedialog\n')]
import argparse import numpy as np from scipy.io import wavfile from tqdm import trange from ar_model import ARmodel def correctSignal(signal, model, window_size, pred_size, step, treshold=3): """Correct signal using AR model Args: signal (np.array): signal to correct model (ARmodel): autoregresive model window_size (int): length of the window for updating AR model coefs pred_size (int): number of samples to generate from AR model step (int): step interval treshold (float): how many times error have to be bigger then standard deviation to classify sample as disturbed Returns: np.array: cerrected signal """ out = np.copy(signal) for i in trange(0, input.shape[0]-window_size-pred_size, step): paramsEnd = i+window_size predEnd = paramsEnd+pred_size model.updateParams(out[i:paramsEnd]) estimated = model.estimateSignal(pred_size, out[paramsEnd-model.r:paramsEnd]) err = np.abs(out[paramsEnd:predEnd] - estimated) std = np.std(err) disturbed = np.abs(err) > std*treshold disturbanceLength = 0 for j in range(pred_size): if disturbed[j]: disturbanceLength += 1 elif disturbanceLength > 0: k = j + paramsEnd before = signal[k-disturbanceLength-1] after = signal[k] out[k-disturbanceLength:k] = np.linspace(before,after,disturbanceLength+2)[1:-1] disturbanceLength = 0 return out if __name__ == '__main__': parser = argparse.ArgumentParser(description="Removing impulse interference from music recordings") parser.add_argument('filename', metavar='filename', type=str, help='path to wave file') parser.add_argument('-r', '--order', type=int, default=4, help='order of AR model') parser.add_argument('-o', '--out_file', type=str, default='out.wav', help='name of the output file') parser.add_argument('-u', '--param_window', type=int, default=256, help='length of the window for updating AR model coefs') parser.add_argument('-e', '--pred_widnow', type=int, default=8, help='number of samples to generate from AR model') parser.add_argument('-s', '--step', type=int, default=4, help='step interval') parser.add_argument('-d', '--decay', type=float, default=1.0, help='decay rate for exponential window') parser.add_argument('-m', '--max_std', type=float, default=3.0, help='how many times error have to be bigger then standard deviation to classify sample as disturbed') args = parser.parse_args() fs, input = wavfile.read(args.filename) input = input / 2**15 model = ARmodel(args.order, args.decay) output = correctSignal(input, model, args.param_window, args.pred_widnow, args.step, args.max_std) wavfile.write(args.out_file, fs, output)
[ "numpy.abs", "argparse.ArgumentParser", "numpy.copy", "tqdm.trange", "numpy.std", "scipy.io.wavfile.read", "ar_model.ARmodel", "scipy.io.wavfile.write", "numpy.linspace" ]
[((711, 726), 'numpy.copy', 'np.copy', (['signal'], {}), '(signal)\n', (718, 726), True, 'import numpy as np\n'), ((741, 798), 'tqdm.trange', 'trange', (['(0)', '(input.shape[0] - window_size - pred_size)', 'step'], {}), '(0, input.shape[0] - window_size - pred_size, step)\n', (747, 798), False, 'from tqdm import trange\n'), ((1621, 1716), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Removing impulse interference from music recordings"""'}), "(description=\n 'Removing impulse interference from music recordings')\n", (1644, 1716), False, 'import argparse\n'), ((2655, 2682), 'scipy.io.wavfile.read', 'wavfile.read', (['args.filename'], {}), '(args.filename)\n', (2667, 2682), False, 'from scipy.io import wavfile\n'), ((2722, 2753), 'ar_model.ARmodel', 'ARmodel', (['args.order', 'args.decay'], {}), '(args.order, args.decay)\n', (2729, 2753), False, 'from ar_model import ARmodel\n'), ((2862, 2902), 'scipy.io.wavfile.write', 'wavfile.write', (['args.out_file', 'fs', 'output'], {}), '(args.out_file, fs, output)\n', (2875, 2902), False, 'from scipy.io import wavfile\n'), ((1015, 1057), 'numpy.abs', 'np.abs', (['(out[paramsEnd:predEnd] - estimated)'], {}), '(out[paramsEnd:predEnd] - estimated)\n', (1021, 1057), True, 'import numpy as np\n'), ((1072, 1083), 'numpy.std', 'np.std', (['err'], {}), '(err)\n', (1078, 1083), True, 'import numpy as np\n'), ((1105, 1116), 'numpy.abs', 'np.abs', (['err'], {}), '(err)\n', (1111, 1116), True, 'import numpy as np\n'), ((1474, 1523), 'numpy.linspace', 'np.linspace', (['before', 'after', '(disturbanceLength + 2)'], {}), '(before, after, disturbanceLength + 2)\n', (1485, 1523), True, 'import numpy as np\n')]
import io import json import os from typing import Any, Dict, IO, Iterator, Optional, Tuple from altair_data_server import Provider from PIL import Image import pytest import selenium.webdriver from selenium.webdriver.remote.webdriver import WebDriver from altair_saver import HTMLSaver from altair_saver._utils import internet_connected CDN_URL = "https://cdn.jsdelivr.net" @pytest.fixture(scope="module") def internet_ok() -> bool: return internet_connected() @pytest.fixture(scope="module") def provider() -> Iterator[Provider]: provider = Provider() yield provider provider.stop() @pytest.fixture(scope="module") def driver() -> Iterator[WebDriver]: options = selenium.webdriver.chrome.options.Options() options.add_argument("--headless") if hasattr(os, "geteuid") and (os.geteuid() == 0): options.add_argument("--no-sandbox") driver = selenium.webdriver.Chrome(options=options) yield driver driver.quit() def get_testcases() -> Iterator[Tuple[str, Dict[str, Any]]]: directory = os.path.join(os.path.dirname(__file__), "testcases") cases = set(f.split(".")[0] for f in os.listdir(directory)) f: IO for case in sorted(cases): with open(os.path.join(directory, f"{case}.vl.json")) as f: vl = json.load(f) with open(os.path.join(directory, f"{case}.png"), "rb") as f: png = f.read() yield case, {"vega-lite": vl, "png": png} @pytest.mark.parametrize("inline", [True, False]) @pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}]) @pytest.mark.parametrize("case, data", get_testcases()) def test_html_save( case: str, data: Dict[str, Any], embed_options: Optional[dict], inline: bool ) -> None: saver = HTMLSaver(data["vega-lite"], inline=inline, embed_options=embed_options) fp = io.StringIO() saver.save(fp, "html") html = fp.getvalue() assert isinstance(html, str) assert html.strip().startswith("<!DOCTYPE html>") assert json.dumps(data["vega-lite"]) in html assert f"const embedOpt = {json.dumps(embed_options or {})}" in html if inline: assert CDN_URL not in html else: assert CDN_URL in html @pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}]) @pytest.mark.parametrize("case, data", get_testcases()) def test_html_mimebundle( case: str, data: Dict[str, Any], embed_options: Optional[dict], ) -> None: saver = HTMLSaver(data["vega-lite"], embed_options=embed_options) bundle = saver.mimebundle("html") assert bundle.keys() == {"text/html"} html = bundle["text/html"] assert isinstance(html, str) assert html.strip().startswith("<div") assert json.dumps(data["vega-lite"]) in html assert json.dumps(embed_options or {}) in html assert CDN_URL in html def test_bad_format() -> None: saver = HTMLSaver({}) with pytest.raises(ValueError): saver.mimebundle("vega") @pytest.mark.parametrize("case, data", get_testcases()) @pytest.mark.parametrize("inline", [True, False]) def test_html_save_rendering( provider: Provider, driver: WebDriver, case: str, data: Dict[str, Any], inline: bool, internet_ok: bool, ) -> None: if not (inline or internet_ok): pytest.xfail("Internet connection not available") saver = HTMLSaver(data["vega-lite"], inline=inline) fp = io.StringIO() saver.save(fp, "html") html = fp.getvalue() resource = provider.create(content=html, extension="html") driver.set_window_size(800, 600) driver.get(resource.url) element = driver.find_element_by_class_name("vega-visualization") png = driver.get_screenshot_as_png() im = Image.open(io.BytesIO(png)) left = element.location["x"] top = element.location["y"] right = element.location["x"] + element.size["width"] bottom = element.location["y"] + element.size["height"] im = im.crop((left, top, right, bottom)) im_expected = Image.open(io.BytesIO(data["png"])) assert abs(im.size[0] - im_expected.size[0]) < 40 assert abs(im.size[1] - im_expected.size[1]) < 40 @pytest.mark.parametrize("requirejs", [True, False]) @pytest.mark.parametrize("case, data", get_testcases()) def test_html_mimebundle_rendering( provider: Provider, driver: WebDriver, case: str, data: Dict[str, Any], requirejs: bool, internet_ok: bool, ) -> None: if not internet_ok: pytest.xfail("Internet connection not available") saver = HTMLSaver(data["vega-lite"]) bundle = saver.mimebundle("html") html = bundle["text/html"] assert isinstance(html, str) if requirejs: html = f"""<!DOCTYPE html> <html> <head><script src="{CDN_URL}/npm/requirejs@2.3.6"></script></head> <body>{html}</body> </html> """ else: html = f"<html>{html}</html>" resource = provider.create(content=html, extension="html") driver.set_window_size(800, 600) driver.get(resource.url) element = driver.find_element_by_class_name("vega-visualization") png = driver.get_screenshot_as_png() im = Image.open(io.BytesIO(png)) left = element.location["x"] top = element.location["y"] right = element.location["x"] + element.size["width"] bottom = element.location["y"] + element.size["height"] im = im.crop((left, top, right, bottom)) im_expected = Image.open(io.BytesIO(data["png"])) assert abs(im.size[0] - im_expected.size[0]) < 40 assert abs(im.size[1] - im_expected.size[1]) < 40
[ "os.geteuid", "io.StringIO", "io.BytesIO", "json.load", "os.path.dirname", "pytest.fixture", "altair_saver._utils.internet_connected", "json.dumps", "altair_data_server.Provider", "pytest.raises", "pytest.xfail", "altair_saver.HTMLSaver", "pytest.mark.parametrize", "os.path.join", "os.listdir" ]
[((382, 412), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (396, 412), False, 'import pytest\n'), ((475, 505), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (489, 505), False, 'import pytest\n'), ((612, 642), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (626, 642), False, 'import pytest\n'), ((1453, 1501), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[True, False]'], {}), "('inline', [True, False])\n", (1476, 1501), False, 'import pytest\n'), ((1503, 1570), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""embed_options"""', "[None, {'theme': 'dark'}]"], {}), "('embed_options', [None, {'theme': 'dark'}])\n", (1526, 1570), False, 'import pytest\n'), ((2203, 2270), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""embed_options"""', "[None, {'theme': 'dark'}]"], {}), "('embed_options', [None, {'theme': 'dark'}])\n", (2226, 2270), False, 'import pytest\n'), ((3006, 3054), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inline"""', '[True, False]'], {}), "('inline', [True, False])\n", (3029, 3054), False, 'import pytest\n'), ((4123, 4174), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""requirejs"""', '[True, False]'], {}), "('requirejs', [True, False])\n", (4146, 4174), False, 'import pytest\n'), ((451, 471), 'altair_saver._utils.internet_connected', 'internet_connected', ([], {}), '()\n', (469, 471), False, 'from altair_saver._utils import internet_connected\n'), ((559, 569), 'altair_data_server.Provider', 'Provider', ([], {}), '()\n', (567, 569), False, 'from altair_data_server import Provider\n'), ((1751, 1823), 'altair_saver.HTMLSaver', 'HTMLSaver', (["data['vega-lite']"], {'inline': 'inline', 'embed_options': 'embed_options'}), "(data['vega-lite'], inline=inline, embed_options=embed_options)\n", (1760, 1823), False, 'from altair_saver import HTMLSaver\n'), ((1833, 1846), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1844, 1846), False, 'import io\n'), ((2444, 2501), 'altair_saver.HTMLSaver', 'HTMLSaver', (["data['vega-lite']"], {'embed_options': 'embed_options'}), "(data['vega-lite'], embed_options=embed_options)\n", (2453, 2501), False, 'from altair_saver import HTMLSaver\n'), ((2864, 2877), 'altair_saver.HTMLSaver', 'HTMLSaver', (['{}'], {}), '({})\n', (2873, 2877), False, 'from altair_saver import HTMLSaver\n'), ((3331, 3374), 'altair_saver.HTMLSaver', 'HTMLSaver', (["data['vega-lite']"], {'inline': 'inline'}), "(data['vega-lite'], inline=inline)\n", (3340, 3374), False, 'from altair_saver import HTMLSaver\n'), ((3384, 3397), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3395, 3397), False, 'import io\n'), ((4504, 4532), 'altair_saver.HTMLSaver', 'HTMLSaver', (["data['vega-lite']"], {}), "(data['vega-lite'])\n", (4513, 4532), False, 'from altair_saver import HTMLSaver\n'), ((1060, 1085), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1075, 1085), False, 'import os\n'), ((1997, 2026), 'json.dumps', 'json.dumps', (["data['vega-lite']"], {}), "(data['vega-lite'])\n", (2007, 2026), False, 'import json\n'), ((2702, 2731), 'json.dumps', 'json.dumps', (["data['vega-lite']"], {}), "(data['vega-lite'])\n", (2712, 2731), False, 'import json\n'), ((2751, 2782), 'json.dumps', 'json.dumps', (['(embed_options or {})'], {}), '(embed_options or {})\n', (2761, 2782), False, 'import json\n'), ((2887, 2912), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2900, 2912), False, 'import pytest\n'), ((3269, 3318), 'pytest.xfail', 'pytest.xfail', (['"""Internet connection not available"""'], {}), "('Internet connection not available')\n", (3281, 3318), False, 'import pytest\n'), ((3712, 3727), 'io.BytesIO', 'io.BytesIO', (['png'], {}), '(png)\n', (3722, 3727), False, 'import io\n'), ((3987, 4010), 'io.BytesIO', 'io.BytesIO', (["data['png']"], {}), "(data['png'])\n", (3997, 4010), False, 'import io\n'), ((4442, 4491), 'pytest.xfail', 'pytest.xfail', (['"""Internet connection not available"""'], {}), "('Internet connection not available')\n", (4454, 4491), False, 'import pytest\n'), ((5149, 5164), 'io.BytesIO', 'io.BytesIO', (['png'], {}), '(png)\n', (5159, 5164), False, 'import io\n'), ((5424, 5447), 'io.BytesIO', 'io.BytesIO', (["data['png']"], {}), "(data['png'])\n", (5434, 5447), False, 'import io\n'), ((812, 824), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (822, 824), False, 'import os\n'), ((1290, 1302), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1299, 1302), False, 'import json\n'), ((1141, 1162), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1151, 1162), False, 'import os\n'), ((1223, 1265), 'os.path.join', 'os.path.join', (['directory', 'f"""{case}.vl.json"""'], {}), "(directory, f'{case}.vl.json')\n", (1235, 1265), False, 'import os\n'), ((1321, 1359), 'os.path.join', 'os.path.join', (['directory', 'f"""{case}.png"""'], {}), "(directory, f'{case}.png')\n", (1333, 1359), False, 'import os\n'), ((2066, 2097), 'json.dumps', 'json.dumps', (['(embed_options or {})'], {}), '(embed_options or {})\n', (2076, 2097), False, 'import json\n')]
from timeit import default_timer from parser import wikihandler import xml.sax as sax import utility def main(): # setting path to indices utility.setIndexPath() utility.setStatPath() # parser=sax.make_parser() # handler = wikihandler() # parser.setFeature(sax.handler.feature_namespaces,0) # parser.setContentHandler(handler) # parser.parse("/home/hitesh/sem3/IRE/Project/large.xml") # parser.parse("/home/hitesh/sem3/IRE/wiki/phase2/large.xml") # parser.parse("/mnt/sdb1/phase2/data/0.xml") # 1 # parser.parse("/mnt/sdb1/phase2/data/1.xml") # 2 # parser.parse("/mnt/sdb1/phase2/data/29.xml") # 3 for i in range(0,35): start = default_timer() parser=sax.make_parser() handler = wikihandler() parser.setFeature(sax.handler.feature_namespaces,0) parser.setContentHandler(handler) # this is the path to ur indices parser.parse("/mnt/sdb1/phase2/data/"+str(i)+".xml") stop = default_timer() print ('\nTime elasped in sec: ',stop - start) print('file no.:: ',str(i)) #wrie index here # handler.writeIndex(utility.getIndexPath()) # temp if __name__ == "__main__": start = default_timer() main() stop = default_timer() print ('\nTotal Time elasped in sec: ',stop - start)
[ "parser.wikihandler", "timeit.default_timer", "utility.setIndexPath", "utility.setStatPath", "xml.sax.make_parser" ]
[((141, 163), 'utility.setIndexPath', 'utility.setIndexPath', ([], {}), '()\n', (161, 163), False, 'import utility\n'), ((165, 186), 'utility.setStatPath', 'utility.setStatPath', ([], {}), '()\n', (184, 186), False, 'import utility\n'), ((1108, 1123), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1121, 1123), False, 'from timeit import default_timer\n'), ((1140, 1155), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1153, 1155), False, 'from timeit import default_timer\n'), ((644, 659), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (657, 659), False, 'from timeit import default_timer\n'), ((669, 686), 'xml.sax.make_parser', 'sax.make_parser', ([], {}), '()\n', (684, 686), True, 'import xml.sax as sax\n'), ((699, 712), 'parser.wikihandler', 'wikihandler', ([], {}), '()\n', (710, 712), False, 'from parser import wikihandler\n'), ((902, 917), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (915, 917), False, 'from timeit import default_timer\n')]
import boto3 exceptions = boto3.client('elb').exceptions AccessPointNotFoundException = exceptions.AccessPointNotFoundException CertificateNotFoundException = exceptions.CertificateNotFoundException DependencyThrottleException = exceptions.DependencyThrottleException DuplicateAccessPointNameException = exceptions.DuplicateAccessPointNameException DuplicateListenerException = exceptions.DuplicateListenerException DuplicatePolicyNameException = exceptions.DuplicatePolicyNameException DuplicateTagKeysException = exceptions.DuplicateTagKeysException InvalidConfigurationRequestException = exceptions.InvalidConfigurationRequestException InvalidEndPointException = exceptions.InvalidEndPointException InvalidSchemeException = exceptions.InvalidSchemeException InvalidSecurityGroupException = exceptions.InvalidSecurityGroupException InvalidSubnetException = exceptions.InvalidSubnetException ListenerNotFoundException = exceptions.ListenerNotFoundException LoadBalancerAttributeNotFoundException = exceptions.LoadBalancerAttributeNotFoundException OperationNotPermittedException = exceptions.OperationNotPermittedException PolicyNotFoundException = exceptions.PolicyNotFoundException PolicyTypeNotFoundException = exceptions.PolicyTypeNotFoundException SubnetNotFoundException = exceptions.SubnetNotFoundException TooManyAccessPointsException = exceptions.TooManyAccessPointsException TooManyPoliciesException = exceptions.TooManyPoliciesException TooManyTagsException = exceptions.TooManyTagsException UnsupportedProtocolException = exceptions.UnsupportedProtocolException
[ "boto3.client" ]
[((27, 46), 'boto3.client', 'boto3.client', (['"""elb"""'], {}), "('elb')\n", (39, 46), False, 'import boto3\n')]
# import os # # # path = '/home/yangyang/yangyang/DATA/gxw/dataset/DOTA_split/train' # # label_file_name = 'labelTxt' # # path = '/home/yangyang/yangyang/DATA/gxw/dataset/DOTA_demo/VOC2012' # label_file_name = 'Annotations' # # label_file_path = os.path.join(path, label_file_name) # filelist = os.listdir(label_file_path) # # # txt_path = os.path.join(path, 'train.txt') # txt_path = os.path.join(path, 'train.txt') # f = open(txt_path, 'a') # # for filename in filelist: # txt = filename.split('.')[0] # f.write('{}\n'.format(txt)) # # f.close() import os label_file_path = '/home/lyy/gxw/DOTA_OBB_1_5/VOC2012/JPEGImages-test' filelist = os.listdir(label_file_path) path = '/home/lyy/gxw' txt_path = os.path.join(path, 'test.txt') f = open(txt_path, 'a') for filename in filelist: txt = filename.split('.')[0] f.write('{}\n'.format(txt)) f.close()
[ "os.path.join", "os.listdir" ]
[((653, 680), 'os.listdir', 'os.listdir', (['label_file_path'], {}), '(label_file_path)\n', (663, 680), False, 'import os\n'), ((716, 746), 'os.path.join', 'os.path.join', (['path', '"""test.txt"""'], {}), "(path, 'test.txt')\n", (728, 746), False, 'import os\n')]
from pywire.signal import Signal from tkinter import * from tkinter.ttk import Separator from enum import Enum class BitState(Enum): TRUE = 1 FALSE = 2 TRUE_FORCED = 3 FALSE_FORCED = 4 UNDEFINED = 5 def bitsToInt(bit_array): for bit in bit_array: if bit.state == BitState.UNDEFINED: return None total = 0 for index in range(len(bit_array)): bit = bit_array[index] total *= 2 if bit.state == BitState.TRUE_FORCED or bit.state == BitState.TRUE: total += 1 return total class Bit: def press(self): print("PRESSED") if self.state == BitState.UNDEFINED: self.state = BitState.TRUE_FORCED elif self.state == BitState.TRUE_FORCED: self.state = BitState.FALSE_FORCED elif self.state == BitState.FALSE_FORCED: self.state = BitState.UNDEFINED elif self.state == BitState.TRUE: self.state = BitState.TRUE_FORCED elif self.state == BitState.FALSE: self.state = BitState.TRUE_FORCED else: raise Exception("WTF") self.update_gui() def update_gui(self): if self.state == BitState.UNDEFINED: self.entity.configure(text="UN") elif self.state == BitState.TRUE_FORCED: self.entity.configure(text="TF") elif self.state == BitState.FALSE_FORCED: self.entity.configure(text="FF") elif self.state == BitState.TRUE: self.entity.configure(text="T_") elif self.state == BitState.FALSE: self.entity.configure(text="F_") else: raise Exception("WTF: " + str(self.state)) def __init__(self, master, row, column): self.entity = Button(master, command=self.press) self.entity.grid(row=row, column=column) self.state = BitState.FALSE self.update_gui() def refresh(): globals()["app"].recalculate_states() class Application(Frame): def draw_signals(self, master, signals, start_row): for signal in signals: self.bits[signal.name] = [[None for bit_index in range(len(signal))] for t in range(self.time)] print("LABEL") Label(master, text=signal.name).grid(row=start_row, column=1) for bit_index in range(len(signal)): Label(master, text="<" + str(bit_index) + ">").grid(row=start_row, column=2) for time_stamp in range(self.time): self.bits[signal.name][time_stamp][bit_index] = Bit(master, start_row, time_stamp + 3) Separator(master, orient="horizontal").grid(row=start_row, column=time_stamp + 3, sticky=S + W + E) start_row += 1 start_row += 1 print("done") def createLayout(self, master): Button(master, text="Refresh", command=refresh).grid(row=0, column=0) for x in range(self.time): Label(master, text="t=" + str(x)).grid(row=1, column=x+3) row = 2 if self.input_signals: Label(master, text="inputs").grid(row=row, column=0) self.draw_signals(master, self.input_signals, row) row += sum([len(signal) for signal in self.input_signals])+3 Label(master, text=" ").grid(row=row-1, column=0) if self.other_signals: Label(master, text="other").grid(row=row, column=0) self.draw_signals(master, self.other_signals, row) row += sum([len(signal) for signal in self.other_signals]) + 3 Label(master, text=" ").grid(row=row-1, column=0) if self.output_signals: Label(master, text="outputs").grid(row=row, column=0) self.draw_signals(master, self.output_signals, row) row += sum([len(signal) for signal in self.output_signals]) + 3 Label(master, text=" ").grid(row=row-1, column=0) def recalculate_states(self): for time_stamp in range(0, self.time): for signal in Signal.all_signals: if signal.driving_signals: input_states = [] for input_signal in signal.driving_signals: if signal.clock: input_bits = self.bits[input_signal.name][time_stamp-1] else: input_bits = self.bits[input_signal.name][time_stamp] input_states.append(bitsToInt(input_bits)) output_val = signal.driving_function(*input_states) if isinstance(output_val, int): output_string = bin(output_val)[2:].rjust(len(signal), "0") output_string = output_string[len(output_string)-len(signal):] print(output_string) output_bool_array = [letter == "1" for letter in output_string] print(output_bool_array) signal_bits = self.bits[signal.name][time_stamp] for index in range(len(output_bool_array)): if signal_bits[index].state == BitState.TRUE_FORCED: pass elif signal_bits[index].state == BitState.FALSE_FORCED: pass elif output_bool_array[index]: signal_bits[index].state = BitState.TRUE else: signal_bits[index].state = BitState.FALSE elif isinstance(output_val, bool): for index in range(len(output_bool_array)): if output_val: signal_bits[index] = BitState.TRUE else: signal_bits[index] = BitState.FALSE else: raise Exception("Function output is not a boolean or int") for signal in Signal.all_signals: for bit_row in self.bits[signal.name]: for bit in bit_row: bit.update_gui() def __init__(self, master=None): self.bits = {} self.time = 10 signals = Signal.all_signals self.input_signals = list(filter(lambda x: x.io == "in", signals)) self.output_signals = list(filter(lambda x: x.io == "out", signals)) self.other_signals = list(filter(lambda x: not x.io, signals)) Frame.__init__(self, master) print("Creating layout") self.createLayout(master) print("DONE") self.recalculate_states() def launch_test(): root = Tk() app = Application(master=root) globals()["app"] = app app.mainloop() root.destroy() """ class TestObject: def __init__(self, signals=Signal.all_signals, inputs={}, turns=10): self.all_signals = signals self.in_signals = list(filter(lambda x: x.io == "in", signals)) self.out_signals = list(filter(lambda x: x.io == "out", signals)) def print(self, turn, signals=self.signals): """
[ "tkinter.ttk.Separator" ]
[((2652, 2690), 'tkinter.ttk.Separator', 'Separator', (['master'], {'orient': '"""horizontal"""'}), "(master, orient='horizontal')\n", (2661, 2690), False, 'from tkinter.ttk import Separator\n')]
# Copyright 2020, The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for DP-enabled DNNClassifier.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from absl.testing import parameterized import tensorflow as tf from tensorflow_privacy.privacy.estimators import test_utils from tensorflow_privacy.privacy.estimators.v1 import dnn from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer class DPDNNClassifierTest(tf.test.TestCase, parameterized.TestCase): """Tests for DP-enabled DNNClassifier.""" @parameterized.named_parameters( ('BinaryClassDNN', 2), ('MultiClassDNN 3', 3), ('MultiClassDNN 4', 4), ) def testDNN(self, n_classes): train_features, train_labels = test_utils.make_input_data(256, n_classes) feature_columns = [] for key in train_features: feature_columns.append(tf.feature_column.numeric_column(key=key)) optimizer = functools.partial( DPGradientDescentGaussianOptimizer, learning_rate=0.5, l2_norm_clip=1.0, noise_multiplier=0.0, num_microbatches=1) classifier = dnn.DNNClassifier( hidden_units=[10], activation_fn='relu', feature_columns=feature_columns, n_classes=n_classes, optimizer=optimizer, loss_reduction=tf.losses.Reduction.NONE) classifier.train( input_fn=test_utils.make_input_fn(train_features, train_labels, True, 16)) test_features, test_labels = test_utils.make_input_data(64, n_classes) classifier.evaluate( input_fn=test_utils.make_input_fn(test_features, test_labels, False, 16)) predict_features, predict_labels = test_utils.make_input_data(64, n_classes) classifier.predict( input_fn=test_utils.make_input_fn(predict_features, predict_labels, False)) if __name__ == '__main__': tf.test.main()
[ "tensorflow.test.main", "functools.partial", "tensorflow.feature_column.numeric_column", "tensorflow_privacy.privacy.estimators.test_utils.make_input_fn", "tensorflow_privacy.privacy.estimators.test_utils.make_input_data", "tensorflow_privacy.privacy.estimators.v1.dnn.DNNClassifier", "absl.testing.parameterized.named_parameters" ]
[((1157, 1263), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('BinaryClassDNN', 2)", "('MultiClassDNN 3', 3)", "('MultiClassDNN 4', 4)"], {}), "(('BinaryClassDNN', 2), ('MultiClassDNN 3', 3\n ), ('MultiClassDNN 4', 4))\n", (1187, 1263), False, 'from absl.testing import parameterized\n'), ((2588, 2602), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (2600, 2602), True, 'import tensorflow as tf\n'), ((1349, 1391), 'tensorflow_privacy.privacy.estimators.test_utils.make_input_data', 'test_utils.make_input_data', (['(256)', 'n_classes'], {}), '(256, n_classes)\n', (1375, 1391), False, 'from tensorflow_privacy.privacy.estimators import test_utils\n'), ((1537, 1673), 'functools.partial', 'functools.partial', (['DPGradientDescentGaussianOptimizer'], {'learning_rate': '(0.5)', 'l2_norm_clip': '(1.0)', 'noise_multiplier': '(0.0)', 'num_microbatches': '(1)'}), '(DPGradientDescentGaussianOptimizer, learning_rate=0.5,\n l2_norm_clip=1.0, noise_multiplier=0.0, num_microbatches=1)\n', (1554, 1673), False, 'import functools\n'), ((1729, 1912), 'tensorflow_privacy.privacy.estimators.v1.dnn.DNNClassifier', 'dnn.DNNClassifier', ([], {'hidden_units': '[10]', 'activation_fn': '"""relu"""', 'feature_columns': 'feature_columns', 'n_classes': 'n_classes', 'optimizer': 'optimizer', 'loss_reduction': 'tf.losses.Reduction.NONE'}), "(hidden_units=[10], activation_fn='relu', feature_columns=\n feature_columns, n_classes=n_classes, optimizer=optimizer,\n loss_reduction=tf.losses.Reduction.NONE)\n", (1746, 1912), False, 'from tensorflow_privacy.privacy.estimators.v1 import dnn\n'), ((2135, 2176), 'tensorflow_privacy.privacy.estimators.test_utils.make_input_data', 'test_utils.make_input_data', (['(64)', 'n_classes'], {}), '(64, n_classes)\n', (2161, 2176), False, 'from tensorflow_privacy.privacy.estimators import test_utils\n'), ((2366, 2407), 'tensorflow_privacy.privacy.estimators.test_utils.make_input_data', 'test_utils.make_input_data', (['(64)', 'n_classes'], {}), '(64, n_classes)\n', (2392, 2407), False, 'from tensorflow_privacy.privacy.estimators import test_utils\n'), ((1477, 1518), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', ([], {'key': 'key'}), '(key=key)\n', (1509, 1518), True, 'import tensorflow as tf\n'), ((1993, 2057), 'tensorflow_privacy.privacy.estimators.test_utils.make_input_fn', 'test_utils.make_input_fn', (['train_features', 'train_labels', '(True)', '(16)'], {}), '(train_features, train_labels, True, 16)\n', (2017, 2057), False, 'from tensorflow_privacy.privacy.estimators import test_utils\n'), ((2219, 2282), 'tensorflow_privacy.privacy.estimators.test_utils.make_input_fn', 'test_utils.make_input_fn', (['test_features', 'test_labels', '(False)', '(16)'], {}), '(test_features, test_labels, False, 16)\n', (2243, 2282), False, 'from tensorflow_privacy.privacy.estimators import test_utils\n'), ((2449, 2514), 'tensorflow_privacy.privacy.estimators.test_utils.make_input_fn', 'test_utils.make_input_fn', (['predict_features', 'predict_labels', '(False)'], {}), '(predict_features, predict_labels, False)\n', (2473, 2514), False, 'from tensorflow_privacy.privacy.estimators import test_utils\n')]
import unittest from numpy import hstack, max, abs, sqrt from cantera import Solution, gas_constant import numpy as np from spitfire import ChemicalMechanismSpec from os.path import join, abspath from subprocess import getoutput test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls')) mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')] def rhs_cantera(p_arg, T_arg, y_arg, rhoin, Tin_arg, yin_arg, tau_arg, gas, rhs_chem_in): gas.TPY = T_arg, p_arg, y_arg rho = gas.density_mass cv = gas.cv_mass e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights gas.TDY = Tin_arg, rhoin, yin_arg ein = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights rhs = np.copy(rhs_chem_in) rhsMass = np.zeros(gas.n_species + 1) rhsMass[0] += (rhoin - rho) rhsMass[1] += 1. / (rho * cv) * (rhoin * np.sum(yin_arg * (ein - e))) rhsMass[2:] += rhoin / rho * (yin_arg[:-1] - y_arg[:-1]) rhs += rhsMass / tau_arg return rhs def validate_on_mechanism(mech, temperature, pressure, tau, do_rhs, do_jac): xml = join(test_mech_directory, mech + '.xml') T = temperature Tin = T + 1000. p = pressure r = ChemicalMechanismSpec(xml, 'gas').griffon gas = Solution(xml) ns = gas.n_species y = np.ones(ns) # equal masses in the reactor gas.TPY = T, p, y y = np.copy(gas.Y) rho = gas.density_mass xin = np.ones(ns) # equal moles in the feed gas.TPX = Tin, p, xin yin = np.copy(gas.Y) rhoin = gas.density_mass state = hstack((rho, T, y[:-1])) rhsGRChemOnly = np.zeros(ns + 1) r.reactor_rhs_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGRChemOnly) rhsCN = rhs_cantera(p, T, y, rhoin, Tin, yin, tau, gas, rhsGRChemOnly) rhsGR = np.empty(ns + 1) r.reactor_rhs_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR) if do_rhs: return max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) < 100. * sqrt(np.finfo(float).eps) if do_jac: jacGR = np.empty((ns + 1) * (ns + 1)) r.reactor_jac_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, 0, rhsGR, jacGR) jacGR = jacGR.reshape((ns + 1, ns + 1), order='F') drho = 1.e-6 dT = 1.e-6 dY = 1.e-6 jacFD = np.empty((ns + 1, ns + 1)) rhsGR1, rhsGR2 = np.empty(ns + 1), np.empty(ns + 1) state_m = hstack((rho - drho, T, y[:-1])) state_p = hstack((rho + drho, T, y[:-1])) r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1) r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2) jacFD[:, 0] = (- rhsGR1 + rhsGR2) / (2. * drho) state_m = hstack((rho, T - dT, y[:-1])) state_p = hstack((rho, T + dT, y[:-1])) r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1) r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2) jacFD[:, 1] = (- rhsGR1 + rhsGR2) / (2. * dT) for i in range(ns - 1): y_m1, y_p1 = np.copy(y), np.copy(y) y_m1[i] += - dY y_m1[-1] -= - dY y_p1[i] += dY y_p1[-1] -= dY state_m = hstack((rho, T, y_m1[:-1])) state_p = hstack((rho, T, y_p1[:-1])) r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1) r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2) jacFD[:, 2 + i] = (- rhsGR1 + rhsGR2) / (2. * dY) return max(abs(jacGR - jacFD) / (abs(jacGR) + 1.)) < 4.e-3 def create_test(m, T, p, tau, do_rhs, do_jac): def test(self): self.assertTrue(validate_on_mechanism(m, T, p, tau, do_rhs, do_jac)) return test class Accuracy(unittest.TestCase): pass tau_list = [1.e-6, 1.e-3] for mech in mechs: for tau in tau_list: rhsname = 'test_rhs_' + mech + '_' + 'tau=' + str(tau) jacname = 'test_jac_' + mech + '_' + 'tau=' + str(tau) setattr(Accuracy, rhsname, create_test(mech, 600., 101325, tau, True, False)) if 'methane' not in mech: # skip methane in the finite difference Jacobian tests setattr(Accuracy, jacname, create_test(mech, 600., 101325, tau, False, True)) if __name__ == '__main__': unittest.main()
[ "unittest.main", "spitfire.ChemicalMechanismSpec", "numpy.sum", "numpy.abs", "numpy.copy", "numpy.empty", "numpy.zeros", "numpy.ones", "numpy.hstack", "numpy.finfo", "cantera.Solution", "subprocess.getoutput", "os.path.join" ]
[((260, 304), 'os.path.join', 'join', (['"""tests"""', '"""test_mechanisms"""', '"""old_xmls"""'], {}), "('tests', 'test_mechanisms', 'old_xmls')\n", (264, 304), False, 'from os.path import join, abspath\n'), ((809, 829), 'numpy.copy', 'np.copy', (['rhs_chem_in'], {}), '(rhs_chem_in)\n', (816, 829), True, 'import numpy as np\n'), ((844, 871), 'numpy.zeros', 'np.zeros', (['(gas.n_species + 1)'], {}), '(gas.n_species + 1)\n', (852, 871), True, 'import numpy as np\n'), ((1172, 1212), 'os.path.join', 'join', (['test_mech_directory', "(mech + '.xml')"], {}), "(test_mech_directory, mech + '.xml')\n", (1176, 1212), False, 'from os.path import join, abspath\n'), ((1332, 1345), 'cantera.Solution', 'Solution', (['xml'], {}), '(xml)\n', (1340, 1345), False, 'from cantera import Solution, gas_constant\n'), ((1378, 1389), 'numpy.ones', 'np.ones', (['ns'], {}), '(ns)\n', (1385, 1389), True, 'import numpy as np\n'), ((1451, 1465), 'numpy.copy', 'np.copy', (['gas.Y'], {}), '(gas.Y)\n', (1458, 1465), True, 'import numpy as np\n'), ((1504, 1515), 'numpy.ones', 'np.ones', (['ns'], {}), '(ns)\n', (1511, 1515), True, 'import numpy as np\n'), ((1579, 1593), 'numpy.copy', 'np.copy', (['gas.Y'], {}), '(gas.Y)\n', (1586, 1593), True, 'import numpy as np\n'), ((1636, 1660), 'numpy.hstack', 'hstack', (['(rho, T, y[:-1])'], {}), '((rho, T, y[:-1]))\n', (1642, 1660), False, 'from numpy import hstack, max, abs, sqrt\n'), ((1682, 1698), 'numpy.zeros', 'np.zeros', (['(ns + 1)'], {}), '(ns + 1)\n', (1690, 1698), True, 'import numpy as np\n'), ((1883, 1899), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (1891, 1899), True, 'import numpy as np\n'), ((4496, 4511), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4509, 4511), False, 'import unittest\n'), ((1279, 1312), 'spitfire.ChemicalMechanismSpec', 'ChemicalMechanismSpec', (['xml', '"""gas"""'], {}), "(xml, 'gas')\n", (1300, 1312), False, 'from spitfire import ChemicalMechanismSpec\n'), ((2130, 2159), 'numpy.empty', 'np.empty', (['((ns + 1) * (ns + 1))'], {}), '((ns + 1) * (ns + 1))\n', (2138, 2159), True, 'import numpy as np\n'), ((2397, 2423), 'numpy.empty', 'np.empty', (['(ns + 1, ns + 1)'], {}), '((ns + 1, ns + 1))\n', (2405, 2423), True, 'import numpy as np\n'), ((2503, 2534), 'numpy.hstack', 'hstack', (['(rho - drho, T, y[:-1])'], {}), '((rho - drho, T, y[:-1]))\n', (2509, 2534), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2553, 2584), 'numpy.hstack', 'hstack', (['(rho + drho, T, y[:-1])'], {}), '((rho + drho, T, y[:-1]))\n', (2559, 2584), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2850, 2879), 'numpy.hstack', 'hstack', (['(rho, T - dT, y[:-1])'], {}), '((rho, T - dT, y[:-1]))\n', (2856, 2879), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2898, 2927), 'numpy.hstack', 'hstack', (['(rho, T + dT, y[:-1])'], {}), '((rho, T + dT, y[:-1]))\n', (2904, 2927), False, 'from numpy import hstack, max, abs, sqrt\n'), ((949, 976), 'numpy.sum', 'np.sum', (['(yin_arg * (ein - e))'], {}), '(yin_arg * (ein - e))\n', (955, 976), True, 'import numpy as np\n'), ((2449, 2465), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (2457, 2465), True, 'import numpy as np\n'), ((2467, 2483), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (2475, 2483), True, 'import numpy as np\n'), ((3385, 3412), 'numpy.hstack', 'hstack', (['(rho, T, y_m1[:-1])'], {}), '((rho, T, y_m1[:-1]))\n', (3391, 3412), False, 'from numpy import hstack, max, abs, sqrt\n'), ((3435, 3462), 'numpy.hstack', 'hstack', (['(rho, T, y_p1[:-1])'], {}), '((rho, T, y_p1[:-1]))\n', (3441, 3462), False, 'from numpy import hstack, max, abs, sqrt\n'), ((346, 401), 'subprocess.getoutput', 'getoutput', (["('ls ' + test_mech_directory + ' | grep .xml')"], {}), "('ls ' + test_mech_directory + ' | grep .xml')\n", (355, 401), False, 'from subprocess import getoutput\n'), ((3230, 3240), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (3237, 3240), True, 'import numpy as np\n'), ((3242, 3252), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (3249, 3252), True, 'import numpy as np\n'), ((2023, 2041), 'numpy.abs', 'abs', (['(rhsGR - rhsCN)'], {}), '(rhsGR - rhsCN)\n', (2026, 2041), False, 'from numpy import hstack, max, abs, sqrt\n'), ((3743, 3761), 'numpy.abs', 'abs', (['(jacGR - jacFD)'], {}), '(jacGR - jacFD)\n', (3746, 3761), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2045, 2055), 'numpy.abs', 'abs', (['rhsCN'], {}), '(rhsCN)\n', (2048, 2055), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2077, 2092), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2085, 2092), True, 'import numpy as np\n'), ((3765, 3775), 'numpy.abs', 'abs', (['jacGR'], {}), '(jacGR)\n', (3768, 3775), False, 'from numpy import hstack, max, abs, sqrt\n')]
from django import forms from django.forms import ModelForm from django.conf import settings from members.models import User from .models import Unknowntag class SelectUserForm(forms.Form): user = forms.ModelChoiceField(queryset=User.objects.all()) activate_doors = forms.BooleanField(initial = True, help_text='Also give this user door permits if they did not have it yet.') class SelectTagForm(forms.Form): tag = forms.ModelChoiceField(queryset=Unknowntag.objects.all()) activate_doors = forms.BooleanField(initial = True, help_text='Also give this user door permits if they did not have it yet.')
[ "django.forms.BooleanField", "members.models.User.objects.all" ]
[((276, 388), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'initial': '(True)', 'help_text': '"""Also give this user door permits if they did not have it yet."""'}), "(initial=True, help_text=\n 'Also give this user door permits if they did not have it yet.')\n", (294, 388), False, 'from django import forms\n'), ((509, 621), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'initial': '(True)', 'help_text': '"""Also give this user door permits if they did not have it yet."""'}), "(initial=True, help_text=\n 'Also give this user door permits if they did not have it yet.')\n", (527, 621), False, 'from django import forms\n'), ((235, 253), 'members.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (251, 253), False, 'from members.models import User\n')]
from storage_bucket.bucket import get_bucket def upload_file( *, file_content: bytes, storage_bucket_name: str, filename: str, content_type: str = 'application/octet-stream', **kwargs: dict, ) -> None: """ Upload content of file_data to a google cloud storage bucket. .. versionadded:: 0.0.1 :param file_data: contents to upload in bytes :type file_data: bytes :param bucket_name: Name of the google cloud bucket :type bucket_name: str :param filename: The name to give the uploaded file :type filename: str :param content_type: What type of file to create, defaults to text/plain :type content_type: str :return: None """ bucket = get_bucket(storage_bucket_name=storage_bucket_name) blob = bucket.blob(filename) blob.upload_from_string( file_content, content_type=content_type, **kwargs, )
[ "storage_bucket.bucket.get_bucket" ]
[((720, 771), 'storage_bucket.bucket.get_bucket', 'get_bucket', ([], {'storage_bucket_name': 'storage_bucket_name'}), '(storage_bucket_name=storage_bucket_name)\n', (730, 771), False, 'from storage_bucket.bucket import get_bucket\n')]
import pytest import test.mock_data_gateway from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions from blades_helper.mission_generator_constants import MissionGeneratorConstants as con def setup_one_mission_base_build(mock, note, type): mock.mission_counts.append((1, note)) mock.mission_types.append(type) def check_array(source, target): assert len(source)==len(target) for i in range(len(source)): assert source[i]==target[i] def check_mission_type(mission, mission_type): assert mission.mission_type == mission_type def check_note_len(mission, notes_len): assert len(mission.notes) == notes_len def check_for_note(mission, note_to_find): for note in mission.notes: if note_to_find in note: return assert False def check_requirement(mission, specialist): return specialist in mission.requirements def check_for_note_plus_one_specialist(mission, specialist): check_for_note(mission, "Mission can include one additional specialist") check_requirement(mission, specialist) def check_for_note_favor(mission, favor_type): check_for_note(mission, con.FAVOR_NOTE[0:-3]) check_for_note(mission, favor_type) def check_mission(mission, mission_type, target, rewards, penalties, notes_len, requirement, contained_notes): check_mission_type(mission, mission_type) assert mission.target == target check_array(mission.rewards, rewards) check_array(mission.penalties, penalties) check_note_len(mission, notes_len) if not requirement == con.NOTHING: check_requirement(mission, requirement) for contained_note in contained_notes: check_for_note(mission, contained_note) def test_get_next_mission_type(): assert _get_next_mission_type(con.ASSAULT) == con.RECON assert _get_next_mission_type(con.RECON) == con.RELIGIOUS assert _get_next_mission_type(con.RELIGIOUS)==con.SUPPLY assert _get_next_mission_type(con.SUPPLY) == con.COMMANDER_FOCUS with pytest.raises(AssertionError): _get_next_mission_type(con.COMMANDER_FOCUS) with pytest.raises(AssertionError): _get_next_mission_type(con.GM_CHOICE) def test_can_use_mission_type(): assert not _can_use_mission_type(con.SUPPLY, [con.ASSAULT]) assert _can_use_mission_type(con.SPECIAL, [con.SPECIAL]) assert _can_use_mission_type(con.GM_CHOICE, [con.GM_CHOICE]) assert not _can_use_mission_type(con.SUPPLY, []) assert _can_use_mission_type(con.SUPPLY, [con.SUPPLY]) assert _can_use_mission_type(con.SUPPLY, [con.ASSAULT, con.SUPPLY]) assert _can_use_mission_type(con.SUPPLY, [con.SUPPLY, con.ASSAULT]) def test_make_one_mission(): mock = test.mock_data_gateway.MockDataGateway() mock.titles.append('bunker hill') setup_one_mission_base_build(mock, con.NOTHING, con.RELIGIOUS) missions =_generate_base_missions(mock, False, False, False, con.SUPPLY, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] ) assert len(missions)==1 mission = missions[0] check_mission(mission, con.RELIGIOUS, con.NOTHING, [], [], 0, con.required_religious_specialists, []) def test_one_has_favor(): mock = test.mock_data_gateway.MockDataGateway() mock.favor_types.append(con.THE_WILD) setup_one_mission_base_build(mock, con.ONE_HAS_FAVOR, con.SUPPLY) missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] ) assert len(missions)==1 mission = missions[0] check_mission(mission,con.SUPPLY,con.NOTHING,[],[],1,con.required_supply_specialists,[con.FAVOR_NOTE[0:-3], con.THE_WILD]) def test_one_extra_specialist(): mock = test.mock_data_gateway.MockDataGateway() mock.specialists.append(con.SNIPER) setup_one_mission_base_build(mock, con.PLUS_ONE_SPECIALIST, con.SUPPLY) missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] ) mission = missions[0] check_mission(mission, con.SUPPLY, con.NOTHING, [],[], 1, con.required_supply_specialists, ["Mission can include one additional specialist"]) check_for_note_plus_one_specialist(mission, con.SNIPER) def test_commanders_focus(): mock = test.mock_data_gateway.MockDataGateway() setup_one_mission_base_build(mock, con.NOTHING, con.COMMANDER_FOCUS) missions =_generate_base_missions( mock, False, False, False, con.RECON, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] ) mission = missions[0] check_mission(mission, con.RECON, con.NOTHING, [],[], 0,con.required_recon_specialists,[]) def test_gm_choice(): mock = test.mock_data_gateway.MockDataGateway() setup_one_mission_base_build(mock, con.NOTHING, con.GM_CHOICE) missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.RECON, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON] ) mission = missions[0] check_mission(mission, con.RECON, con.NOTHING, [],[], 0,con.required_recon_specialists,[]) def test_unavailable_mission(): #test simple unavailability mock = test.mock_data_gateway.MockDataGateway() setup_one_mission_base_build(mock, con.NOTHING, con.RECON) missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.RECON, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] ) mission = missions[0] check_mission(mission, con.RELIGIOUS, con.NOTHING, [],[], 0,con.required_religious_specialists,[]) #test when mutiple missions unavailable setup_one_mission_base_build(mock, con.NOTHING, con.ASSAULT) missions =_generate_base_missions( mock, False, False, False, con.UNDEFINED, con.ASSAULT, [] ) mission = missions[0] check_mission(mission, con.UNDEFINED, con.NOTHING, [],[], 0,con.NOTHING,[]) def test_special_missions_are_allowed(): # special mock = test.mock_data_gateway.MockDataGateway() setup_one_mission_base_build(mock, con.NOTHING, con.SPECIAL) missions =_generate_base_missions( mock, False, False, False, con.ASSAULT, con.ASSAULT, [] ) mission = missions[0] check_mission(mission, con.SPECIAL, con.NOTHING, [],[], 0,con.NOTHING,[]) def create_mission_with_gm_choice_and_note(mock, choice, note, spymaster_buy=False): setup_one_mission_base_build(mock, note, con.GM_CHOICE) missions =_generate_base_missions( mock, spymaster_buy, False, False, con.ASSAULT, choice, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] ) assert len(missions) == 1 return missions[0] def create_mission_with_commander_focus_and_note(mock, focus,note, spymaster_buy=False): setup_one_mission_base_build(mock, note, con.COMMANDER_FOCUS) missions =_generate_base_missions( mock, spymaster_buy, False, False, focus, con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] ) assert len(missions) == 1 return missions[0] def test_commander_focus_plus_one_specialist(): mock = test.mock_data_gateway.MockDataGateway() mock.specialists.append(con.HEAVY) focus=con.SUPPLY note=con.PLUS_ONE_SPECIALIST mission=create_mission_with_commander_focus_and_note(mock, focus, note) check_mission_type(mission, focus) check_note_len(mission, 1) check_for_note_plus_one_specialist(mission, con.HEAVY) def test_commander_focus_one_has_favor(): mock = test.mock_data_gateway.MockDataGateway() mock.favor_types.append(con.HOLY) focus=con.SUPPLY note=con.ONE_HAS_FAVOR mission=create_mission_with_commander_focus_and_note(mock, focus, note) check_mission_type(mission, focus) check_note_len(mission, 1) check_for_note_favor(mission, con.HOLY) def test_commander_focus_one_is_special(): mock = test.mock_data_gateway.MockDataGateway() focus=con.SUPPLY note=con.ONE_IS_SPECIAL mission=create_mission_with_commander_focus_and_note(mock, focus, note) check_mission_type(mission, con.SPECIAL) check_note_len(mission, 0) def test_gm_choice_plus_one_specialist(): mock = test.mock_data_gateway.MockDataGateway() mock.specialists.append(con.HEAVY) choice=con.SUPPLY note=con.PLUS_ONE_SPECIALIST mission=create_mission_with_gm_choice_and_note(mock, choice, note) check_mission_type(mission, choice) check_note_len(mission, 1) check_for_note_plus_one_specialist(mission, con.HEAVY) def test_gm_choice_one_has_favor(): mock = test.mock_data_gateway.MockDataGateway() mock.favor_types.append(con.HOLY) choice=con.SUPPLY note=con.ONE_HAS_FAVOR mission=create_mission_with_gm_choice_and_note(mock, choice, note) check_mission_type(mission, choice) check_note_len(mission, 1) check_for_note_favor(mission, con.HOLY) def test_gm_choice_one_is_special(): mock = test.mock_data_gateway.MockDataGateway() choice=con.SUPPLY note=con.ONE_IS_SPECIAL mission=create_mission_with_gm_choice_and_note(mock, choice, note) check_mission_type(mission, con.SPECIAL) check_note_len(mission, 0) def test_simple_spymaster_spend(): mock=test.mock_data_gateway.MockDataGateway() setup_one_mission_base_build(mock, con.NOTHING, con.SUPPLY) missions =_generate_base_missions( mock, True, False, False, con.ASSAULT,con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] ) assert len(missions) == 1 check_mission_type(missions[0], con.SPECIAL) def test_one_mission_with_spymaster_and_one_is_special(): mock=test.mock_data_gateway.MockDataGateway() setup_one_mission_base_build(mock, con.ONE_IS_SPECIAL, con.SUPPLY) missions =_generate_base_missions( mock, True, False, False, con.ASSAULT,con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] ) assert len(missions) == 1 check_mission_type(missions[0], con.SPECIAL) check_note_len(missions[0],0) def test_two_missions_with_spymaster_and_one_is_special(): mock=test.mock_data_gateway.MockDataGateway() mock.mission_counts.append((2, con.ONE_IS_SPECIAL)) mock.mission_types.append(con.RECON) mock.mission_types.append(con.SUPPLY) missions =_generate_base_missions( mock, True, False, False, con.ASSAULT,con.ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT] ) assert len(missions) == 2 check_mission_type(missions[0], con.SPECIAL) check_note_len(missions[0],0) check_mission_type(missions[1], con.SPECIAL) check_note_len(missions[1],0) def setup_three_missions(mock, note, first_type, second_type, third_type, commanders_focus, gms_choice, spymaster_buy=False): mock.mission_counts.append((3, note)) mock.mission_types.append(first_type) mock.mission_types.append(second_type) mock.mission_types.append(third_type) missions = _generate_base_missions(mock, spymaster_buy, False, False, commanders_focus, gms_choice, [con.ASSAULT, con.RECON, con.SUPPLY, con.RELIGIOUS]) assert len(missions)==3 return missions def test_three_simple_missions(): mock=test.mock_data_gateway.MockDataGateway() note=con.NOTHING first_type=con.RELIGIOUS second_type=con.SUPPLY third_type=con.RECON spymaster_buy=False commander_focus=con.ASSAULT gm_choice=con.ASSAULT missions=setup_three_missions(mock, note, first_type, second_type, third_type, commander_focus, gm_choice, spymaster_buy) check_mission_type(missions[0], con.RELIGIOUS) check_mission_type(missions[1], con.SUPPLY) check_mission_type(missions[2], con.RECON) def test_three_missions_with_one_is_special(): mock=test.mock_data_gateway.MockDataGateway() mock.random_missions.append(1) mock.specialists.append(con.HEAVY) note=con.PLUS_ONE_SPECIALIST first_type=con.RELIGIOUS second_type=con.SUPPLY third_type=con.RECON spymaster_buy=False commander_focus=con.ASSAULT gm_choice=con.ASSAULT missions=setup_three_missions(mock, note, first_type, second_type, third_type, commander_focus, gm_choice, spymaster_buy) check_mission_type(missions[0], con.RELIGIOUS) check_mission_type(missions[1], con.SUPPLY) check_mission_type(missions[2], con.RECON) check_for_note_plus_one_specialist(missions[1],con.HEAVY) def test_three_missions_with_spymaster_buy_and_one_is_special(): mock=test.mock_data_gateway.MockDataGateway() note=con.ONE_IS_SPECIAL first_type=con.RELIGIOUS second_type=con.SUPPLY third_type=con.RECON spymaster_buy=True commander_focus=con.ASSAULT gm_choice=con.ASSAULT missions=setup_three_missions(mock, note, first_type, second_type, third_type, commander_focus, gm_choice, spymaster_buy) check_mission_type(missions[0], con.SPECIAL) check_mission_type(missions[1], con.SPECIAL) check_mission_type(missions[2], con.RELIGIOUS)
[ "pytest.raises", "blades_helper.mission_generator._can_use_mission_type", "blades_helper.mission_generator._generate_base_missions", "blades_helper.mission_generator._get_next_mission_type" ]
[((2326, 2375), 'blades_helper.mission_generator._can_use_mission_type', '_can_use_mission_type', (['con.SPECIAL', '[con.SPECIAL]'], {}), '(con.SPECIAL, [con.SPECIAL])\n', (2347, 2375), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2387, 2440), 'blades_helper.mission_generator._can_use_mission_type', '_can_use_mission_type', (['con.GM_CHOICE', '[con.GM_CHOICE]'], {}), '(con.GM_CHOICE, [con.GM_CHOICE])\n', (2408, 2440), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2505, 2552), 'blades_helper.mission_generator._can_use_mission_type', '_can_use_mission_type', (['con.SUPPLY', '[con.SUPPLY]'], {}), '(con.SUPPLY, [con.SUPPLY])\n', (2526, 2552), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2564, 2624), 'blades_helper.mission_generator._can_use_mission_type', '_can_use_mission_type', (['con.SUPPLY', '[con.ASSAULT, con.SUPPLY]'], {}), '(con.SUPPLY, [con.ASSAULT, con.SUPPLY])\n', (2585, 2624), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2636, 2696), 'blades_helper.mission_generator._can_use_mission_type', '_can_use_mission_type', (['con.SUPPLY', '[con.SUPPLY, con.ASSAULT]'], {}), '(con.SUPPLY, [con.SUPPLY, con.ASSAULT])\n', (2657, 2696), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2900, 3032), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.SUPPLY', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON]'], {}), '(mock, False, False, False, con.SUPPLY, con.ASSAULT,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON])\n', (2923, 3032), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((3396, 3529), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.ASSAULT', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON]'], {}), '(mock, False, False, False, con.ASSAULT, con.ASSAULT,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON])\n', (3419, 3529), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((3926, 4059), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.ASSAULT', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON]'], {}), '(mock, False, False, False, con.ASSAULT, con.ASSAULT,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON])\n', (3949, 4059), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((4465, 4596), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.RECON', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON]'], {}), '(mock, False, False, False, con.RECON, con.ASSAULT,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON])\n', (4488, 4596), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((4882, 5013), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.ASSAULT', 'con.RECON', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON]'], {}), '(mock, False, False, False, con.ASSAULT, con.RECON,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT, con.RECON])\n', (4905, 5013), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((5334, 5454), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.ASSAULT', 'con.RECON', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT]'], {}), '(mock, False, False, False, con.ASSAULT, con.RECON,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT])\n', (5357, 5454), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((5706, 5793), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.UNDEFINED', 'con.ASSAULT', '[]'], {}), '(mock, False, False, False, con.UNDEFINED, con.\n ASSAULT, [])\n', (5729, 5793), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((6089, 6174), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(False)', '(False)', '(False)', 'con.ASSAULT', 'con.ASSAULT', '[]'], {}), '(mock, False, False, False, con.ASSAULT, con.ASSAULT, []\n )\n', (6112, 6174), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((6437, 6562), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', 'spymaster_buy', '(False)', '(False)', 'con.ASSAULT', 'choice', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT]'], {}), '(mock, spymaster_buy, False, False, con.ASSAULT,\n choice, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT])\n', (6460, 6562), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((6786, 6911), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', 'spymaster_buy', '(False)', '(False)', 'focus', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT]'], {}), '(mock, spymaster_buy, False, False, focus, con.\n ASSAULT, [con.RELIGIOUS, con.SUPPLY, con.ASSAULT])\n', (6809, 6911), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((9259, 9380), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(True)', '(False)', '(False)', 'con.ASSAULT', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT]'], {}), '(mock, True, False, False, con.ASSAULT, con.ASSAULT,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT])\n', (9282, 9380), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((9656, 9777), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(True)', '(False)', '(False)', 'con.ASSAULT', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT]'], {}), '(mock, True, False, False, con.ASSAULT, con.ASSAULT,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT])\n', (9679, 9777), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((10152, 10273), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', '(True)', '(False)', '(False)', 'con.ASSAULT', 'con.ASSAULT', '[con.RELIGIOUS, con.SUPPLY, con.ASSAULT]'], {}), '(mock, True, False, False, con.ASSAULT, con.ASSAULT,\n [con.RELIGIOUS, con.SUPPLY, con.ASSAULT])\n', (10175, 10273), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((10779, 10924), 'blades_helper.mission_generator._generate_base_missions', '_generate_base_missions', (['mock', 'spymaster_buy', '(False)', '(False)', 'commanders_focus', 'gms_choice', '[con.ASSAULT, con.RECON, con.SUPPLY, con.RELIGIOUS]'], {}), '(mock, spymaster_buy, False, False, commanders_focus,\n gms_choice, [con.ASSAULT, con.RECON, con.SUPPLY, con.RELIGIOUS])\n', (10802, 10924), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((1797, 1832), 'blades_helper.mission_generator._get_next_mission_type', '_get_next_mission_type', (['con.ASSAULT'], {}), '(con.ASSAULT)\n', (1819, 1832), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((1857, 1890), 'blades_helper.mission_generator._get_next_mission_type', '_get_next_mission_type', (['con.RECON'], {}), '(con.RECON)\n', (1879, 1890), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((1919, 1956), 'blades_helper.mission_generator._get_next_mission_type', '_get_next_mission_type', (['con.RELIGIOUS'], {}), '(con.RELIGIOUS)\n', (1941, 1956), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((1980, 2014), 'blades_helper.mission_generator._get_next_mission_type', '_get_next_mission_type', (['con.SUPPLY'], {}), '(con.SUPPLY)\n', (2002, 2014), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2048, 2077), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2061, 2077), False, 'import pytest\n'), ((2087, 2130), 'blades_helper.mission_generator._get_next_mission_type', '_get_next_mission_type', (['con.COMMANDER_FOCUS'], {}), '(con.COMMANDER_FOCUS)\n', (2109, 2130), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2140, 2169), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2153, 2169), False, 'import pytest\n'), ((2179, 2216), 'blades_helper.mission_generator._get_next_mission_type', '_get_next_mission_type', (['con.GM_CHOICE'], {}), '(con.GM_CHOICE)\n', (2201, 2216), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2266, 2314), 'blades_helper.mission_generator._can_use_mission_type', '_can_use_mission_type', (['con.SUPPLY', '[con.ASSAULT]'], {}), '(con.SUPPLY, [con.ASSAULT])\n', (2287, 2314), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n'), ((2456, 2493), 'blades_helper.mission_generator._can_use_mission_type', '_can_use_mission_type', (['con.SUPPLY', '[]'], {}), '(con.SUPPLY, [])\n', (2477, 2493), False, 'from blades_helper.mission_generator import _get_next_mission_type, _can_use_mission_type, _generate_base_missions\n')]
from typing import Any, List from app.schemas.category import CategoryResponse from fastapi import APIRouter, Depends, status, HTTPException from sqlalchemy.orm import Session from app import crud, schemas from app.api import deps router = APIRouter() @router.get( "/", response_model=List[schemas.CategoryResponse], status_code=status.HTTP_200_OK ) def read_categories( db: Session = Depends(deps.get_db), skip: int = 0, limit: int = 100 ) -> Any: return crud.read_categories(db, skip=skip, limit=limit) @router.post("/", response_model=CategoryResponse, status_code=status.HTTP_200_OK) def create_category( obj_in: schemas.CategoryCreate, db: Session = Depends(deps.get_db) ) -> Any: is_a_registered_category = crud.get_category_by_name(db, obj_in.name) if is_a_registered_category: raise HTTPException(status_code=400, detail="Category already registered") return crud.create_category(db, obj_in=obj_in)
[ "app.crud.create_category", "app.crud.get_category_by_name", "fastapi.HTTPException", "app.crud.read_categories", "fastapi.Depends", "fastapi.APIRouter" ]
[((243, 254), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (252, 254), False, 'from fastapi import APIRouter, Depends, status, HTTPException\n'), ((398, 418), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (405, 418), False, 'from fastapi import APIRouter, Depends, status, HTTPException\n'), ((473, 521), 'app.crud.read_categories', 'crud.read_categories', (['db'], {'skip': 'skip', 'limit': 'limit'}), '(db, skip=skip, limit=limit)\n', (493, 521), False, 'from app import crud, schemas\n'), ((678, 698), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (685, 698), False, 'from fastapi import APIRouter, Depends, status, HTTPException\n'), ((740, 782), 'app.crud.get_category_by_name', 'crud.get_category_by_name', (['db', 'obj_in.name'], {}), '(db, obj_in.name)\n', (765, 782), False, 'from app import crud, schemas\n'), ((912, 951), 'app.crud.create_category', 'crud.create_category', (['db'], {'obj_in': 'obj_in'}), '(db, obj_in=obj_in)\n', (932, 951), False, 'from app import crud, schemas\n'), ((831, 899), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Category already registered"""'}), "(status_code=400, detail='Category already registered')\n", (844, 899), False, 'from fastapi import APIRouter, Depends, status, HTTPException\n')]
"""Minimal example dumping whatever event it receives.""" import time import logging import argparse from proglove_streams.logging import init_logging from proglove_streams.client import Client from proglove_streams.gateway import Gateway, GatewayMessageHandler from proglove_streams.exception import ProgloveStreamsException from proglove_streams.models.scan import ScanEvent from proglove_streams.models.scanner_state import ScannerStateEvent from proglove_streams.models.error import ErrorEvent from proglove_streams.models.gateway_state import GatewayStateEvent from proglove_streams.models.button_pressed import ButtonPressedEvent logger = logging.getLogger(__name__) def _set_display(client: Gateway, event: ScanEvent): client.set_display(str(event.device_serial), 'PG3', display_fields=[ { "display_field_id": 1, "display_field_header": "Storage Unit", "display_field_text": "R15" }, { "display_field_id": 2, "display_field_header": "Item", "display_field_text": "Engine 12" }, { "display_field_id": 3, "display_field_header": "Quantity", "display_field_text": "10" } ]) def _block_trigger(client: Gateway, event: ScanEvent): client.set_trigger_block(str(event.device_serial), True, ["TRIGGER_SINGLE_CLICK"], [], time_validity_duration=3000) def _unblock_trigger(client: Gateway, event: ScanEvent): client.set_trigger_block(str(event.device_serial), False, [], []) def on_connected(_client: Client, event: ScannerStateEvent) -> None: """On connected event callback.""" logger.info('device connected: %s', event.device_serial) def on_disconnected(_client: Client, event: ScannerStateEvent) -> None: """On disconnected event callback.""" logger.info('device disconnected: %s', event.device_serial) def on_scan(client: Client, event: ScanEvent) -> None: """On scan event callback.""" if not isinstance(client, Gateway): return logger.info( 'scan received: device %s, data: %s', event.device_serial, repr(event.scan_code) ) scan_code = str(event.scan_code).split('\r')[0] if scan_code == 'DISPLAY': _set_display(client, event) elif scan_code == 'BLOCK': _block_trigger(client, event) elif scan_code == 'UNBLOCK': _unblock_trigger(client, event) elif scan_code == 'FEEDBACK_OK': client.send_feedback(str(event.device_serial), 'FEEDBACK_POSITIVE') elif scan_code == 'FEEDBACK_NOK': client.send_feedback(str(event.device_serial), 'FEEDBACK_NEGATIVE') elif scan_code == 'STATE': client.get_gateway_state() def on_error(_client: Client, event: ErrorEvent) -> None: """On error event callback.""" logger.info('error received: %s', event.error_code) def on_gateway_state_event(_client: Client, event: GatewayStateEvent): """On Gateway state event callback.""" logger.info('''Gateway state received: serial: %s version: %s connected devices: %s''', event.gateway_serial, event.gateway_app_version, ','.join([d.device_serial for d in event.device_connected_list])) def on_button_pressed_event(_client: Client, event: ButtonPressedEvent) -> None: """On error event callback.""" logger.info('button pressed: device %s, trigger gesture: %s', event.device_serial, event.trigger_gesture) def app_example(): """Example of Streams API usage.""" parser = argparse.ArgumentParser('proglove_streams') parser.add_argument( '-L', '--logging-level', help='set the logging level (default is DEBUG)', type=str, metavar='LEVEL', choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'), default='DEBUG' ) parser.add_argument( '-b', '--baudrate', help='use a specific baudarate (default is 115200)', type=int, metavar='VALUE', default=115200 ) parser.add_argument( 'port', help='path to the serial device port (e.g. COM1, /dev/ttyACM0)', type=str, metavar='PORT', ) args = parser.parse_args() device = args.port baudrate = args.baudrate init_logging(getattr(logging, args.logging_level)) logger.info('Streams API example application.') handler = GatewayMessageHandler( on_scanner_connected=on_connected, on_scanner_disconnected=on_disconnected, on_scan=on_scan, on_error=on_error, on_gateway_state_event=on_gateway_state_event, on_button_pressed=on_button_pressed_event ) try: gateway = Gateway(handler, device, baudrate) gateway.start() except ProgloveStreamsException as e: logging.error('Streams API exception: %s', e) return logger.info('application started, press Ctrl-C to exit') try: while True: time.sleep(1000) except KeyboardInterrupt: gateway.stop()
[ "logging.error", "argparse.ArgumentParser", "proglove_streams.gateway.GatewayMessageHandler", "time.sleep", "proglove_streams.gateway.Gateway", "logging.getLogger" ]
[((647, 674), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (664, 674), False, 'import logging\n'), ((4077, 4120), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""proglove_streams"""'], {}), "('proglove_streams')\n", (4100, 4120), False, 'import argparse\n'), ((4922, 5158), 'proglove_streams.gateway.GatewayMessageHandler', 'GatewayMessageHandler', ([], {'on_scanner_connected': 'on_connected', 'on_scanner_disconnected': 'on_disconnected', 'on_scan': 'on_scan', 'on_error': 'on_error', 'on_gateway_state_event': 'on_gateway_state_event', 'on_button_pressed': 'on_button_pressed_event'}), '(on_scanner_connected=on_connected,\n on_scanner_disconnected=on_disconnected, on_scan=on_scan, on_error=\n on_error, on_gateway_state_event=on_gateway_state_event,\n on_button_pressed=on_button_pressed_event)\n', (4943, 5158), False, 'from proglove_streams.gateway import Gateway, GatewayMessageHandler\n'), ((5228, 5262), 'proglove_streams.gateway.Gateway', 'Gateway', (['handler', 'device', 'baudrate'], {}), '(handler, device, baudrate)\n', (5235, 5262), False, 'from proglove_streams.gateway import Gateway, GatewayMessageHandler\n'), ((5337, 5382), 'logging.error', 'logging.error', (['"""Streams API exception: %s"""', 'e'], {}), "('Streams API exception: %s', e)\n", (5350, 5382), False, 'import logging\n'), ((5502, 5518), 'time.sleep', 'time.sleep', (['(1000)'], {}), '(1000)\n', (5512, 5518), False, 'import time\n')]
""" Add an owner to a resource or resources Usage: add_owner {username} {resource list} """ from django.core.management.base import BaseCommand from django.contrib.auth.models import User from hs_core.models import BaseResource from hs_core.hydroshare.utils import get_resource_by_shortkey from hs_access_control.models.privilege import UserResourcePrivilege, PrivilegeCodes from django_irods.icommands import SessionException from django.db import transaction def set_quota_holder(resource, user): try: resource.set_quota_holder(user, user) except SessionException as ex: # some resources copied from www for testing do not exist in the iRODS backend, # hence need to skip these test artifects print(resource.short_id + ' raised SessionException when setting quota holder: ' + ex.stderr) except AttributeError as ex: # when federation is not set up correctly, istorage does not have a session # attribute, hence raise AttributeError - ignore for testing print((resource.short_id + ' raised AttributeError when setting quota holder: ' + str(ex))) except ValueError as ex: # when federation is not set up correctly, istorage does not have a session # attribute, hence raise AttributeError - ignore for testing print((resource.short_id + ' raised ValueError when setting quota holder: ' + str(ex))) class Command(BaseCommand): help = "add owner to resource" def add_arguments(self, parser): parser.add_argument('new_owner', type=str) parser.add_argument( '--owned_by', dest='owned_by', help='prior owner of the resources' ) parser.add_argument( '--set_quota_holder', action='store_true', # True for presence, False for absence dest='set_quota_holder', # value is options['set_quota_holder'] help='set quota holder as new owner') # a list of resource id's: none does nothing. parser.add_argument('resource_ids', nargs='*', type=str) def handle(self, *args, **options): user = User.objects.get(username=options['new_owner']) admin = User.objects.get(username='admin') if options['owned_by'] is not None: prior = User.objects.get(username=options['owned_by']) for res in BaseResource.objects.filter(r2urp__user=prior, r2urp__privilege=PrivilegeCodes.OWNER): with transaction.atomic(): resource = res.get_content_model() UserResourcePrivilege.share(user=user, resource=resource, privilege=PrivilegeCodes.OWNER, grantor=admin) print("added owner {} to {}".format(options['new_owner'], resource.short_id)) if options['set_quota_holder']: set_quota_holder(resource, user) print("set quota holder to {} for {}".format(options['new_owner'], resource.short_id)) if len(options['resource_ids']) > 0: # an array of resource short_id to check. for rid in options['resource_ids']: resource = get_resource_by_shortkey(rid, or_404=False) with transaction.atomic(): UserResourcePrivilege.share(user=user, resource=resource, privilege=PrivilegeCodes.OWNER, grantor=admin) print("added owner {} to {}".format(options['new_owner'], rid)) if options['set_quota_holder']: set_quota_holder(resource, user) print("set quota holder to {} for {}".format(options['new_owner'], resource.short_id))
[ "hs_core.models.BaseResource.objects.filter", "django.contrib.auth.models.User.objects.get", "hs_access_control.models.privilege.UserResourcePrivilege.share", "django.db.transaction.atomic", "hs_core.hydroshare.utils.get_resource_by_shortkey" ]
[((2177, 2224), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "options['new_owner']"}), "(username=options['new_owner'])\n", (2193, 2224), False, 'from django.contrib.auth.models import User\n'), ((2241, 2275), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""admin"""'}), "(username='admin')\n", (2257, 2275), False, 'from django.contrib.auth.models import User\n'), ((2341, 2387), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "options['owned_by']"}), "(username=options['owned_by'])\n", (2357, 2387), False, 'from django.contrib.auth.models import User\n'), ((2411, 2501), 'hs_core.models.BaseResource.objects.filter', 'BaseResource.objects.filter', ([], {'r2urp__user': 'prior', 'r2urp__privilege': 'PrivilegeCodes.OWNER'}), '(r2urp__user=prior, r2urp__privilege=\n PrivilegeCodes.OWNER)\n', (2438, 2501), False, 'from hs_core.models import BaseResource\n'), ((3429, 3472), 'hs_core.hydroshare.utils.get_resource_by_shortkey', 'get_resource_by_shortkey', (['rid'], {'or_404': '(False)'}), '(rid, or_404=False)\n', (3453, 3472), False, 'from hs_core.hydroshare.utils import get_resource_by_shortkey\n'), ((2570, 2590), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (2588, 2590), False, 'from django.db import transaction\n'), ((2667, 2776), 'hs_access_control.models.privilege.UserResourcePrivilege.share', 'UserResourcePrivilege.share', ([], {'user': 'user', 'resource': 'resource', 'privilege': 'PrivilegeCodes.OWNER', 'grantor': 'admin'}), '(user=user, resource=resource, privilege=\n PrivilegeCodes.OWNER, grantor=admin)\n', (2694, 2776), False, 'from hs_access_control.models.privilege import UserResourcePrivilege, PrivilegeCodes\n'), ((3494, 3514), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (3512, 3514), False, 'from django.db import transaction\n'), ((3536, 3645), 'hs_access_control.models.privilege.UserResourcePrivilege.share', 'UserResourcePrivilege.share', ([], {'user': 'user', 'resource': 'resource', 'privilege': 'PrivilegeCodes.OWNER', 'grantor': 'admin'}), '(user=user, resource=resource, privilege=\n PrivilegeCodes.OWNER, grantor=admin)\n', (3563, 3645), False, 'from hs_access_control.models.privilege import UserResourcePrivilege, PrivilegeCodes\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test Primitive Data Real ------------------------ """ import unittest import struct import math from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob from bacpypes.errors import InvalidTag from bacpypes.primitivedata import Real, Tag # some debugging _debug = 0 _log = ModuleLogger(globals()) @bacpypes_debugging def real_tag(x): """Convert a hex string to an real application tag.""" if _debug: real_tag._debug("real_tag %r", x) b = xtob(x) tag = Tag(Tag.applicationTagClass, Tag.realAppTag, len(b), b) if _debug: real_tag._debug(" - tag: %r", tag) return tag @bacpypes_debugging def real_encode(obj): """Encode an Real object into a tag.""" if _debug: real_encode._debug("real_encode %r", obj) tag = Tag() obj.encode(tag) if _debug: real_encode._debug(" - tag: %r, %r", tag, tag.tagData) return tag @bacpypes_debugging def real_decode(tag): """Decode an real application tag into an real.""" if _debug: real_decode._debug("real_decode %r", tag) obj = Real(tag) if _debug: real_decode._debug(" - obj: %r, %r", obj, obj.value) return obj @bacpypes_debugging def real_endec(v, x): """Pass the value to Real, construct a tag from the hex string, and compare results of encode and decoding each other.""" if _debug: real_endec._debug("real_endec %r %r", v, x) tag = real_tag(x) if _debug: real_endec._debug(" - tag: %r, %r", tag, tag.tagData) obj = Real(v) if _debug: real_endec._debug(" - obj: %r, %r", obj, obj.value) assert real_encode(obj) == tag if _debug: real_endec._debug(" - tags match") if math.isnan(v): assert math.isnan(real_decode(tag).value) if _debug: real_endec._debug(" - both NaN") else: assert real_decode(tag) == obj if _debug: real_endec._debug(" - objects match") @bacpypes_debugging class TestReal(unittest.TestCase): def test_real(self): if _debug: TestReal._debug("test_real") obj = Real() assert obj.value == 0.0 with self.assertRaises(TypeError): Real("some string") def test_real_real(self): if _debug: TestReal._debug("test_real_real") obj = Real(1.0) assert obj.value == 1.0 assert str(obj) == "Real(1)" obj = Real(73.5) assert obj.value == 73.5 assert str(obj) == "Real(73.5)" def test_real_tag(self): if _debug: TestReal._debug("test_real_tag") tag = Tag(Tag.applicationTagClass, Tag.realAppTag, 1, xtob('3f800000')) obj = Real(tag) assert obj.value == 1.0 tag = Tag(Tag.applicationTagClass, Tag.booleanAppTag, 0, xtob('')) with self.assertRaises(InvalidTag): Real(tag) tag = Tag(Tag.contextTagClass, 0, 1, xtob('ff')) with self.assertRaises(InvalidTag): Real(tag) tag = Tag(Tag.openingTagClass, 0) with self.assertRaises(InvalidTag): Real(tag) def test_real_copy(self): if _debug: TestReal._debug("test_real_copy") obj1 = Real(12) obj2 = Real(obj1) assert obj2.value == 12 def test_real_endec(self): if _debug: TestReal._debug("test_real_endec") with self.assertRaises(InvalidTag): obj = Real(real_tag('')) real_endec(0, '00000000') real_endec(1, '3f800000') real_endec(-1, 'bf800000') real_endec(73.5, '42930000') inf = float('inf') real_endec(inf, '7f800000') real_endec(-inf, 'ff800000') nan = float('nan') real_endec(nan, '7fc00000')
[ "bacpypes.primitivedata.Tag", "bacpypes.primitivedata.Real", "math.isnan", "bacpypes.debugging.xtob" ]
[((520, 527), 'bacpypes.debugging.xtob', 'xtob', (['x'], {}), '(x)\n', (524, 527), False, 'from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob\n'), ((819, 824), 'bacpypes.primitivedata.Tag', 'Tag', ([], {}), '()\n', (822, 824), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((1101, 1110), 'bacpypes.primitivedata.Real', 'Real', (['tag'], {}), '(tag)\n', (1105, 1110), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((1537, 1544), 'bacpypes.primitivedata.Real', 'Real', (['v'], {}), '(v)\n', (1541, 1544), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((1712, 1725), 'math.isnan', 'math.isnan', (['v'], {}), '(v)\n', (1722, 1725), False, 'import math\n'), ((2087, 2093), 'bacpypes.primitivedata.Real', 'Real', ([], {}), '()\n', (2091, 2093), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((2301, 2310), 'bacpypes.primitivedata.Real', 'Real', (['(1.0)'], {}), '(1.0)\n', (2305, 2310), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((2395, 2405), 'bacpypes.primitivedata.Real', 'Real', (['(73.5)'], {}), '(73.5)\n', (2399, 2405), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((2656, 2665), 'bacpypes.primitivedata.Real', 'Real', (['tag'], {}), '(tag)\n', (2660, 2665), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((2979, 3006), 'bacpypes.primitivedata.Tag', 'Tag', (['Tag.openingTagClass', '(0)'], {}), '(Tag.openingTagClass, 0)\n', (2982, 3006), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((3173, 3181), 'bacpypes.primitivedata.Real', 'Real', (['(12)'], {}), '(12)\n', (3177, 3181), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((3197, 3207), 'bacpypes.primitivedata.Real', 'Real', (['obj1'], {}), '(obj1)\n', (3201, 3207), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((2182, 2201), 'bacpypes.primitivedata.Real', 'Real', (['"""some string"""'], {}), "('some string')\n", (2186, 2201), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((2624, 2640), 'bacpypes.debugging.xtob', 'xtob', (['"""3f800000"""'], {}), "('3f800000')\n", (2628, 2640), False, 'from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob\n'), ((2764, 2772), 'bacpypes.debugging.xtob', 'xtob', (['""""""'], {}), "('')\n", (2768, 2772), False, 'from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob\n'), ((2830, 2839), 'bacpypes.primitivedata.Real', 'Real', (['tag'], {}), '(tag)\n', (2834, 2839), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((2886, 2896), 'bacpypes.debugging.xtob', 'xtob', (['"""ff"""'], {}), "('ff')\n", (2890, 2896), False, 'from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob\n'), ((2954, 2963), 'bacpypes.primitivedata.Real', 'Real', (['tag'], {}), '(tag)\n', (2958, 2963), False, 'from bacpypes.primitivedata import Real, Tag\n'), ((3063, 3072), 'bacpypes.primitivedata.Real', 'Real', (['tag'], {}), '(tag)\n', (3067, 3072), False, 'from bacpypes.primitivedata import Real, Tag\n')]
import matplotlib, numpy, pprint # matplotlib.rcParams['pdf.fonttype'] = 42 # matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.use('Agg') import matplotlib.pyplot as plot import gzip, csv, pylab from collections import namedtuple from rvs import * from patch import * """ task events table contains the following fields: 1. timestamp 2. missing info 3. job ID 4. task index - within the job 5. machine ID 6. event type 7. user name 8. scheduling class 9. priority 10. resource request for CPU cores 11. resource request for RAM 12. resource request for local disk space 13. different-machine constraint """ jobevents_f_to_i = { 'timestamp': 0, 'job id': 2, 'event': 3, 'job name': 6, 'logical job name': 7 } taskevents_f_to_i = { 'timestamp': 0, 'job id': 2, 'task index': 3, 'event': 5 } e_to_i = { 'schedule': 1, 'finish': 4 } def counter_to_furl(counter, obj="task"): part = str(counter) part = (5 - len(part) )*'0' + part return "/home/mfa51/google-clusterdata-2011/{}_events/part-{}-of-00500.csv.gz".format(obj, part) def deneme(): job_task_i__sch_finish_time_m = {} counter = 0 while 1: furl = counter_to_furl(counter) try: with gzip.open(furl, mode="rt") as f: reader = csv.reader(f) for line in reader: i = line[taskevents_f_to_i['job id'] ] + '_' + line[taskevents_f_to_i['task index'] ] e = int(line[taskevents_f_to_i['event'] ] ) if e == e_to_i['schedule'] or e == e_to_i['finish']: t = float(line[taskevents_f_to_i['timestamp'] ] )/10**6 if i not in job_task_i__sch_finish_time_m: job_task_i__sch_finish_time_m[i] = [t] else: job_task_i__sch_finish_time_m[i].append(t) except (OSError, IOError) as e: log(WARNING, "done with the files.") break counter += 1 if counter > 10: break with open("task_lifetime.dat", 'wt') as f: writer = csv.writer(f, delimiter=',') for job_task_i,sch_finish_time in job_task_i__sch_finish_time_m.items(): if len(sch_finish_time) >= 2: sch_finish_time = [t for t in sch_finish_time if t] if len(sch_finish_time) == 1: sch_finish_time.append(0) # elif len(sch_finish_time) > 2: # log(WARNING, "More than 2 scheduling or finish events for single task; sch_finish_time= {}".format(sch_finish_time) ) lifetime = abs(sch_finish_time[1] - sch_finish_time[0] ) writer.writerow([job_task_i, lifetime] ) def write_num_tasks_per_job(): wf = open("num_tasks.dat", 'wt') writer = csv.writer(wf, delimiter=',') counter = 0 while 1: print("counter= {}".format(counter) ) ji__ti_l_m = {} furl = counter_to_furl(counter) try: with gzip.open(furl, mode="rt") as f: reader = csv.reader(f) for line in reader: ji = int(line[taskevents_f_to_i['job id'] ] ) ti = int(line[taskevents_f_to_i['task index'] ] ) e = int(line[taskevents_f_to_i['event'] ] ) if e == e_to_i['schedule']: if ji not in ji__ti_l_m: ji__ti_l_m[ji] = set() ji__ti_l_m[ji].add(ti) print("counter= {}, writing now...".format(counter) ) for ji, ti_l in ji__ti_l_m.items(): writer.writerow([ji, len(ti_l) ] ) except (OSError, IOError) as e: log(WARNING, "done with the files.") break counter += 1 if counter > 510: break wf.close() def do_possible_merges_in_num_tasks(): ji__num_task_m = {} with open("num_tasks.dat", mode="rt") as f: reader = csv.reader(f) for line in reader: ji = int(line[0] ) num_task = int(line[1] ) if ji not in ji__num_task_m: ji__num_task_m[ji] = 0 ji__num_task_m[ji] += num_task with open("num_tasks_merged.dat", mode="wt") as f: writer = csv.writer(f, delimiter=',') for ji, num_tasks in ji__num_task_m.items(): writer.writerow([ji, num_tasks] ) log(WARNING, "done.") def write_jobs_w_num_task(num_task): ji_l = [] with open("num_tasks_merged.dat", mode="rt") as f: reader = csv.reader(f) for line in reader: num_task_ = int(line[1] ) if num_task_ == num_task: ji_l.append(int(line[0] ) ) print("writing, len(ji_l)= {}".format(len(ji_l) ) ) with open("jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f: writer = csv.writer(f, delimiter=',') for ji in ji_l: writer.writerow([ji] ) log(WARNING, "done.") def write_task_lifetimes(num_task): log(WARNING, "started; num_task= {}".format(num_task) ) ji_l = [] with open("jobs_w_num_task_{}.dat".format(num_task), mode="rt") as f: reader = csv.reader(f) for line in reader: ji_l.append(int(line[0] ) ) # Entry = namedtuple('Entry', 'ji ti') entry__sch_fin_l_m = {} counter = 0 while 1: print("counter= {}".format(counter) ) furl = counter_to_furl(counter) try: with gzip.open(furl, mode="rt") as f: reader = csv.reader(f) for line in reader: ji = int(line[taskevents_f_to_i['job id'] ] ) if ji in ji_l: e = int(line[taskevents_f_to_i['event'] ] ) if e == e_to_i['schedule'] or e == e_to_i['finish']: ti = int(line[taskevents_f_to_i['task index'] ] ) entry = Entry(ji=ji, ti=ti) t = float(line[taskevents_f_to_i['timestamp'] ] )/10**6 if entry not in entry__sch_fin_l_m: entry__sch_fin_l_m[entry] = [0,0] if e == e_to_i['schedule']: entry__sch_fin_l_m[entry][0] = t elif e == e_to_i['finish']: entry__sch_fin_l_m[entry][1] = t except (OSError, IOError) as e: log(WARNING, "done with the files.") break counter += 1 if counter > 510: break print("writing now...") with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f: writer = csv.writer(f, delimiter=',') for entry, sch_fin_tuple in entry__sch_fin_l_m.items(): if sch_fin_tuple[0] < sch_fin_tuple[1]: lt = sch_fin_tuple[1] - sch_fin_tuple[0] writer.writerow([lt] ) log(WARNING, "done.") def filter_task_lifetimes(num_task): lifetime_l = [] with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="rt") as f: reader = csv.reader(f) for line in reader: lt = float(line[0] ) if lt < 5000: lifetime_l.append(lt) with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f: writer = csv.writer(f, delimiter=',') for lt in lifetime_l: writer.writerow([lt] ) log(WARNING, "done.") # ****************************** PLOT ***************************** # def plot_num_tasks_hist(): num_tasks_l = [] with open("num_tasks_merged.dat", mode="rt") as f: reader = csv.reader(f) for line in reader: num_task = int(line[1] ) # if num_task > 1000: # print("num_task= {}".format(num_task) ) # if num_task > 1 and num_task < 2000: num_tasks_l.append(num_task) # num_task__num_job_m = {} # for n in num_tasks_l: # if n not in num_task__num_job_m: # num_task__num_job_m[n] = 0 # num_task__num_job_m[n] += 1 # print("num_task__num_job_m= {}".format(pprint.pformat(num_task__num_job_m) ) ) # plot.hist(num_tasks_l, bins=1000, histtype='step') plot.hist(num_tasks_l, bins=100, histtype='step', normed=True, lw=2) plot.xlabel("Number of tasks") plot.ylabel("Frequency") plot.savefig("plot_num_tasks_hist.png", bbox_inches='tight') plot.gcf().clear() log(WARNING, "done.") def plot_task_lifetime_hist(k): lifetime_l = [] with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f: reader = csv.reader(f) for line in reader: lifetime_l.append(float(line[0] ) ) # rv = Pareto(a=2, loc=2) # for i in range(1000000): # lifetime_l.append(rv.gen_sample() ) lifetime_l = numpy.sort(lifetime_l) print("len(lifetime_l)= {}".format(len(lifetime_l) ) ) fig = plot.figure(1) # def_size = fig.get_size_inches() # fig.set_size_inches(def_size[0]*1.5, def_size[1] ) plot.subplot(211) # plot.step(x_l, y_l, 'bo', label='log-linear', lw=2) plot.hist(lifetime_l, bins=100, histtype='step', normed=True, lw=2) plot.xlabel("X (s)") plot.ylabel("Frequency") plot.title(r'$k= {}$'.format(k) ) x_l = lifetime_l[::-1] y_l = numpy.arange(lifetime_l.size)/lifetime_l.size plot.subplot(223) plot.yscale('log') plot.step(x_l, y_l, 'bo', label='log(tail) vs. X', lw=2) plot.xlabel("X (s)") plot.ylabel("Tail") plot.legend() plot.subplot(224) plot.xscale('log') plot.yscale('log') plot.step(x_l, y_l, 'bo', label='log(tail) vs. log(X)', lw=2) plot.xlabel("X (s)") plot.legend() # plot.xlabel("X") # plot.xlabel("Task lifetime X (s)") # plot.ylabel(r'$Pr\{X > x\}$') plot.savefig("plot_task_lifetime_hist_k_{}.png".format(k) ) plot.gcf().clear() log(WARNING, "done; k= {}".format(k) ) def pplot_task_lifetime_hist(k): log(INFO, "started; k= {}".format(k) ) lifetime_l = [] # with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f: with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f: reader = csv.reader(f) for line in reader: lifetime_l.append(float(line[0] ) ) lifetime_l = numpy.sort(lifetime_l) print("len(lifetime_l)= {}".format(len(lifetime_l) ) ) # # plot.hist(lifetime_l, bins=100, histtype='step', normed=True, lw=2) x_l = lifetime_l[::-1] y_l = numpy.arange(lifetime_l.size)/lifetime_l.size # y_l = [math.log(y + 0.000001) for y in y_l] # m, b = numpy.polyfit(x_l, y_l, 1) # plot.plot(x_l, m*x_l+b, 'r', lw=1, ls=':') # step_size = 10 # num_rank = math.ceil(x_l[0]/step_size) # # rank__avg_lifetime_l = [] # rank__num_lifetime_l = [] # i = 0 # for r in range(1, num_rank+1): # sum_ = 0 # counter = 0 # while i < len(x_l) and x_l[i] > x_l[0]-r*step_size: # counter += 1 # sum_ += x_l[i] # i += 1 # rank__num_lifetime_l.append(counter) # # avg = 0 # # if counter: # # avg = sum_/counter # # rank__avg_lifetime_l.append(avg) # # print("i= {}, rank__avg_lifetime_l=\n{}".format(i, rank__avg_lifetime_l) ) # rank__num_lifetime_l = list(reversed(rank__num_lifetime_l) ) # rank_freq_l = [n/sum(rank__num_lifetime_l) for n in rank__num_lifetime_l] # rank_tailprob_l = [sum(rank_freq_l[r-1:]) for r in range(1, num_rank+1) ] # # plot.plot(range(1, num_rank+1), rank__avg_lifetime_l, 'bo', ls=':') # # plot.xlabel(r'Rank', fontsize=13) # # plot.ylabel(r'Tail distribution', fontsize=13) # # plot.step(range(1, num_rank+1), rank_tailprob_l, 'bo', ls=':') # # plot.yscale('log') # # plot.xscale('log') if k == 15: plot.xlim(([10, 2*10**5] ) ) plot.ylim(([1/2*10**(-5), 1.3] ) ) elif k == 400: plot.xlim(([10, 2*10**4] ) ) plot.ylim(([10**(-6), 1.3] ) ) elif k == 1050: plot.xlim(([10, 2*10**4] ) ) plot.ylim(([10**(-6), 1.3] ) ) # plot.step(x_l, y_l, 'bo', lw=1, ls=':') plot.step(x_l, y_l, 'bo', ms=10, mew=0, ls=':') plot.xscale('log') plot.yscale('log') plot.xlabel(r'Task lifetime', fontsize=18) plot.ylabel(r'Tail distribution', fontsize=18) # plot.ylabel(r'Fraction of tasks completed in x') # plot.title(r'Jobs with {} tasks'.format(k), fontsize=13) # plot.title('k= {}, Mean= {}, Stdev= {}'.format(k, round(numpy.mean(x_l), 1), round(numpy.std(x_l), 1) ), fontsize=13) plot.title('k= {}, Mean= {}'.format(k, round(numpy.mean(x_l), 1) ), fontsize=18) plot.gcf().set_size_inches(4, 3) prettify(plot.gca() ) # plot.savefig("pplot_task_lifetime_hist_k_{}.pdf".format(k) ) plot.savefig("pplot_task_lifetime_hist_k_{}.png".format(k), bbox_inches='tight') plot.gcf().clear() log(WARNING, "done; k= {}".format(k) ) def plot_qq_task_lifetimes(k): lifetime_l = [] # with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f: with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f: reader = csv.reader(f) for line in reader: lifetime_l.append(float(line[0] ) ) lifetime_l = numpy.sort(lifetime_l) print("len(lifetime_l)= {}".format(len(lifetime_l) ) ) # For different dists: https://docs.scipy.org/doc/scipy/reference/stats.html # scipy.stats.probplot(lifetime_l, dist="expon", plot=plot) # scipy.stats.probplot(lifetime_l, dist="pareto", sparams=(1.2,), plot=plot) plot.savefig("plot_qq_task_lifetimes_k_{}.png".format(k) ) log(WARNING, "done; k= {}".format(k) ) if __name__ == "__main__": ## Uncomment with caution! # write_num_tasks_per_job() # do_possible_merges_in_num_tasks() # write_jobs_w_num_task(num_task=15) # write_jobs_w_num_task(num_task=400) # write_jobs_w_num_task(num_task=1000) # write_jobs_w_num_task(num_task=1050) # write_task_lifetimes(num_task=15) # filter_task_lifetimes(num_task=15) # write_task_lifetimes(num_task=400) # filter_task_lifetimes(num_task=400) # write_task_lifetimes(num_task=1000) # filter_task_lifetimes(num_task=1000) # write_task_lifetimes(num_task=1050) # filter_task_lifetimes(num_task=1050) # plot_num_tasks_hist() # plot_task_lifetime_hist(k=15) # plot_task_lifetime_hist(k=400) # plot_task_lifetime_hist(k=1000) # plot_task_lifetime_hist(k=1050) # pplot_task_lifetime_hist(k=15) # pplot_task_lifetime_hist(k=400) # pplot_task_lifetime_hist(k=1000) pplot_task_lifetime_hist(k=1050) # plot_qq_task_lifetimes(k=400) pass
[ "matplotlib.pyplot.yscale", "csv.reader", "matplotlib.pyplot.step", "matplotlib.pyplot.figure", "numpy.mean", "numpy.arange", "matplotlib.pyplot.gca", "csv.writer", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "numpy.sort", "matplotlib.use", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gcf", "matplotlib.pyplot.xscale", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlim", "gzip.open", "matplotlib.pyplot.hist", "collections.namedtuple", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig" ]
[((118, 139), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (132, 139), False, 'import matplotlib, numpy, pprint\n'), ((2584, 2613), 'csv.writer', 'csv.writer', (['wf'], {'delimiter': '""","""'}), "(wf, delimiter=',')\n", (2594, 2613), False, 'import gzip, csv, pylab\n'), ((4766, 4794), 'collections.namedtuple', 'namedtuple', (['"""Entry"""', '"""ji ti"""'], {}), "('Entry', 'ji ti')\n", (4776, 4794), False, 'from collections import namedtuple\n'), ((7427, 7495), 'matplotlib.pyplot.hist', 'plot.hist', (['num_tasks_l'], {'bins': '(100)', 'histtype': '"""step"""', 'normed': '(True)', 'lw': '(2)'}), "(num_tasks_l, bins=100, histtype='step', normed=True, lw=2)\n", (7436, 7495), True, 'import matplotlib.pyplot as plot\n'), ((7501, 7531), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Number of tasks"""'], {}), "('Number of tasks')\n", (7512, 7531), True, 'import matplotlib.pyplot as plot\n'), ((7534, 7558), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (7545, 7558), True, 'import matplotlib.pyplot as plot\n'), ((7561, 7621), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""plot_num_tasks_hist.png"""'], {'bbox_inches': '"""tight"""'}), "('plot_num_tasks_hist.png', bbox_inches='tight')\n", (7573, 7621), True, 'import matplotlib.pyplot as plot\n'), ((8021, 8043), 'numpy.sort', 'numpy.sort', (['lifetime_l'], {}), '(lifetime_l)\n', (8031, 8043), False, 'import matplotlib, numpy, pprint\n'), ((8112, 8126), 'matplotlib.pyplot.figure', 'plot.figure', (['(1)'], {}), '(1)\n', (8123, 8126), True, 'import matplotlib.pyplot as plot\n'), ((8221, 8238), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(211)'], {}), '(211)\n', (8233, 8238), True, 'import matplotlib.pyplot as plot\n'), ((8297, 8364), 'matplotlib.pyplot.hist', 'plot.hist', (['lifetime_l'], {'bins': '(100)', 'histtype': '"""step"""', 'normed': '(True)', 'lw': '(2)'}), "(lifetime_l, bins=100, histtype='step', normed=True, lw=2)\n", (8306, 8364), True, 'import matplotlib.pyplot as plot\n'), ((8367, 8387), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""X (s)"""'], {}), "('X (s)')\n", (8378, 8387), True, 'import matplotlib.pyplot as plot\n'), ((8390, 8414), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (8401, 8414), True, 'import matplotlib.pyplot as plot\n'), ((8535, 8552), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(223)'], {}), '(223)\n', (8547, 8552), True, 'import matplotlib.pyplot as plot\n'), ((8555, 8573), 'matplotlib.pyplot.yscale', 'plot.yscale', (['"""log"""'], {}), "('log')\n", (8566, 8573), True, 'import matplotlib.pyplot as plot\n'), ((8576, 8632), 'matplotlib.pyplot.step', 'plot.step', (['x_l', 'y_l', '"""bo"""'], {'label': '"""log(tail) vs. X"""', 'lw': '(2)'}), "(x_l, y_l, 'bo', label='log(tail) vs. X', lw=2)\n", (8585, 8632), True, 'import matplotlib.pyplot as plot\n'), ((8635, 8655), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""X (s)"""'], {}), "('X (s)')\n", (8646, 8655), True, 'import matplotlib.pyplot as plot\n'), ((8658, 8677), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Tail"""'], {}), "('Tail')\n", (8669, 8677), True, 'import matplotlib.pyplot as plot\n'), ((8680, 8693), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (8691, 8693), True, 'import matplotlib.pyplot as plot\n'), ((8696, 8713), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(224)'], {}), '(224)\n', (8708, 8713), True, 'import matplotlib.pyplot as plot\n'), ((8716, 8734), 'matplotlib.pyplot.xscale', 'plot.xscale', (['"""log"""'], {}), "('log')\n", (8727, 8734), True, 'import matplotlib.pyplot as plot\n'), ((8737, 8755), 'matplotlib.pyplot.yscale', 'plot.yscale', (['"""log"""'], {}), "('log')\n", (8748, 8755), True, 'import matplotlib.pyplot as plot\n'), ((8758, 8819), 'matplotlib.pyplot.step', 'plot.step', (['x_l', 'y_l', '"""bo"""'], {'label': '"""log(tail) vs. log(X)"""', 'lw': '(2)'}), "(x_l, y_l, 'bo', label='log(tail) vs. log(X)', lw=2)\n", (8767, 8819), True, 'import matplotlib.pyplot as plot\n'), ((8822, 8842), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""X (s)"""'], {}), "('X (s)')\n", (8833, 8842), True, 'import matplotlib.pyplot as plot\n'), ((8845, 8858), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (8856, 8858), True, 'import matplotlib.pyplot as plot\n'), ((9463, 9485), 'numpy.sort', 'numpy.sort', (['lifetime_l'], {}), '(lifetime_l)\n', (9473, 9485), False, 'import matplotlib, numpy, pprint\n'), ((11206, 11253), 'matplotlib.pyplot.step', 'plot.step', (['x_l', 'y_l', '"""bo"""'], {'ms': '(10)', 'mew': '(0)', 'ls': '""":"""'}), "(x_l, y_l, 'bo', ms=10, mew=0, ls=':')\n", (11215, 11253), True, 'import matplotlib.pyplot as plot\n'), ((11259, 11277), 'matplotlib.pyplot.xscale', 'plot.xscale', (['"""log"""'], {}), "('log')\n", (11270, 11277), True, 'import matplotlib.pyplot as plot\n'), ((11280, 11298), 'matplotlib.pyplot.yscale', 'plot.yscale', (['"""log"""'], {}), "('log')\n", (11291, 11298), True, 'import matplotlib.pyplot as plot\n'), ((11301, 11342), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Task lifetime"""'], {'fontsize': '(18)'}), "('Task lifetime', fontsize=18)\n", (11312, 11342), True, 'import matplotlib.pyplot as plot\n'), ((11346, 11391), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Tail distribution"""'], {'fontsize': '(18)'}), "('Tail distribution', fontsize=18)\n", (11357, 11391), True, 'import matplotlib.pyplot as plot\n'), ((12324, 12346), 'numpy.sort', 'numpy.sort', (['lifetime_l'], {}), '(lifetime_l)\n', (12334, 12346), False, 'import matplotlib, numpy, pprint\n'), ((1945, 1973), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1955, 1973), False, 'import gzip, csv, pylab\n'), ((3587, 3600), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3597, 3600), False, 'import gzip, csv, pylab\n'), ((3850, 3878), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (3860, 3878), False, 'import gzip, csv, pylab\n'), ((4108, 4121), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4118, 4121), False, 'import gzip, csv, pylab\n'), ((4385, 4413), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (4395, 4413), False, 'import gzip, csv, pylab\n'), ((4679, 4692), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4689, 4692), False, 'import gzip, csv, pylab\n'), ((5968, 5996), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (5978, 5996), False, 'import gzip, csv, pylab\n'), ((6367, 6380), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (6377, 6380), False, 'import gzip, csv, pylab\n'), ((6598, 6626), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (6608, 6626), False, 'import gzip, csv, pylab\n'), ((6891, 6904), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (6901, 6904), False, 'import gzip, csv, pylab\n'), ((7824, 7837), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (7834, 7837), False, 'import gzip, csv, pylab\n'), ((8487, 8516), 'numpy.arange', 'numpy.arange', (['lifetime_l.size'], {}), '(lifetime_l.size)\n', (8499, 8516), False, 'import matplotlib, numpy, pprint\n'), ((9365, 9378), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (9375, 9378), False, 'import gzip, csv, pylab\n'), ((9656, 9685), 'numpy.arange', 'numpy.arange', (['lifetime_l.size'], {}), '(lifetime_l.size)\n', (9668, 9685), False, 'import matplotlib, numpy, pprint\n'), ((10918, 10946), 'matplotlib.pyplot.xlim', 'plot.xlim', (['[10, 2 * 10 ** 5]'], {}), '([10, 2 * 10 ** 5])\n', (10927, 10946), True, 'import matplotlib.pyplot as plot\n'), ((10951, 10985), 'matplotlib.pyplot.ylim', 'plot.ylim', (['[1 / 2 * 10 ** -5, 1.3]'], {}), '([1 / 2 * 10 ** -5, 1.3])\n', (10960, 10985), True, 'import matplotlib.pyplot as plot\n'), ((11761, 11771), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (11769, 11771), True, 'import matplotlib.pyplot as plot\n'), ((12226, 12239), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (12236, 12239), False, 'import gzip, csv, pylab\n'), ((7624, 7634), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (7632, 7634), True, 'import matplotlib.pyplot as plot\n'), ((9020, 9030), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (9028, 9030), True, 'import matplotlib.pyplot as plot\n'), ((11007, 11035), 'matplotlib.pyplot.xlim', 'plot.xlim', (['[10, 2 * 10 ** 4]'], {}), '([10, 2 * 10 ** 4])\n', (11016, 11035), True, 'import matplotlib.pyplot as plot\n'), ((11040, 11066), 'matplotlib.pyplot.ylim', 'plot.ylim', (['[10 ** -6, 1.3]'], {}), '([10 ** -6, 1.3])\n', (11049, 11066), True, 'import matplotlib.pyplot as plot\n'), ((11717, 11727), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (11725, 11727), True, 'import matplotlib.pyplot as plot\n'), ((11924, 11934), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (11932, 11934), True, 'import matplotlib.pyplot as plot\n'), ((1190, 1216), 'gzip.open', 'gzip.open', (['furl'], {'mode': '"""rt"""'}), "(furl, mode='rt')\n", (1199, 1216), False, 'import gzip, csv, pylab\n'), ((1240, 1253), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1250, 1253), False, 'import gzip, csv, pylab\n'), ((2757, 2783), 'gzip.open', 'gzip.open', (['furl'], {'mode': '"""rt"""'}), "(furl, mode='rt')\n", (2766, 2783), False, 'import gzip, csv, pylab\n'), ((2807, 2820), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2817, 2820), False, 'import gzip, csv, pylab\n'), ((4944, 4970), 'gzip.open', 'gzip.open', (['furl'], {'mode': '"""rt"""'}), "(furl, mode='rt')\n", (4953, 4970), False, 'import gzip, csv, pylab\n'), ((4994, 5007), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5004, 5007), False, 'import gzip, csv, pylab\n'), ((11093, 11121), 'matplotlib.pyplot.xlim', 'plot.xlim', (['[10, 2 * 10 ** 4]'], {}), '([10, 2 * 10 ** 4])\n', (11102, 11121), True, 'import matplotlib.pyplot as plot\n'), ((11126, 11152), 'matplotlib.pyplot.ylim', 'plot.ylim', (['[10 ** -6, 1.3]'], {}), '([10 ** -6, 1.3])\n', (11135, 11152), True, 'import matplotlib.pyplot as plot\n'), ((11676, 11691), 'numpy.mean', 'numpy.mean', (['x_l'], {}), '(x_l)\n', (11686, 11691), False, 'import matplotlib, numpy, pprint\n')]
#!/usr/bin/env python # -*-Python-*- import argparse import contextlib import datetime import ftplib import os import re import subprocess import tempfile def get_valid_filename(s): """ Return the given string converted to a string that can be used for a clean filename. Remove leading and trailing spaces; convert other spaces to underscores; and remove anything that is not an alphanumeric, dash, underscore, or dot. >>> get_valid_filename("john's portrait in 2004.jpg") 'johns_portrait_in_2004.jpg' """ s = s.strip().replace(" ", "_") return re.sub(r"(?u)[^-\w.]", "", s) def read_config(): # Read the config with open(os.path.expanduser("~/.config/fileup/config")) as f: """Create a config file at ~/.config/fileup/config with the following information and structure: example.com file_up_folder my_user_name my_difficult_password """ base_url, base_folder, folder, user, pw = [ s.replace("\n", "") for s in f.readlines() ] return base_url, base_folder, folder, user, pw def remove_old_files(ftp, today): # Remove all files that are past the limit files = [f for f in ftp.nlst() if "_delete_on_" in f] file_dates = [f.rsplit("_delete_on_", 1) for f in files] for file_name, date in file_dates: rm_date = datetime.datetime.strptime(date, "%Y-%m-%d").date() if rm_date < today: print(f'removing "{file_name}" because the date passed') try: ftp.delete(file_name) except Exception: # File didn't exist anymore for some reason... pass ftp.delete(file_name + "_delete_on_" + date) def main(): # Get arguments description = [ "Publish a file. \n \n", "Create a config file at ~/.config/fileup/config with the following information and structure:\n", "example.com", "base_folder", "file_up_folder", "my_user_name", "my_difficult_password", ] parser = argparse.ArgumentParser( description="\n".join(description), formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument("fname", type=str) parser.add_argument( "-t", "--time", type=int, default=90, help="If time is 0 the file will never be deleted, default is 90 days.", ) parser.add_argument("-d", "--direct", action="store_true") parser.add_argument("-i", "--img", action="store_true") args = parser.parse_args() fname = os.path.abspath(os.path.expanduser(args.fname)) fname_base = os.path.basename(fname) base_url, base_folder, folder, user, pw = read_config() # Connect to server ftp = ftplib.FTP(base_url, user, pw) ftp.cwd(os.path.join(base_folder, folder)) # Fix the filename to avoid filename character issues fname_base = get_valid_filename(fname_base) today = datetime.datetime.now().date() remove_old_files(ftp, today) # Delete first if file already exists, it could happen that there is # already a file with a specified deletion date, these should be removed. for f in ftp.nlst(): if f.startswith(fname_base) and "_delete_on_" in f: ftp.delete(f) if args.time != 0: # could be negative (used for debugging). remove_on = today + datetime.timedelta(days=args.time) fname_date = fname_base + "_delete_on_" + str(remove_on) with tempfile.TemporaryFile() as f: print("upload " + fname_date) ftp.storbinary(f"STOR {fname_date}", f) # Upload and open the actual file with open(fname, "rb") as f: ftp.storbinary(f"STOR {fname_base}", f) print("upload " + fname_base) ftp.quit() # Create URL if folder: url = f"{base_url}/{folder}/{fname_base}" else: url = f"{base_url}/{fname_base}" if args.direct: # Returns the url as is. url = "http://" + url elif args.img: url = f"![](http://{url})" elif fname.endswith(".ipynb"): # Return the url in the nbviewer url = "http://nbviewer.jupyter.org/url/" + url + "?flush_cache=true" else: url = "http://" + url # Put a URL into clipboard only works on OS X with contextlib.suppress(Exception): process = subprocess.Popen( "pbcopy", env={"LANG": "en_US.UTF-8"}, stdin=subprocess.PIPE ) process.communicate(url.encode("utf-8")) print("Your url is: ", url) if __name__ == "__main__": main()
[ "subprocess.Popen", "os.path.join", "os.path.basename", "contextlib.suppress", "datetime.datetime.now", "tempfile.TemporaryFile", "datetime.datetime.strptime", "datetime.timedelta", "ftplib.FTP", "os.path.expanduser", "re.sub" ]
[((588, 617), 're.sub', 're.sub', (['"""(?u)[^-\\\\w.]"""', '""""""', 's'], {}), "('(?u)[^-\\\\w.]', '', s)\n", (594, 617), False, 'import re\n'), ((2695, 2718), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (2711, 2718), False, 'import os\n'), ((2815, 2845), 'ftplib.FTP', 'ftplib.FTP', (['base_url', 'user', 'pw'], {}), '(base_url, user, pw)\n', (2825, 2845), False, 'import ftplib\n'), ((2645, 2675), 'os.path.expanduser', 'os.path.expanduser', (['args.fname'], {}), '(args.fname)\n', (2663, 2675), False, 'import os\n'), ((2858, 2891), 'os.path.join', 'os.path.join', (['base_folder', 'folder'], {}), '(base_folder, folder)\n', (2870, 2891), False, 'import os\n'), ((4375, 4405), 'contextlib.suppress', 'contextlib.suppress', (['Exception'], {}), '(Exception)\n', (4394, 4405), False, 'import contextlib\n'), ((4425, 4503), 'subprocess.Popen', 'subprocess.Popen', (['"""pbcopy"""'], {'env': "{'LANG': 'en_US.UTF-8'}", 'stdin': 'subprocess.PIPE'}), "('pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)\n", (4441, 4503), False, 'import subprocess\n'), ((675, 720), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.config/fileup/config"""'], {}), "('~/.config/fileup/config')\n", (693, 720), False, 'import os\n'), ((3013, 3036), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3034, 3036), False, 'import datetime\n'), ((3435, 3469), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'args.time'}), '(days=args.time)\n', (3453, 3469), False, 'import datetime\n'), ((3548, 3572), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {}), '()\n', (3570, 3572), False, 'import tempfile\n'), ((1390, 1434), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (1416, 1434), False, 'import datetime\n')]
import glob from pathlib import Path from typing import List, Union ARCHIVE_EXTENSIONS = ['tar'] IMG_EXTENSIONS = ['jpeg', 'jpg', 'bmp', 'png'] VID_EXTENSIONS = ['mp4', 'avi', 'mov', 'mkv', 'mts', 'ts', 'webm'] def normalize_path(path: Union[str, Path]) -> Path: return Path(path).expanduser().resolve() def file_extension(path: str) -> str: """ Extracts canonical file extension from path (no leading dot and all lowercase) e.g. mp4, avi, jpeg, ts """ return Path(path).suffix[1:].lower() def files_in_dir(dir: Path, recursive=True, sort=False) -> List[str]: """ Iterates recursively through all files in all subfolders. """ path = Path(dir) if recursive: search_path = str(path.joinpath('**')) file_list = glob.glob(search_path, recursive=True) else: search_path = str(path.joinpath('*')) file_list = glob.glob(search_path, recursive=False) file_list = [f for f in file_list if Path(f).is_file()] if sort: file_list.sort() return file_list def is_archive(file_path: str) -> bool: """ Determine of a given file is an archive (according to its extension). """ file_ext = file_extension(file_path) return file_ext in ARCHIVE_EXTENSIONS def is_image(file_path: str) -> bool: """ Determine of a given file is an image (according to its extension). """ file_ext = file_extension(file_path) return file_ext in IMG_EXTENSIONS def is_video(file_path: str) -> bool: """ Determine of a given file is an video (according to its extension). """ file_ext = file_extension(file_path) return file_ext in VID_EXTENSIONS def archives_in_dir(dir: Path, recursive=True, sort=False): """ Iterates recursively over all archives in all subfolders. """ file_list = files_in_dir(dir=dir, recursive=recursive, sort=sort) for file in file_list: if is_archive(file): yield file def images_in_dir(dir: Path, recursive=True, sort=False): """ Iterates recursively over all images in all subfolders. """ file_list = files_in_dir(dir=dir, recursive=recursive, sort=sort) for file in file_list: if is_image(file): yield file def videos_in_dir(dir: Path, recursive=True, sort=False): """ Iterates recursively over all videos in all subfolders. """ file_list = files_in_dir(dir=dir, recursive=recursive, sort=sort) for file in file_list: if is_video(file): yield file
[ "pathlib.Path", "glob.glob" ]
[((682, 691), 'pathlib.Path', 'Path', (['dir'], {}), '(dir)\n', (686, 691), False, 'from pathlib import Path\n'), ((778, 816), 'glob.glob', 'glob.glob', (['search_path'], {'recursive': '(True)'}), '(search_path, recursive=True)\n', (787, 816), False, 'import glob\n'), ((893, 932), 'glob.glob', 'glob.glob', (['search_path'], {'recursive': '(False)'}), '(search_path, recursive=False)\n', (902, 932), False, 'import glob\n'), ((278, 288), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (282, 288), False, 'from pathlib import Path\n'), ((490, 500), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (494, 500), False, 'from pathlib import Path\n'), ((975, 982), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (979, 982), False, 'from pathlib import Path\n')]
from bluetooth import * from time import sleep import re, uuid devices = set() devices_to_update = set() dev_mac = ':'.join(re.findall('..', '%012x' % uuid.getnode())).upper() print(dev_mac) def enable_ble(): print('enabling bluetooth') try: os.system('sudo systemctl start bluetooth.service && sudo hciconfig hci0 up') except Exception as e: print(e) def job(): print("\n\nPerforming inquiry...") new_devices = set() global devices_to_update services = find_service(name="helloService") print(services) for i in range(len(services)): match = services[i] if match["name"] == "helloService": port = match["port"] name = match["name"] host = match["host"] print(name, port, host) new_devices.add(host) devices_diff_set = devices.symmetric_difference(new_devices) print("Diff set = %s"%devices_diff_set) devices_modified = False if len(devices_diff_set) > 0: for addr in devices_diff_set: if addr in new_devices: devices.add(addr) devices_to_update.add(addr) devices_modified = True else: print("Removing device %s" % addr) devices.remove(addr) if addr in devices_to_update: devices_to_update.remove(addr) devices_modified = True if devices_modified: # need to notify to all devices devices_to_update = devices_to_update.union(devices) if len(devices_to_update) > 0: update_message = "%sEOD" % devices print("Update message = %s" % update_message) for addr in devices_to_update.copy(): try: print("Connecting to %s to send updated list" % addr) client_socket = BluetoothSocket(RFCOMM) client_socket.connect((addr, 1)) client_socket.send(update_message) print("Sent to %s" % addr) client_socket.close() devices_to_update.remove(addr) except Exception as e: print(e) else: print("No updates to send") enable_ble() while True: job() sleep(5)
[ "uuid.getnode", "time.sleep" ]
[((2263, 2271), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (2268, 2271), False, 'from time import sleep\n'), ((153, 167), 'uuid.getnode', 'uuid.getnode', ([], {}), '()\n', (165, 167), False, 'import re, uuid\n')]
from functools import wraps from pastry.models import User from flask import request, abort, jsonify def parse_api_key(): key = None if request.args.get('api_key'): key = request.args.get('api_key') elif request.form.get('api_key'): key = request.form.get('api_key') return key def login_required(f): @wraps(f) def decorated(*args, **kwargs): if request.method != 'OPTIONS': # Verify auth-token or api_key is present token = request.headers.get('Auth-Token') api_key = parse_api_key() if not token and not api_key: abort(401) # Verify key/token if api_key: if not User.verify_api_key(api_key): response = jsonify({ 'message': 'Invalid API Key', 'invalid_api_key': True, }) response.status_code = 401 return response elif token: if not User.verify_auth_token(token): response = jsonify({ 'message': 'Expired Token', 'expired_token': True, }) response.status_code = 401 return response return f(*args, **kwargs) return decorated
[ "flask.request.args.get", "flask.request.headers.get", "flask.request.form.get", "pastry.models.User.verify_auth_token", "flask.abort", "flask.jsonify", "pastry.models.User.verify_api_key", "functools.wraps" ]
[((146, 173), 'flask.request.args.get', 'request.args.get', (['"""api_key"""'], {}), "('api_key')\n", (162, 173), False, 'from flask import request, abort, jsonify\n'), ((342, 350), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (347, 350), False, 'from functools import wraps\n'), ((189, 216), 'flask.request.args.get', 'request.args.get', (['"""api_key"""'], {}), "('api_key')\n", (205, 216), False, 'from flask import request, abort, jsonify\n'), ((226, 253), 'flask.request.form.get', 'request.form.get', (['"""api_key"""'], {}), "('api_key')\n", (242, 253), False, 'from flask import request, abort, jsonify\n'), ((269, 296), 'flask.request.form.get', 'request.form.get', (['"""api_key"""'], {}), "('api_key')\n", (285, 296), False, 'from flask import request, abort, jsonify\n'), ((502, 535), 'flask.request.headers.get', 'request.headers.get', (['"""Auth-Token"""'], {}), "('Auth-Token')\n", (521, 535), False, 'from flask import request, abort, jsonify\n'), ((632, 642), 'flask.abort', 'abort', (['(401)'], {}), '(401)\n', (637, 642), False, 'from flask import request, abort, jsonify\n'), ((722, 750), 'pastry.models.User.verify_api_key', 'User.verify_api_key', (['api_key'], {}), '(api_key)\n', (741, 750), False, 'from pastry.models import User\n'), ((783, 847), 'flask.jsonify', 'jsonify', (["{'message': 'Invalid API Key', 'invalid_api_key': True}"], {}), "({'message': 'Invalid API Key', 'invalid_api_key': True})\n", (790, 847), False, 'from flask import request, abort, jsonify\n'), ((1050, 1079), 'pastry.models.User.verify_auth_token', 'User.verify_auth_token', (['token'], {}), '(token)\n', (1072, 1079), False, 'from pastry.models import User\n'), ((1112, 1172), 'flask.jsonify', 'jsonify', (["{'message': 'Expired Token', 'expired_token': True}"], {}), "({'message': 'Expired Token', 'expired_token': True})\n", (1119, 1172), False, 'from flask import request, abort, jsonify\n')]
"""Draw a imdt calendar image.""" from contextlib import contextmanager from functools import partial from imperial_calendar import GregorianDateTime, ImperialDateTime, ImperialYearMonth from imperial_calendar.transform import ( grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd, ) import typing as t import xml.etree.ElementTree as ET def next_grdt_day_of(grdt: GregorianDateTime) -> GregorianDateTime: """Create a new grdt on the next day.""" grdt = grdt.copy() days: int = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][grdt.month - 1] # NOTE: Don't consider about Julian Calendar, before Gregorian Calendar begins. if grdt.month == 2 and ( grdt.year % 4 == 0 and (grdt.year % 100 != 0 or grdt.year % 400 == 0) ): days = 29 if grdt.day == days: if grdt.month == 12: grdt.year += 1 # NOTE: Don't consider about B.C. 1, A.D. 1. grdt.month = 1 else: grdt.month += 1 grdt.day = 1 else: grdt.day += 1 return grdt def grdt_to_imdt( grdt: GregorianDateTime, imdt_timezone: t.Optional[str] ) -> ImperialDateTime: """Transform grdt to imdt.""" imdt_timezone = imdt_timezone or "+00:00" juld = grdt_to_juld(grdt.to_utc_naive()) tert = juld_to_tert(juld) mrsd = tert_to_mrsd(tert) imsn = mrsd_to_imsn(mrsd) return ImperialDateTime.from_standard_naive(imsn_to_imdt(imsn), imdt_timezone) def imdt_to_grdt(imdt: ImperialDateTime, grdt_timezone: str) -> GregorianDateTime: """Transform imdt to grdt.""" imsn = imdt_to_imsn(imdt.to_standard_naive()) mrsd = imsn_to_mrsd(imsn) tert = mrsd_to_tert(mrsd) juld = tert_to_juld(tert) return GregorianDateTime.from_utc_naive(juld_to_grdt(juld), grdt_timezone) @contextmanager def e( tag: str, attrib: t.Dict[str, str] = {}, text: str = "", parent: ET.Element = None ) -> t.Generator[t.Callable[[str, t.Dict[str, str], str], t.Any], None, t.Any]: """Create a XML element and pass a new context for sub elements.""" if parent is not None: element = ET.SubElement(parent, tag, attrib) else: element = ET.Element(tag, attrib) if text != "": element.text = text yield partial(e, parent=element) def text_y(y: t.Union[float, str], font_size: t.Union[float, str]) -> float: """ Caliculate the y value of the SVG text element. y: mm font_size: pt """ return float(y) + float(font_size) * 0.353 class CalendarImage(object): """Draw a imdt calendar image.""" BLACK: str = "#000000" # "#3b3b3b" BLUE: str = "#008dcc" # "#40a1cc" FONT_FAMILY_BOLD: str = """fot-tsukubrdgothic-std, "FOT-TsukuBRdGothic Std B", "FOT-筑紫B丸ゴシック Std B", TsukuBRdGothic-Bold, "筑紫B丸ゴシック ボールド", sans-serif""" # noqa: E501 FONT_FAMILY_REGULAR: str = """fot-tsukubrdgothic-std, "FOT-TsukuBRdGothic Std B", "FOT-筑紫B丸ゴシック Std B", TsukuBRdGothic-Bold, "筑紫B丸ゴシック ボールド", sans-serif""" # noqa: E501 FONT_SIZE_ANNOTATION: float = 8.0 FONT_SIZE_BOLD_LARGE: float = 32.0 FONT_SIZE_LARGE: float = 20.0 FONT_SIZE_SMALL: float = 10.0 GRAY_BLUE: str = "#6bb4d6" # "#a5c7d6" GRAY_RED: str = "#ff9d80" # "#ffb7a1" GRAY: str = "#888888" # "#999999" HEIGHT_DAYS_GAP: float = 4.5 HEIGHT_GRDT_BELT: float = 5.5 HEIGHT_TOP_SPACE: float = 15.0 RED: str = "#e03f0c" # "#e07553" SIZE_DAY_SQUARE: float = 22.5 STROKE_WIDTH_BOLD: str = "0.4mm" STROKE_WIDTH_THIN: str = "0.15mm" WHITE: str = "#ffffff" # "#ffffff" WIDTH_LEFT_SPACE: float = 45.0 grdt_timezone: str imdt: ImperialDateTime def __init__(self, imdt: ImperialDateTime, grdt_timezone: str): """Init.""" self.grdt_timezone = grdt_timezone self.imdt = imdt.copy() self.imdt.day = 1 self.imdt.hour = 0 self.imdt.minute = 0 self.imdt.second = 0 def draw_as_svg(self) -> str: """Draw a imdt calendar image as SVG string.""" svg = ET.Element( "svg", { "height": "148mm", "style": f""" background-color: {CalendarImage.WHITE}; """.strip(), "width": "210mm", "xmlns": "http://www.w3.org/2000/svg", }, ) with e("title", {}, f"帝國火星曆{self.imdt.year}年{self.imdt.month}月", parent=svg): pass with e( "g", {"font-family": CalendarImage.FONT_FAMILY_REGULAR}, parent=svg, ) as _e: self.__draw_title(_e) self.__draw_joubi(_e) self.__draw_static_frame(_e) self.__draw_imdt_days(_e) self.__draw_imdt_syukuzitu(_e) self.__draw_grdt_days(_e) # return ET.tostring(svg, encoding="utf-8", xml_declaration=True) return ET.tostring(svg, encoding="utf-8") def __draw_grdt_day(self, _e, grdt: GregorianDateTime) -> None: imdt = grdt_to_imdt(grdt, self.imdt.timezone) line_x = ( (((imdt.hour * 60) + imdt.minute) * 60 + imdt.second) / (24 * 60 * 60) * CalendarImage.SIZE_DAY_SQUARE ) if imdt.month == self.imdt.month: x1 = ( CalendarImage.WIDTH_LEFT_SPACE + line_x + CalendarImage.SIZE_DAY_SQUARE * ((imdt.day - 1) % 7) ) x2 = ( CalendarImage.WIDTH_LEFT_SPACE + line_x + CalendarImage.SIZE_DAY_SQUARE * ((imdt.day - 1) % 7) ) y1 = ( CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + ( CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP + CalendarImage.SIZE_DAY_SQUARE ) * ((imdt.day - 1) // 7) ) y2 = ( CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT + ( CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP + CalendarImage.SIZE_DAY_SQUARE ) * ((imdt.day - 1) // 7) ) with _e( "line", { "stroke": CalendarImage.BLACK, "stroke-width": CalendarImage.STROKE_WIDTH_THIN, "x1": f"{x1}mm", "x2": f"{x2}mm", "y1": f"{y1}mm", "y2": f"{y2}mm", }, ): pass next_grdt_day = next_grdt_day_of(grdt) next_grdt_day_imdt = grdt_to_imdt(next_grdt_day, self.imdt.timezone) next_line_x = ( ( ((next_grdt_day_imdt.hour * 60) + next_grdt_day_imdt.minute) * 60 + next_grdt_day_imdt.second ) / (24 * 60 * 60) * CalendarImage.SIZE_DAY_SQUARE ) is_drawable_on_beginning_of_month = ( next_line_x > 0.353 * CalendarImage.FONT_SIZE_SMALL * (len("10/10") * 0.6) + 1.5 ) if grdt.is_holiday or grdt.weekday == 7: color = CalendarImage.GRAY_RED elif grdt.weekday == 6: color = CalendarImage.GRAY_BLUE else: color = CalendarImage.GRAY if imdt.month == self.imdt.month: text = ( f"{grdt.month}/{grdt.day}" if grdt.day == 1 or (imdt.day == 1 and not is_drawable_on_beginning_of_month) else str(grdt.day) ) is_drawable_on_weekend = ( CalendarImage.SIZE_DAY_SQUARE - line_x ) > 0.353 * CalendarImage.FONT_SIZE_SMALL * (len(text) * 0.6) + 1.5 if ( imdt.day == ImperialYearMonth(self.imdt.year, self.imdt.month).days() and not is_drawable_on_weekend ): pass elif imdt.day % 7 == 0 and not is_drawable_on_weekend: self.__draw_text( _e, { "fill": color, "font-size": CalendarImage.FONT_SIZE_SMALL, "x": f"{CalendarImage.WIDTH_LEFT_SPACE + 1}mm", "y": CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + 0.5 + ( CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP + CalendarImage.SIZE_DAY_SQUARE ) * (imdt.day // 7), }, text, ) else: x = ( CalendarImage.WIDTH_LEFT_SPACE + line_x + 1 + CalendarImage.SIZE_DAY_SQUARE * ((imdt.day - 1) % 7) ) self.__draw_text( _e, { "fill": color, "font-size": CalendarImage.FONT_SIZE_SMALL, "x": f"{x}mm", "y": CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + 0.5 + ( CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP + CalendarImage.SIZE_DAY_SQUARE ) * ((imdt.day - 1) // 7), }, text, ) else: if is_drawable_on_beginning_of_month: self.__draw_text( _e, { "fill": color, "font-size": CalendarImage.FONT_SIZE_SMALL, "x": f"{CalendarImage.WIDTH_LEFT_SPACE + 1}mm", "y": CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + 0.5, }, f"{grdt.month}/{grdt.day}", ) def __draw_grdt_days(self, _e) -> None: drawing_grdt_day = imdt_to_grdt(self.imdt, self.grdt_timezone) drawing_grdt_day.hour = 0 drawing_grdt_day.minute = 0 drawing_grdt_day.second = 0 while ( grdt_to_imdt(drawing_grdt_day, self.imdt.timezone) < self.__next_imdt_month() ): self.__draw_grdt_day(_e, drawing_grdt_day) drawing_grdt_day = next_grdt_day_of(drawing_grdt_day) def __draw_imdt_days(self, _e) -> None: for day in range( 1, ImperialYearMonth(self.imdt.year, self.imdt.month).days() + 1 ): imdt = self.imdt.copy() imdt.day = day if imdt.holiday is not None or day % 7 == 1: color = CalendarImage.RED elif day % 7 == 0: color = CalendarImage.BLUE else: color = CalendarImage.BLACK x = ( CalendarImage.WIDTH_LEFT_SPACE + 1 + CalendarImage.SIZE_DAY_SQUARE * ((day - 1) % 7) ) y = ( CalendarImage.HEIGHT_TOP_SPACE + 1 + ( CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP ) * ((day - 1) // 7) ) self.__draw_text( _e, { "fill": color, "font-size": CalendarImage.FONT_SIZE_SMALL, "x": f"{x}mm", "y": y, }, str(day), ) if imdt.holiday is not None: day_width = ( CalendarImage.FONT_SIZE_SMALL * (0.353 - 0.06) * len(str(day)) ) self.__draw_text( _e, { "fill": color, "font-size": CalendarImage.FONT_SIZE_ANNOTATION, "style": f""" inline-size: {CalendarImage.SIZE_DAY_SQUARE - day_width - 1}mm; """.strip(), "x": f"{x + day_width}mm", "y": y + 0.2, }, str("・".join(imdt.holiday.names)), ) # with _e( # "foreignObject", # { # "height": f"{CalendarImage.SIZE_DAY_SQUARE}mm", # "requiredExtensions": "http://www.w3.org/1999/xhtml", # "width": f"{CalendarImage.SIZE_DAY_SQUARE - day_width - 1}mm", # "x": f"{x + day_width}mm", # "y": f"{y + 0.2}mm", # }, # ) as _e1: # with _e1( # "div", # { # "style": f""" # color: {color}; # font-size: {CalendarImage.FONT_SIZE_ANNOTATION}pt; # """.strip(), # "xmlns": "http://www.w3.org/1999/xhtml", # }, # str("・".join(imdt.holiday.names)), # ): # pass def __draw_imdt_syukuzitu(self, _e) -> None: pass def __draw_joubi(self, _e) -> None: for i, (joubi, color) in enumerate( [ ("日", CalendarImage.RED), ("月", CalendarImage.BLACK), ("火", CalendarImage.BLACK), ("水", CalendarImage.BLACK), ("木", CalendarImage.BLACK), ("金", CalendarImage.BLACK), ("土", CalendarImage.BLUE), ] ): x = ( CalendarImage.WIDTH_LEFT_SPACE + (CalendarImage.SIZE_DAY_SQUARE / 2) - 2.0 + CalendarImage.SIZE_DAY_SQUARE * i ) self.__draw_text( _e, { "fill": color, "font-size": CalendarImage.FONT_SIZE_SMALL, "x": f"{x}mm", "y": CalendarImage.HEIGHT_TOP_SPACE - 5, }, joubi, ) def __draw_static_frame(self, _e) -> None: days = ImperialYearMonth(self.imdt.year, self.imdt.month).days() for i in range(4): days_of_week = 6 if i == 3 and days == 27 else 7 y = ( CalendarImage.HEIGHT_TOP_SPACE + ( CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP ) * i ) with _e( "rect", { "fill": CalendarImage.WHITE, "height": f"{CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT}mm", "stroke-width": CalendarImage.STROKE_WIDTH_BOLD, "stroke": CalendarImage.BLACK, "width": f"{CalendarImage.SIZE_DAY_SQUARE * days_of_week}mm", "x": f"{CalendarImage.WIDTH_LEFT_SPACE}mm", "y": f"{y}mm", }, ): pass y1 = ( CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + ( CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP ) * i ) y2 = ( CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + ( CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP ) * i ) with _e( "line", { "stroke-width": CalendarImage.STROKE_WIDTH_THIN, "stroke": CalendarImage.BLACK, "x1": f"{CalendarImage.WIDTH_LEFT_SPACE}mm", "x2": f"{CalendarImage.WIDTH_LEFT_SPACE + CalendarImage.SIZE_DAY_SQUARE * days_of_week}mm", "y1": f"{y1}mm", "y2": f"{y2}mm", }, ): pass for j in range(days_of_week): y1 = ( CalendarImage.HEIGHT_TOP_SPACE + ( CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP ) * i ) y2 = ( CalendarImage.HEIGHT_TOP_SPACE + CalendarImage.SIZE_DAY_SQUARE + ( CalendarImage.SIZE_DAY_SQUARE + CalendarImage.HEIGHT_GRDT_BELT + CalendarImage.HEIGHT_DAYS_GAP ) * i ) with _e( "line", { "stroke-width": CalendarImage.STROKE_WIDTH_BOLD, "stroke": CalendarImage.BLACK, "x1": f"{CalendarImage.WIDTH_LEFT_SPACE + CalendarImage.SIZE_DAY_SQUARE * (j + 1)}mm", "x2": f"{CalendarImage.WIDTH_LEFT_SPACE + CalendarImage.SIZE_DAY_SQUARE * (j + 1)}mm", "y1": f"{y1}mm", "y2": f"{y2}mm", }, ): pass def __draw_text( self, _e, attrib: t.Dict[str, t.Union[str, float]], text: str ) -> None: attrib["y"] = "{}mm".format(text_y(attrib["y"], attrib["font-size"])) attrib["font-size"] = "{}pt".format(attrib["font-size"]) with _e("text", attrib, text): pass def __draw_title(self, _e) -> None: self.__draw_text( _e, { "fill": CalendarImage.BLACK, "font-size": CalendarImage.FONT_SIZE_LARGE, "x": "5mm", "y": 9.5, }, "帝國火星暦", ) self.__draw_text( _e, { "fill": CalendarImage.BLACK, "font-size": CalendarImage.FONT_SIZE_LARGE, "x": "11mm", "y": 18.0, }, f"{self.imdt.year}年", ) with _e( "svg", { "height": "44mm", "style": """ background-color: transparent; """.strip(), "width": f"{CalendarImage.WIDTH_LEFT_SPACE - 8}mm", "x": "0mm", "y": "28mm", }, ) as _e2: self.__draw_text( _e2, { "fill": CalendarImage.BLACK, "font-family": CalendarImage.FONT_FAMILY_BOLD, "font-size": CalendarImage.FONT_SIZE_BOLD_LARGE, "text-anchor": "middle", "x": "64%", "y": 0.0, }, f"{self.imdt.month}月", ) self.__draw_text( _e, { "fill": CalendarImage.BLACK, "font-size": CalendarImage.FONT_SIZE_LARGE, "x": "9.5mm", "y": 42.0, }, f"({self.imdt.japanese_month_name}月)", ) self.__draw_text( _e, { "fill": CalendarImage.GRAY, "font-size": CalendarImage.FONT_SIZE_ANNOTATION, "x": f"{CalendarImage.WIDTH_LEFT_SPACE - 5.5}mm", "y": 52.0, }, "~", ) with _e( "svg", { "height": "8mm", "style": """ background-color: transparent; """.strip(), "width": f"{CalendarImage.WIDTH_LEFT_SPACE - 8}mm", "x": "2mm", "y": "52mm", }, ) as _e2: weekdays = ["月", "火", "水", "木", "金", "土", "日"] grdt_start = imdt_to_grdt(self.imdt, self.grdt_timezone) grdt_start_weekday = weekdays[grdt_start.weekday - 1] grdt_end = imdt_to_grdt(self.__next_imdt_month(), self.grdt_timezone) grdt_end_weekday = weekdays[grdt_end.weekday - 1] self.__draw_text( _e2, { "fill": CalendarImage.GRAY, "font-size": CalendarImage.FONT_SIZE_ANNOTATION, "text-anchor": "end", "x": "100%", "y": 0.0, }, "{}/{}/{}({}){:02}:{:02}:{:02}".format( grdt_start.year, grdt_start.month, grdt_start.day, grdt_start_weekday, grdt_start.hour, grdt_start.minute, grdt_start.second, ), ) text = "" if grdt_start.year != grdt_end.year: text += f"{grdt_end.year}/" text += "{}/{}({}){:02}:{:02}:{:02}".format( grdt_end.month, grdt_end.day, grdt_end_weekday, grdt_end.hour, grdt_end.minute, grdt_end.second, ) self.__draw_text( _e2, { "fill": CalendarImage.GRAY, "font-size": CalendarImage.FONT_SIZE_ANNOTATION, "text-anchor": "end", "x": "100%", "y": 4.0, }, text, ) def __next_imdt_month(self) -> ImperialDateTime: next_month = ImperialYearMonth(self.imdt.year, self.imdt.month).next_month() return ImperialDateTime( next_month.year, next_month.month, 1, 0, 0, 0, self.imdt.timezone )
[ "functools.partial", "imperial_calendar.transform.imsn_to_mrsd", "imperial_calendar.transform.mrsd_to_tert", "imperial_calendar.transform.mrsd_to_imsn", "xml.etree.ElementTree.Element", "imperial_calendar.ImperialDateTime", "imperial_calendar.transform.tert_to_mrsd", "imperial_calendar.transform.juld_to_grdt", "imperial_calendar.transform.tert_to_juld", "imperial_calendar.transform.juld_to_tert", "xml.etree.ElementTree.SubElement", "imperial_calendar.transform.imsn_to_imdt", "imperial_calendar.ImperialYearMonth", "xml.etree.ElementTree.tostring" ]
[((1404, 1422), 'imperial_calendar.transform.juld_to_tert', 'juld_to_tert', (['juld'], {}), '(juld)\n', (1416, 1422), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((1434, 1452), 'imperial_calendar.transform.tert_to_mrsd', 'tert_to_mrsd', (['tert'], {}), '(tert)\n', (1446, 1452), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((1464, 1482), 'imperial_calendar.transform.mrsd_to_imsn', 'mrsd_to_imsn', (['mrsd'], {}), '(mrsd)\n', (1476, 1482), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((1746, 1764), 'imperial_calendar.transform.imsn_to_mrsd', 'imsn_to_mrsd', (['imsn'], {}), '(imsn)\n', (1758, 1764), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((1776, 1794), 'imperial_calendar.transform.mrsd_to_tert', 'mrsd_to_tert', (['mrsd'], {}), '(mrsd)\n', (1788, 1794), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((1806, 1824), 'imperial_calendar.transform.tert_to_juld', 'tert_to_juld', (['tert'], {}), '(tert)\n', (1818, 1824), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((1531, 1549), 'imperial_calendar.transform.imsn_to_imdt', 'imsn_to_imdt', (['imsn'], {}), '(imsn)\n', (1543, 1549), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((1869, 1887), 'imperial_calendar.transform.juld_to_grdt', 'juld_to_grdt', (['juld'], {}), '(juld)\n', (1881, 1887), False, 'from imperial_calendar.transform import grdt_to_juld, imdt_to_imsn, imsn_to_imdt, imsn_to_mrsd, juld_to_grdt, juld_to_tert, mrsd_to_imsn, mrsd_to_tert, tert_to_juld, tert_to_mrsd\n'), ((2213, 2247), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['parent', 'tag', 'attrib'], {}), '(parent, tag, attrib)\n', (2226, 2247), True, 'import xml.etree.ElementTree as ET\n'), ((2276, 2299), 'xml.etree.ElementTree.Element', 'ET.Element', (['tag', 'attrib'], {}), '(tag, attrib)\n', (2286, 2299), True, 'import xml.etree.ElementTree as ET\n'), ((2357, 2383), 'functools.partial', 'partial', (['e'], {'parent': 'element'}), '(e, parent=element)\n', (2364, 2383), False, 'from functools import partial\n'), ((4995, 5029), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['svg'], {'encoding': '"""utf-8"""'}), "(svg, encoding='utf-8')\n", (5006, 5029), True, 'import xml.etree.ElementTree as ET\n'), ((23099, 23187), 'imperial_calendar.ImperialDateTime', 'ImperialDateTime', (['next_month.year', 'next_month.month', '(1)', '(0)', '(0)', '(0)', 'self.imdt.timezone'], {}), '(next_month.year, next_month.month, 1, 0, 0, 0, self.imdt.\n timezone)\n', (23115, 23187), False, 'from imperial_calendar import GregorianDateTime, ImperialDateTime, ImperialYearMonth\n'), ((15109, 15159), 'imperial_calendar.ImperialYearMonth', 'ImperialYearMonth', (['self.imdt.year', 'self.imdt.month'], {}), '(self.imdt.year, self.imdt.month)\n', (15126, 15159), False, 'from imperial_calendar import GregorianDateTime, ImperialDateTime, ImperialYearMonth\n'), ((23020, 23070), 'imperial_calendar.ImperialYearMonth', 'ImperialYearMonth', (['self.imdt.year', 'self.imdt.month'], {}), '(self.imdt.year, self.imdt.month)\n', (23037, 23070), False, 'from imperial_calendar import GregorianDateTime, ImperialDateTime, ImperialYearMonth\n'), ((11154, 11204), 'imperial_calendar.ImperialYearMonth', 'ImperialYearMonth', (['self.imdt.year', 'self.imdt.month'], {}), '(self.imdt.year, self.imdt.month)\n', (11171, 11204), False, 'from imperial_calendar import GregorianDateTime, ImperialDateTime, ImperialYearMonth\n'), ((8123, 8173), 'imperial_calendar.ImperialYearMonth', 'ImperialYearMonth', (['self.imdt.year', 'self.imdt.month'], {}), '(self.imdt.year, self.imdt.month)\n', (8140, 8173), False, 'from imperial_calendar import GregorianDateTime, ImperialDateTime, ImperialYearMonth\n')]
""" NamedConf parser - file ``/etc/named.conf`` =========================================== NamedConf parser the file named configuration file. Named is a name server used by BIND. """ from insights.specs import Specs from insights.core.plugins import parser from insights.parsers import SkipException from insights.parsers.named_checkconf import NamedCheckconf @parser(Specs.named_conf) class NamedConf(NamedCheckconf): """ Class for parsing the file ``/etc/named.conf```, We use class ``NamedCheckConf`` to parse most of the named.conf configurations and class ``NamedConf`` to parse the `include` directives. .. note:: Please refer to the super-class :py:class:`insights.parsers.named_checkconf:NamedCheckConf` for more usage information. Attributes: includes (list): List of files in 'include' section. Raises: SkipException: When content is empty or cannot be parsed. Examples: >>> named_conf.includes ['/etc/crypto-policies/back-ends/bind.config'] """ def parse_content(self, content): includes = [] super(NamedConf, self).parse_content(content) try: for line in [l for l in content if l.strip().startswith('include ') and ';' in l]: includes.append(line.split(';')[0].replace('"', '').split()[1]) except IndexError: raise SkipException("Syntax error of include directive") self.includes = includes
[ "insights.core.plugins.parser", "insights.parsers.SkipException" ]
[((367, 391), 'insights.core.plugins.parser', 'parser', (['Specs.named_conf'], {}), '(Specs.named_conf)\n', (373, 391), False, 'from insights.core.plugins import parser\n'), ((1395, 1445), 'insights.parsers.SkipException', 'SkipException', (['"""Syntax error of include directive"""'], {}), "('Syntax error of include directive')\n", (1408, 1445), False, 'from insights.parsers import SkipException\n')]
from readwrite import get_data import pandas as pd import matplotlib.pyplot as plt from scipy.stats import gaussian_kde import numpy as np def scatter(path, name): data = get_data(path) pd_data = pd.DataFrame(data) plt.title("column 0 " + name) plt.plot(pd_data[0]) plt.show() plt.title("column 1 " + name) plt.plot(pd_data[1]) plt.show() plt.title("column 2 " + name) plt.plot(pd_data[2]) plt.show() return pd_data def boxplot(datas, names): column0 = [] column1 = [] column2 = [] labels = [] for i in range(len(datas)): column0.append(datas[i][0]) column1.append(datas[i][1]) column2.append(datas[i][2]) labels.append(names[i]) plt.title("Boxplot column 0") plt.boxplot(column0, labels=labels) plt.show() plt.title("Boxplot column 1") plt.boxplot(column1, labels=labels) plt.show() plt.title("Boxplot column 2") plt.boxplot(column2, labels=labels) plt.show() def draw_gaussian(datas): pdf_ticks = np.linspace(0, 10000, 100000, endpoint=False) density = gaussian_kde(datas) plt.plot(pdf_ticks, density(pdf_ticks), color='r') plt.show()
[ "pandas.DataFrame", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.boxplot", "scipy.stats.gaussian_kde", "numpy.linspace", "readwrite.get_data" ]
[((173, 187), 'readwrite.get_data', 'get_data', (['path'], {}), '(path)\n', (181, 187), False, 'from readwrite import get_data\n'), ((199, 217), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (211, 217), True, 'import pandas as pd\n'), ((220, 249), 'matplotlib.pyplot.title', 'plt.title', (["('column 0 ' + name)"], {}), "('column 0 ' + name)\n", (229, 249), True, 'import matplotlib.pyplot as plt\n'), ((251, 271), 'matplotlib.pyplot.plot', 'plt.plot', (['pd_data[0]'], {}), '(pd_data[0])\n', (259, 271), True, 'import matplotlib.pyplot as plt\n'), ((273, 283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (281, 283), True, 'import matplotlib.pyplot as plt\n'), ((286, 315), 'matplotlib.pyplot.title', 'plt.title', (["('column 1 ' + name)"], {}), "('column 1 ' + name)\n", (295, 315), True, 'import matplotlib.pyplot as plt\n'), ((317, 337), 'matplotlib.pyplot.plot', 'plt.plot', (['pd_data[1]'], {}), '(pd_data[1])\n', (325, 337), True, 'import matplotlib.pyplot as plt\n'), ((339, 349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (347, 349), True, 'import matplotlib.pyplot as plt\n'), ((352, 381), 'matplotlib.pyplot.title', 'plt.title', (["('column 2 ' + name)"], {}), "('column 2 ' + name)\n", (361, 381), True, 'import matplotlib.pyplot as plt\n'), ((383, 403), 'matplotlib.pyplot.plot', 'plt.plot', (['pd_data[2]'], {}), '(pd_data[2])\n', (391, 403), True, 'import matplotlib.pyplot as plt\n'), ((405, 415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (413, 415), True, 'import matplotlib.pyplot as plt\n'), ((665, 694), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxplot column 0"""'], {}), "('Boxplot column 0')\n", (674, 694), True, 'import matplotlib.pyplot as plt\n'), ((696, 731), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['column0'], {'labels': 'labels'}), '(column0, labels=labels)\n', (707, 731), True, 'import matplotlib.pyplot as plt\n'), ((733, 743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (741, 743), True, 'import matplotlib.pyplot as plt\n'), ((746, 775), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxplot column 1"""'], {}), "('Boxplot column 1')\n", (755, 775), True, 'import matplotlib.pyplot as plt\n'), ((777, 812), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['column1'], {'labels': 'labels'}), '(column1, labels=labels)\n', (788, 812), True, 'import matplotlib.pyplot as plt\n'), ((814, 824), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (822, 824), True, 'import matplotlib.pyplot as plt\n'), ((827, 856), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxplot column 2"""'], {}), "('Boxplot column 2')\n", (836, 856), True, 'import matplotlib.pyplot as plt\n'), ((858, 893), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['column2'], {'labels': 'labels'}), '(column2, labels=labels)\n', (869, 893), True, 'import matplotlib.pyplot as plt\n'), ((895, 905), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (903, 905), True, 'import matplotlib.pyplot as plt\n'), ((946, 991), 'numpy.linspace', 'np.linspace', (['(0)', '(10000)', '(100000)'], {'endpoint': '(False)'}), '(0, 10000, 100000, endpoint=False)\n', (957, 991), True, 'import numpy as np\n'), ((1004, 1023), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['datas'], {}), '(datas)\n', (1016, 1023), False, 'from scipy.stats import gaussian_kde\n'), ((1077, 1087), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1085, 1087), True, 'import matplotlib.pyplot as plt\n')]
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. import onnx from onnx import helper, TensorProto QUERY_TENSOR = helper.make_tensor_value_info('query_tensor', TensorProto.FLOAT, ['batch', 4]) ATTRIBUTE_TENSOR = helper.make_tensor_value_info('attribute_tensor', TensorProto.FLOAT, [4, 1]) BIAS_TENSOR = helper.make_tensor_value_info('bias_tensor', TensorProto.FLOAT, ['batch', -1]) OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, ['batch', 1]) nodes = [ helper.make_node( 'MatMul', ['query_tensor', 'attribute_tensor'], ['matmul'], ), helper.make_node( 'ReduceSum', ['bias_tensor'], ['reduce'], axes=[1] ), helper.make_node( 'Add', ['matmul', 'reduce'], ['output'], ), ] graph_def = helper.make_graph( nodes, 'dynamic_scoring', [ QUERY_TENSOR, ATTRIBUTE_TENSOR, BIAS_TENSOR, ], [OUTPUT], ) model_def = helper.make_model(graph_def, producer_name='dynamic.py', opset_imports=[onnx.OperatorSetIdProto(version=12)]) onnx.save(model_def, 'dynamic.onnx')
[ "onnx.helper.make_node", "onnx.save", "onnx.helper.make_tensor_value_info", "onnx.OperatorSetIdProto", "onnx.helper.make_graph" ]
[((169, 247), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""query_tensor"""', 'TensorProto.FLOAT', "['batch', 4]"], {}), "('query_tensor', TensorProto.FLOAT, ['batch', 4])\n", (198, 247), False, 'from onnx import helper, TensorProto\n'), ((267, 343), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""attribute_tensor"""', 'TensorProto.FLOAT', '[4, 1]'], {}), "('attribute_tensor', TensorProto.FLOAT, [4, 1])\n", (296, 343), False, 'from onnx import helper, TensorProto\n'), ((358, 436), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""bias_tensor"""', 'TensorProto.FLOAT', "['batch', -1]"], {}), "('bias_tensor', TensorProto.FLOAT, ['batch', -1])\n", (387, 436), False, 'from onnx import helper, TensorProto\n'), ((446, 518), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output"""', 'TensorProto.FLOAT', "['batch', 1]"], {}), "('output', TensorProto.FLOAT, ['batch', 1])\n", (475, 518), False, 'from onnx import helper, TensorProto\n'), ((863, 967), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""dynamic_scoring"""', '[QUERY_TENSOR, ATTRIBUTE_TENSOR, BIAS_TENSOR]', '[OUTPUT]'], {}), "(nodes, 'dynamic_scoring', [QUERY_TENSOR, ATTRIBUTE_TENSOR,\n BIAS_TENSOR], [OUTPUT])\n", (880, 967), False, 'from onnx import helper, TensorProto\n'), ((1136, 1172), 'onnx.save', 'onnx.save', (['model_def', '"""dynamic.onnx"""'], {}), "(model_def, 'dynamic.onnx')\n", (1145, 1172), False, 'import onnx\n'), ((534, 610), 'onnx.helper.make_node', 'helper.make_node', (['"""MatMul"""', "['query_tensor', 'attribute_tensor']", "['matmul']"], {}), "('MatMul', ['query_tensor', 'attribute_tensor'], ['matmul'])\n", (550, 610), False, 'from onnx import helper, TensorProto\n'), ((647, 715), 'onnx.helper.make_node', 'helper.make_node', (['"""ReduceSum"""', "['bias_tensor']", "['reduce']"], {'axes': '[1]'}), "('ReduceSum', ['bias_tensor'], ['reduce'], axes=[1])\n", (663, 715), False, 'from onnx import helper, TensorProto\n'), ((759, 816), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['matmul', 'reduce']", "['output']"], {}), "('Add', ['matmul', 'reduce'], ['output'])\n", (775, 816), False, 'from onnx import helper, TensorProto\n'), ((1098, 1133), 'onnx.OperatorSetIdProto', 'onnx.OperatorSetIdProto', ([], {'version': '(12)'}), '(version=12)\n', (1121, 1133), False, 'import onnx\n')]
import time current_time = time.localtime() hour = current_time.tm_hour print('The hour is', hour)
[ "time.localtime" ]
[((28, 44), 'time.localtime', 'time.localtime', ([], {}), '()\n', (42, 44), False, 'import time\n')]
from datetime import timedelta DEFAULT_REQUIRED_CONFIRMATIONS: int = 10 MAX_FILTER_INTERVAL: int = 100_000 DEFAULT_GAS_BUFFER_FACTOR: int = 10 DEFAULT_GAS_CHECK_BLOCKS: int = 100 KEEP_MRS_WITHOUT_CHANNEL: timedelta = timedelta(minutes=15) # A LockedTransfer message is roughly 1kb. Having 1000/min = 17/sec will be # hard to achieve outside of benchmarks for now. To have some safety margin for # bursts of messages, this is only enforced as an average over 5 minutes. MATRIX_RATE_LIMIT_ALLOWED_BYTES = 5_000_000 MATRIX_RATE_LIMIT_RESET_INTERVAL = timedelta(minutes=5) # Number of blocks after the close, during which MRs are still being accepted CHANNEL_CLOSE_MARGIN: int = 10
[ "datetime.timedelta" ]
[((218, 239), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (227, 239), False, 'from datetime import timedelta\n'), ((550, 570), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (559, 570), False, 'from datetime import timedelta\n')]
import re # TO-DO: refactor validators as below # https://pydantic-docs.helpmanual.io/usage/validators/ # Email regex mostly following RFC2822 specification. Covers ~99% of emails in use today # Allows groups of alphanumerics and some special characters separated by dots, # followed by a @, # followed by groups of alphanumerics and non-staring/non-ending dashes, # separated by dots. EMAIL_REGEX = re.compile( r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*" r"@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)" r"+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?" ) # Simple password regex, requires a minimum of 8 characters with at least one # uppercase letter, one lowercase letter, and one number. PASSWORD_REGEX = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,}$") # Simple username regex, number of characters between 3-24, allowing only alphanumerics, # dashes and underscores. USERNAME_REGEX = re.compile(r"^[a-zA-Z0-9-_]{3,24}$") def valid_email(email_string: str) -> bool: return EMAIL_REGEX.match(email_string) is not None def valid_password(password_string: str) -> bool: return PASSWORD_REGEX.match(password_string) is not None def valid_username(username_string: str) -> bool: return USERNAME_REGEX.match(username_string) is not None
[ "re.compile" ]
[((402, 561), 're.compile', 're.compile', (['"""[a-z0-9!#$%&\'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&\'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"""'], {}), '(\n "[a-z0-9!#$%&\'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&\'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"\n )\n', (412, 561), False, 'import re\n'), ((726, 778), 're.compile', 're.compile', (['"""^(?=.*\\\\d)(?=.*[a-z])(?=.*[A-Z]).{8,}$"""'], {}), "('^(?=.*\\\\d)(?=.*[a-z])(?=.*[A-Z]).{8,}$')\n", (736, 778), False, 'import re\n'), ((911, 946), 're.compile', 're.compile', (['"""^[a-zA-Z0-9-_]{3,24}$"""'], {}), "('^[a-zA-Z0-9-_]{3,24}$')\n", (921, 946), False, 'import re\n')]
#!/usr/bin/env python """ # Author: <NAME> # Created Time : Tue 29 Sep 2020 01:41:23 PM CST # File Name: function.py # Description: """ import torch import numpy as np import os import scanpy as sc from anndata import AnnData from .data import load_data from .net.vae import VAE from .net.utils import EarlyStopping from .metrics import batch_entropy_mixing_score, silhouette_score from .logger import create_logger from .plot import embedding def SCALE( data_list, batch_categories=None, profile='RNA', join='inner', batch_key='batch', batch_name='batch', min_features=600, min_cells=3, n_top_features=2000, batch_size=64, lr=2e-4, max_iteration=30000, seed=124, gpu=0, outdir='output/', projection=None, repeat=False, impute=None, chunk_size=20000, ignore_umap=False, verbose=False, assess=False, show=True, ): """ Single-Cell integrative Analysis via Latent feature Extraction Parameters ---------- data_list A path list of AnnData matrices to concatenate with. Each matrix is referred to as a 'batch'. batch_categories Categories for the batch annotation. By default, use increasing numbers. profile Specify the single-cell profile, RNA or ATAC. Default: RNA. join Use intersection ('inner') or union ('outer') of variables of different batches. batch_key Add the batch annotation to obs using this key. By default, batch_key='batch'. batch_name Use this annotation in obs as batches for training model. Default: 'batch'. min_features Filtered out cells that are detected in less than min_features. Default: 600. min_cells Filtered out genes that are detected in less than min_cells. Default: 3. n_top_features Number of highly-variable genes to keep. Default: 2000. batch_size Number of samples per batch to load. Default: 64. lr Learning rate. Default: 2e-4. max_iteration Max iterations for training. Training one batch_size samples is one iteration. Default: 30000. seed Random seed for torch and numpy. Default: 124. gpu Index of GPU to use if GPU is available. Default: 0. outdir Output directory. Default: 'output/'. projection Use for new dataset projection. Input the folder containing the pre-trained model. If None, don't do projection. Default: None. repeat Use with projection. If False, concatenate the reference and projection datasets for downstream analysis. If True, only use projection datasets. Default: False. impute If True, calculate the imputed gene expression and store it at adata.layers['impute']. Default: False. chunk_size Number of samples from the same batch to transform. Default: 20000. ignore_umap If True, do not perform UMAP for visualization and leiden for clustering. Default: False. verbose Verbosity, True or False. Default: False. assess If True, calculate the entropy_batch_mixing score and silhouette score to evaluate integration results. Default: False. Returns ------- The output folder contains: adata.h5ad The AnnData matrice after batch effects removal. The low-dimensional representation of the data is stored at adata.obsm['latent']. checkpoint model.pt contains the variables of the model and config.pt contains the parameters of the model. log.txt Records raw data information, filter conditions, model parameters etc. umap.pdf UMAP plot for visualization. """ np.random.seed(seed) # seed torch.manual_seed(seed) if torch.cuda.is_available(): # cuda device device='cuda' torch.cuda.set_device(gpu) else: device='cpu' outdir = outdir+'/' os.makedirs(outdir+'/checkpoint', exist_ok=True) log = create_logger('', fh=outdir+'log.txt') if not projection: adata, trainloader, testloader = load_data( data_list, batch_categories, join=join, profile=profile, n_top_features=n_top_features, batch_size=batch_size, chunk_size=chunk_size, min_features=min_features, min_cells=min_cells, batch_name=batch_name, batch_key=batch_key, log=log ) early_stopping = EarlyStopping(patience=10, checkpoint_file=outdir+'/checkpoint/model.pt') x_dim, n_domain = adata.shape[1], len(adata.obs['batch'].cat.categories) # model config enc = [['fc', 1024, 1, 'relu'],['fc', 10, '', '']] # TO DO dec = [['fc', x_dim, n_domain, 'sigmoid']] model = VAE(enc, dec, n_domain=n_domain) log.info('model\n'+model.__repr__()) model.fit( trainloader, lr=lr, max_iteration=max_iteration, device=device, early_stopping=early_stopping, verbose=verbose, ) torch.save({'n_top_features':adata.var.index, 'enc':enc, 'dec':dec, 'n_domain':n_domain}, outdir+'/checkpoint/config.pt') else: state = torch.load(projection+'/checkpoint/config.pt') n_top_features, enc, dec, n_domain = state['n_top_features'], state['enc'], state['dec'], state['n_domain'] model = VAE(enc, dec, n_domain=n_domain) model.load_model(projection+'/checkpoint/model.pt') model.to(device) adata, trainloader, testloader = load_data( data_list, batch_categories, join='outer', profile=profile, chunk_size=chunk_size, n_top_features=n_top_features, min_cells=0, min_features=min_features, batch_name=batch_name, batch_key=batch_key, log = log ) # log.info('Processed dataset shape: {}'.format(adata.shape)) adata.obsm['latent'] = model.encodeBatch(testloader, device=device) # save latent rep if impute: adata.layers['impute'] = model.encodeBatch(testloader, out='impute', batch_id=impute, device=device) log.info('Output dir: {}'.format(outdir)) if projection and (not repeat): ref = sc.read_h5ad(projection+'/adata.h5ad') adata = AnnData.concatenate( ref, adata, batch_categories=['reference', 'query'], batch_key='projection', index_unique=None ) adata.write(outdir+'adata.h5ad', compression='gzip') if not ignore_umap: #and adata.shape[0]<1e6: log.info('Plot umap') sc.pp.neighbors(adata, n_neighbors=30, use_rep='latent') sc.tl.umap(adata, min_dist=0.1) sc.tl.leiden(adata) # UMAP visualization sc.settings.figdir = outdir sc.set_figure_params(dpi=80, figsize=(10,10), fontsize=20) cols = ['batch', 'celltype', 'leiden'] color = [c for c in cols if c in adata.obs] if len(color) > 0: if projection and (not repeat): embedding(adata, groupby='projection', save='.pdf', show=show) else: sc.pl.umap(adata, color=color, save='.pdf', wspace=0.4, ncols=4, show=show) if assess: if len(adata.obs['batch'].cat.categories) > 1: entropy_score = batch_entropy_mixing_score(adata.obsm['X_umap'], adata.obs['batch']) log.info('batch_entropy_mixing_score: {:.3f}'.format(entropy_score)) if 'celltype' in adata.obs: sil_score = silhouette_score(adata.obsm['X_umap'], adata.obs['celltype'].cat.codes) log.info("silhouette_score: {:.3f}".format(sil_score)) adata.write(outdir+'adata.h5ad', compression='gzip') return adata def label_transfer(ref, query, rep='latent', label='celltype'): """ Label transfer Parameters ----------- ref reference containing the projected representations and labels query query data to transfer label rep representations to train the classifier. Default is `latent` label label name. Defautl is `celltype` stored in ref.obs Returns -------- transfered label """ from sklearn.neighbors import KNeighborsClassifier X_train = ref.obsm[rep] y_train = ref.obs[label] X_test = query.obsm[rep] knn = knn = KNeighborsClassifier().fit(X_train, y_train) y_test = knn.predict(X_test) return y_test
[ "scanpy.tl.umap", "numpy.random.seed", "os.makedirs", "torch.manual_seed", "torch.load", "scanpy.pp.neighbors", "scanpy.read_h5ad", "scanpy.pl.umap", "torch.save", "scanpy.tl.leiden", "sklearn.neighbors.KNeighborsClassifier", "torch.cuda.is_available", "torch.cuda.set_device", "anndata.AnnData.concatenate", "scanpy.set_figure_params" ]
[((3790, 3810), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3804, 3810), True, 'import numpy as np\n'), ((3822, 3845), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3839, 3845), False, 'import torch\n'), ((3854, 3879), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3877, 3879), False, 'import torch\n'), ((4016, 4066), 'os.makedirs', 'os.makedirs', (["(outdir + '/checkpoint')"], {'exist_ok': '(True)'}), "(outdir + '/checkpoint', exist_ok=True)\n", (4027, 4066), False, 'import os\n'), ((3925, 3951), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu'], {}), '(gpu)\n', (3946, 3951), False, 'import torch\n'), ((5239, 5370), 'torch.save', 'torch.save', (["{'n_top_features': adata.var.index, 'enc': enc, 'dec': dec, 'n_domain':\n n_domain}", "(outdir + '/checkpoint/config.pt')"], {}), "({'n_top_features': adata.var.index, 'enc': enc, 'dec': dec,\n 'n_domain': n_domain}, outdir + '/checkpoint/config.pt')\n", (5249, 5370), False, 'import torch\n'), ((5392, 5440), 'torch.load', 'torch.load', (["(projection + '/checkpoint/config.pt')"], {}), "(projection + '/checkpoint/config.pt')\n", (5402, 5440), False, 'import torch\n'), ((6490, 6530), 'scanpy.read_h5ad', 'sc.read_h5ad', (["(projection + '/adata.h5ad')"], {}), "(projection + '/adata.h5ad')\n", (6502, 6530), True, 'import scanpy as sc\n'), ((6545, 6664), 'anndata.AnnData.concatenate', 'AnnData.concatenate', (['ref', 'adata'], {'batch_categories': "['reference', 'query']", 'batch_key': '"""projection"""', 'index_unique': 'None'}), "(ref, adata, batch_categories=['reference', 'query'],\n batch_key='projection', index_unique=None)\n", (6564, 6664), False, 'from anndata import AnnData\n'), ((6868, 6924), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {'n_neighbors': '(30)', 'use_rep': '"""latent"""'}), "(adata, n_neighbors=30, use_rep='latent')\n", (6883, 6924), True, 'import scanpy as sc\n'), ((6933, 6964), 'scanpy.tl.umap', 'sc.tl.umap', (['adata'], {'min_dist': '(0.1)'}), '(adata, min_dist=0.1)\n', (6943, 6964), True, 'import scanpy as sc\n'), ((6973, 6992), 'scanpy.tl.leiden', 'sc.tl.leiden', (['adata'], {}), '(adata)\n', (6985, 6992), True, 'import scanpy as sc\n'), ((7075, 7134), 'scanpy.set_figure_params', 'sc.set_figure_params', ([], {'dpi': '(80)', 'figsize': '(10, 10)', 'fontsize': '(20)'}), '(dpi=80, figsize=(10, 10), fontsize=20)\n', (7095, 7134), True, 'import scanpy as sc\n'), ((8706, 8728), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (8726, 8728), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((7417, 7492), 'scanpy.pl.umap', 'sc.pl.umap', (['adata'], {'color': 'color', 'save': '""".pdf"""', 'wspace': '(0.4)', 'ncols': '(4)', 'show': 'show'}), "(adata, color=color, save='.pdf', wspace=0.4, ncols=4, show=show)\n", (7427, 7492), True, 'import scanpy as sc\n')]
from vyperlogix.hash import lists code_error = -404 code_noUpdate = -100 code_isUpdate = 400 code_revoked = -500 code_updated = 100 code_accepted = 200 code_valid = 300 code_invalid = -301 _info_site_address = 'www.VyperLogix.com' d_responses = lists.HashedLists2({code_error:'Warning: Unable to process your Registration.', code_invalid:'Your registration is not valid. Please make sure your payment has processed.', code_noUpdate:'You have the latest version.', code_revoked:'Your product key has been revoked. You may Register again to regain access.', code_updated:'Your registration has been updated and will be processed as quickly as possible.', code_accepted:'Your registration has been accepted; you should receive your Product Key shortly.', code_valid:'Your Product ID has been accepted; enjoy the power.', code_isUpdate:'There is a new version available you can download from %s.' % (_info_site_address) })
[ "vyperlogix.hash.lists.HashedLists2" ]
[((248, 979), 'vyperlogix.hash.lists.HashedLists2', 'lists.HashedLists2', (["{code_error: 'Warning: Unable to process your Registration.', code_invalid:\n 'Your registration is not valid. Please make sure your payment has processed.'\n , code_noUpdate: 'You have the latest version.', code_revoked:\n 'Your product key has been revoked. You may Register again to regain access.'\n , code_updated:\n 'Your registration has been updated and will be processed as quickly as possible.'\n , code_accepted:\n 'Your registration has been accepted; you should receive your Product Key shortly.'\n , code_valid: 'Your Product ID has been accepted; enjoy the power.',\n code_isUpdate: \n 'There is a new version available you can download from %s.' %\n _info_site_address}"], {}), "({code_error:\n 'Warning: Unable to process your Registration.', code_invalid:\n 'Your registration is not valid. Please make sure your payment has processed.'\n , code_noUpdate: 'You have the latest version.', code_revoked:\n 'Your product key has been revoked. You may Register again to regain access.'\n , code_updated:\n 'Your registration has been updated and will be processed as quickly as possible.'\n , code_accepted:\n 'Your registration has been accepted; you should receive your Product Key shortly.'\n , code_valid: 'Your Product ID has been accepted; enjoy the power.',\n code_isUpdate: \n 'There is a new version available you can download from %s.' %\n _info_site_address})\n", (266, 979), False, 'from vyperlogix.hash import lists\n')]
import os from distutils.core import setup from setuptools import find_packages, setup def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() def requirements(fname): for line in open(os.path.join(os.path.dirname(__file__), fname)): yield line.strip() setup( name='mwevents', version=read('VERSION').strip(), author='<NAME>', author_email='<EMAIL>', packages=find_packages(), scripts=[], url='http://pypi.python.org/pypi/mwevents', license=open('LICENSE').read(), description='Standardized public MediaWiki events for tools and Science.', long_description=read('README.rst'), install_requires=[ 'phpserialize', 'mediawiki-utilities', 'jsonable' ], test_suite='nose.collector', classifiers=[ "Development Status :: 4 - Beta", "Programming Language :: Python", "Programming Language :: Python :: 3", "Environment :: Other Environment", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: General", "Topic :: Utilities", "Topic :: Scientific/Engineering" ], )
[ "os.path.dirname", "setuptools.find_packages" ]
[((431, 446), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (444, 446), False, 'from setuptools import find_packages, setup\n'), ((239, 264), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (254, 264), False, 'import os\n'), ((136, 161), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n')]
# Generated by Django 2.2.13 on 2020-10-25 12:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('stream', '0002_viewcounter_name'), ] operations = [ migrations.AlterField( model_name='view', name='token', field=models.CharField(max_length=20, unique=True), ), ]
[ "django.db.models.CharField" ]
[((331, 375), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (347, 375), False, 'from django.db import migrations, models\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Apr 10 14:19:04 2020 @author: corkep """ import numpy as np import numpy.testing as nt import unittest from math import pi import math from scipy.linalg import logm, expm from spatialmath.base.transformsNd import * from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2 from spatialmath.base import sym import matplotlib.pyplot as plt class TestND(unittest.TestCase): def test_iseye(self): self.assertTrue(iseye(np.eye(1))) self.assertTrue(iseye(np.eye(2))) self.assertTrue(iseye(np.eye(3))) self.assertTrue(iseye(np.eye(5))) self.assertFalse(iseye(2 * np.eye(3))) self.assertFalse(iseye(-np.eye(3))) self.assertFalse(iseye(np.array([[1, 0, 0], [0, 1, 0]]))) self.assertFalse(iseye(np.array([1, 0, 0]))) def test_r2t(self): # 3D R = rotx(0.3) T = r2t(R) nt.assert_array_almost_equal(T[0:3,3], np.r_[0,0,0]) nt.assert_array_almost_equal(T[:3,:3], R) theta = sym.symbol('theta') R = rotx(theta) T = r2t(R) self.assertEqual(r2t(R).dtype, 'O') nt.assert_array_almost_equal(T[0:3,3], np.r_[0,0,0]) # nt.assert_array_almost_equal(T[:3,:3], R) self.assertTrue((T[:3,:3] == R).all()) # 2D R = rot2(0.3) T = r2t(R) nt.assert_array_almost_equal(T[0:2,2], np.r_[0,0]) nt.assert_array_almost_equal(T[:2,:2], R) theta = sym.symbol('theta') R = rot2(theta) T = r2t(R) self.assertEqual(r2t(R).dtype, 'O') nt.assert_array_almost_equal(T[0:2,2], np.r_[0,0]) nt.assert_array_almost_equal(T[:2,:2], R) with self.assertRaises(ValueError): r2t(3) with self.assertRaises(ValueError): r2t(np.eye(3,4)) def test_t2r(self): # 3D t=[1,2,3] T = trotx(0.3, t=t) R = t2r(T) nt.assert_array_almost_equal(T[:3,:3], R) nt.assert_array_almost_equal(transl(T), np.array(t)) # 2D t=[1,2] T = trot2(0.3, t=t) R = t2r(T) nt.assert_array_almost_equal(T[:2,:2], R) nt.assert_array_almost_equal(transl2(T), np.array(t)) with self.assertRaises(ValueError): t2r(3) with self.assertRaises(ValueError): r2t(np.eye(3,4)) def test_rt2tr(self): # 3D R = rotx(0.2) t = [3, 4, 5] T = rt2tr(R, t) nt.assert_array_almost_equal(t2r(T), R) nt.assert_array_almost_equal(transl(T), np.array(t)) theta = sym.symbol('theta') R = rotx(theta) self.assertEqual(r2t(R).dtype, 'O') # 2D R = rot2(0.2) t = [3, 4] T = rt2tr(R, t) nt.assert_array_almost_equal(t2r(T), R) nt.assert_array_almost_equal(transl2(T), np.array(t)) theta = sym.symbol('theta') R = rot2(theta) self.assertEqual(r2t(R).dtype, 'O') with self.assertRaises(ValueError): rt2tr(3, 4) with self.assertRaises(ValueError): rt2tr(np.eye(3,4), [1,2,3,4]) def test_tr2rt(self): # 3D T = trotx(0.3, t=[1,2,3]) R, t = tr2rt(T) nt.assert_array_almost_equal(T[:3,:3], R) nt.assert_array_almost_equal(T[:3,3], t) # 2D T = trot2(0.3, t=[1,2]) R, t = tr2rt(T) nt.assert_array_almost_equal(T[:2,:2], R) nt.assert_array_almost_equal(T[:2,2], t) with self.assertRaises(ValueError): R, t = tr2rt(3) with self.assertRaises(ValueError): R, t = tr2rt(np.eye(3,4)) def test_checks(self): # 3D case, with rotation matrix R = np.eye(3) self.assertTrue(isR(R)) self.assertFalse(isrot2(R)) self.assertTrue(isrot(R)) self.assertFalse(ishom(R)) self.assertTrue(ishom2(R)) self.assertFalse(isrot2(R, True)) self.assertTrue(isrot(R, True)) self.assertFalse(ishom(R, True)) self.assertTrue(ishom2(R, True)) # 3D case, invalid rotation matrix R = np.eye(3) R[0, 1] = 2 self.assertFalse(isR(R)) self.assertFalse(isrot2(R)) self.assertTrue(isrot(R)) self.assertFalse(ishom(R)) self.assertTrue(ishom2(R)) self.assertFalse(isrot2(R, True)) self.assertFalse(isrot(R, True)) self.assertFalse(ishom(R, True)) self.assertFalse(ishom2(R, True)) # 3D case, with rotation matrix T = np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]]) self.assertFalse(isR(T)) self.assertFalse(isrot2(T)) self.assertFalse(isrot(T)) self.assertTrue(ishom(T)) self.assertFalse(ishom2(T)) self.assertFalse(isrot2(T, True)) self.assertFalse(isrot(T, True)) self.assertTrue(ishom(T, True)) self.assertFalse(ishom2(T, True)) # 3D case, invalid rotation matrix T = np.array([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [0, 0, 0, 1]]) self.assertFalse(isR(T)) self.assertFalse(isrot2(T)) self.assertFalse(isrot(T)) self.assertTrue(ishom(T),) self.assertFalse(ishom2(T)) self.assertFalse(isrot2(T, True)) self.assertFalse(isrot(T, True)) self.assertFalse(ishom(T, True)) self.assertFalse(ishom2(T, True)) # 3D case, invalid bottom row T = np.array([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [9, 0, 0, 1]]) self.assertFalse(isR(T)) self.assertFalse(isrot2(T)) self.assertFalse(isrot(T)) self.assertTrue(ishom(T)) self.assertFalse(ishom2(T)) self.assertFalse(isrot2(T, True)) self.assertFalse(isrot(T, True)) self.assertFalse(ishom(T, True)) self.assertFalse(ishom2(T, True)) # skew matrices S = np.array([ [0, 2], [-2, 0]]) nt.assert_equal(isskew(S), True) S[0, 0] = 1 nt.assert_equal(isskew(S), False) S = np.array([ [0, -3, 2], [3, 0, -1], [-2, 1, 0]]) nt.assert_equal(isskew(S), True) S[0, 0] = 1 nt.assert_equal(isskew(S), False) def test_homog(self): nt.assert_almost_equal(e2h([1, 2, 3]), np.c_[1, 2, 3, 1].T) nt.assert_almost_equal(h2e([2, 4, 6, 2]), np.c_[1, 2, 3].T) def test_homtrans(self): #3D T = trotx(pi/2, t=[1,2,3]) v = [10,12,14] v2 = homtrans(T, v) nt.assert_almost_equal(v2, np.c_[11, -12, 15].T) v = np.c_[[10,12,14], [-3,-4,-5]] v2 = homtrans(T, v) nt.assert_almost_equal(v2, np.c_[[11, -12, 15], [-2,7,-1]]) #2D T = trot2(pi/2, t=[1,2]) v = [10,12] v2 = homtrans(T, v) nt.assert_almost_equal(v2, np.c_[-11, 12].T) v = np.c_[[10,12], [-3,-4]] v2 = homtrans(T, v) nt.assert_almost_equal(v2, np.c_[[-11, 12], [5, -1]]) with self.assertRaises(ValueError): T = trotx(pi/2, t=[1,2,3]) v = [10,12] v2 = homtrans(T, v) def test_skew(self): # 3D sk = skew([1, 2, 3]) self.assertEqual(sk.shape, (3,3)) nt.assert_almost_equal(sk + sk.T, np.zeros((3,3))) self.assertEqual(sk[2,1], 1) self.assertEqual(sk[0,2], 2) self.assertEqual(sk[1,0], 3) nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0]) # 2D sk = skew([1]) self.assertEqual(sk.shape, (2,2)) nt.assert_almost_equal(sk + sk.T, np.zeros((2,2))) self.assertEqual(sk[1,0], 1) nt.assert_almost_equal(sk.diagonal(), np.r_[0,0]) with self.assertRaises(ValueError): sk = skew([1,2]) def test_vex(self): # 3D t = [3, 4, 5] sk = skew(t) nt.assert_almost_equal(vex(sk), t) # 2D t = [3] sk = skew(t) nt.assert_almost_equal(vex(sk), t) def test_isskew(self): t = [3, 4, 5] sk = skew(t) self.assertTrue(isskew(sk)) sk[0,0] = 3 self.assertFalse(isskew(sk)) # 2D t = [3] sk = skew(t) self.assertTrue(isskew(sk)) sk[0,0] = 3 self.assertFalse(isskew(sk)) def test_isskewa(self): # 3D t = [3, 4, 5, 6, 7, 8] sk = skewa(t) self.assertTrue(isskewa(sk)) sk[0,0] = 3 self.assertFalse(isskew(sk)) sk = skewa(t) sk[3,3] = 3 self.assertFalse(isskew(sk)) # 2D t = [3, 4, 5] sk = skew(t) self.assertTrue(isskew(sk)) sk[0,0] = 3 self.assertFalse(isskew(sk)) sk = skewa(t) sk[2,2] = 3 self.assertFalse(isskew(sk)) def test_skewa(self): # 3D sk = skewa([1, 2, 3, 4, 5, 6]) self.assertEqual(sk.shape, (4,4)) nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0,0]) nt.assert_almost_equal(sk[-1,:], np.r_[0,0,0,0]) nt.assert_almost_equal(sk[:3,3], [1, 2, 3]) nt.assert_almost_equal(vex(sk[:3,:3]), [4,5,6]) # 2D sk = skewa([1, 2, 3]) self.assertEqual(sk.shape, (3,3)) nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0]) nt.assert_almost_equal(sk[-1,:], np.r_[0,0,0]) nt.assert_almost_equal(sk[:2,2], [1, 2]) nt.assert_almost_equal(vex(sk[:2,:2]), [3]) with self.assertRaises(ValueError): sk = skew([1,2]) def test_vexa(self): # 3D t = [1, 2, 3, 4, 5, 6] sk = skewa(t) nt.assert_almost_equal(vexa(sk), t) # 2D t = [1, 2, 3] sk = skewa(t) nt.assert_almost_equal(vexa(sk), t) def test_det(self): a = np.array([[1, 2], [3, 4]]) self.assertAlmostEqual(np.linalg.det(a), det(a)) x, y = sym.symbol('x y') a = np.array([[x, y], [y, x]]) self.assertEqual(det(a), x**2 - y**2) # ---------------------------------------------------------------------------------------# if __name__ == '__main__': unittest.main()
[ "unittest.main", "spatialmath.base.transforms3d.rotx", "spatialmath.base.transforms2d.ishom2", "spatialmath.base.transforms2d.isrot2", "numpy.testing.assert_almost_equal", "spatialmath.base.transforms2d.rot2", "numpy.zeros", "spatialmath.base.transforms3d.isrot", "spatialmath.base.transforms2d.trot2", "spatialmath.base.sym.symbol", "spatialmath.base.transforms3d.trotx", "numpy.array", "numpy.linalg.det", "spatialmath.base.transforms3d.transl", "numpy.eye", "numpy.testing.assert_array_almost_equal", "spatialmath.base.transforms3d.ishom", "spatialmath.base.transforms2d.transl2" ]
[((10322, 10337), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10335, 10337), False, 'import unittest\n'), ((994, 1003), 'spatialmath.base.transforms3d.rotx', 'rotx', (['(0.3)'], {}), '(0.3)\n', (998, 1003), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((1031, 1086), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:3, 3]', 'np.r_[0, 0, 0]'], {}), '(T[0:3, 3], np.r_[0, 0, 0])\n', (1059, 1086), True, 'import numpy.testing as nt\n'), ((1092, 1134), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, :3]', 'R'], {}), '(T[:3, :3], R)\n', (1120, 1134), True, 'import numpy.testing as nt\n'), ((1151, 1170), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (1161, 1170), False, 'from spatialmath.base import sym\n'), ((1183, 1194), 'spatialmath.base.transforms3d.rotx', 'rotx', (['theta'], {}), '(theta)\n', (1187, 1194), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((1266, 1321), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:3, 3]', 'np.r_[0, 0, 0]'], {}), '(T[0:3, 3], np.r_[0, 0, 0])\n', (1294, 1321), True, 'import numpy.testing as nt\n'), ((1444, 1453), 'spatialmath.base.transforms2d.rot2', 'rot2', (['(0.3)'], {}), '(0.3)\n', (1448, 1453), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((1481, 1533), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:2, 2]', 'np.r_[0, 0]'], {}), '(T[0:2, 2], np.r_[0, 0])\n', (1509, 1533), True, 'import numpy.testing as nt\n'), ((1540, 1582), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (1568, 1582), True, 'import numpy.testing as nt\n'), ((1599, 1618), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (1609, 1618), False, 'from spatialmath.base import sym\n'), ((1631, 1642), 'spatialmath.base.transforms2d.rot2', 'rot2', (['theta'], {}), '(theta)\n', (1635, 1642), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((1714, 1766), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:2, 2]', 'np.r_[0, 0]'], {}), '(T[0:2, 2], np.r_[0, 0])\n', (1742, 1766), True, 'import numpy.testing as nt\n'), ((1773, 1815), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (1801, 1815), True, 'import numpy.testing as nt\n'), ((2021, 2036), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(0.3)'], {'t': 't'}), '(0.3, t=t)\n', (2026, 2036), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2064, 2106), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, :3]', 'R'], {}), '(T[:3, :3], R)\n', (2092, 2106), True, 'import numpy.testing as nt\n'), ((2214, 2229), 'spatialmath.base.transforms2d.trot2', 'trot2', (['(0.3)'], {'t': 't'}), '(0.3, t=t)\n', (2219, 2229), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((2257, 2299), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (2285, 2299), True, 'import numpy.testing as nt\n'), ((2551, 2560), 'spatialmath.base.transforms3d.rotx', 'rotx', (['(0.2)'], {}), '(0.2)\n', (2555, 2560), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2733, 2752), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (2743, 2752), False, 'from spatialmath.base import sym\n'), ((2765, 2776), 'spatialmath.base.transforms3d.rotx', 'rotx', (['theta'], {}), '(theta)\n', (2769, 2776), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2847, 2856), 'spatialmath.base.transforms2d.rot2', 'rot2', (['(0.2)'], {}), '(0.2)\n', (2851, 2856), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3027, 3046), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (3037, 3046), False, 'from spatialmath.base import sym\n'), ((3059, 3070), 'spatialmath.base.transforms2d.rot2', 'rot2', (['theta'], {}), '(theta)\n', (3063, 3070), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3323, 3346), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(0.3)'], {'t': '[1, 2, 3]'}), '(0.3, t=[1, 2, 3])\n', (3328, 3346), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((3377, 3419), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, :3]', 'R'], {}), '(T[:3, :3], R)\n', (3405, 3419), True, 'import numpy.testing as nt\n'), ((3427, 3468), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, 3]', 't'], {}), '(T[:3, 3], t)\n', (3455, 3468), True, 'import numpy.testing as nt\n'), ((3494, 3514), 'spatialmath.base.transforms2d.trot2', 'trot2', (['(0.3)'], {'t': '[1, 2]'}), '(0.3, t=[1, 2])\n', (3499, 3514), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3546, 3588), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (3574, 3588), True, 'import numpy.testing as nt\n'), ((3596, 3637), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, 2]', 't'], {}), '(T[:2, 2], t)\n', (3624, 3637), True, 'import numpy.testing as nt\n'), ((3874, 3883), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3880, 3883), True, 'import numpy as np\n'), ((4276, 4285), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4282, 4285), True, 'import numpy as np\n'), ((4698, 4764), 'numpy.array', 'np.array', (['[[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]])\n', (4706, 4764), True, 'import numpy as np\n'), ((5160, 5226), 'numpy.array', 'np.array', (['[[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [0, 0, 0, 1]])\n', (5168, 5226), True, 'import numpy as np\n'), ((5619, 5685), 'numpy.array', 'np.array', (['[[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [9, 0, 0, 1]]'], {}), '([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [9, 0, 0, 1]])\n', (5627, 5685), True, 'import numpy as np\n'), ((6063, 6090), 'numpy.array', 'np.array', (['[[0, 2], [-2, 0]]'], {}), '([[0, 2], [-2, 0]])\n', (6071, 6090), True, 'import numpy as np\n'), ((6232, 6278), 'numpy.array', 'np.array', (['[[0, -3, 2], [3, 0, -1], [-2, 1, 0]]'], {}), '([[0, -3, 2], [3, 0, -1], [-2, 1, 0]])\n', (6240, 6278), True, 'import numpy as np\n'), ((6639, 6665), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(pi / 2)'], {'t': '[1, 2, 3]'}), '(pi / 2, t=[1, 2, 3])\n', (6644, 6665), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((6721, 6769), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[11, -12, 15].T'], {}), '(v2, np.c_[11, -12, 15].T)\n', (6743, 6769), True, 'import numpy.testing as nt\n'), ((6848, 6909), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[[11, -12, 15], [-2, 7, -1]]'], {}), '(v2, np.c_[[11, -12, 15], [-2, 7, -1]])\n', (6870, 6909), True, 'import numpy.testing as nt\n'), ((6935, 6958), 'spatialmath.base.transforms2d.trot2', 'trot2', (['(pi / 2)'], {'t': '[1, 2]'}), '(pi / 2, t=[1, 2])\n', (6940, 6958), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((7012, 7056), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[-11, 12].T'], {}), '(v2, np.c_[-11, 12].T)\n', (7034, 7056), True, 'import numpy.testing as nt\n'), ((7129, 7182), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[[-11, 12], [5, -1]]'], {}), '(v2, np.c_[[-11, 12], [5, -1]])\n', (7151, 7182), True, 'import numpy.testing as nt\n'), ((9186, 9238), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[-1, :]', 'np.r_[0, 0, 0, 0]'], {}), '(sk[-1, :], np.r_[0, 0, 0, 0])\n', (9208, 9238), True, 'import numpy.testing as nt\n'), ((9243, 9287), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[:3, 3]', '[1, 2, 3]'], {}), '(sk[:3, 3], [1, 2, 3])\n', (9265, 9287), True, 'import numpy.testing as nt\n'), ((9497, 9546), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[-1, :]', 'np.r_[0, 0, 0]'], {}), '(sk[-1, :], np.r_[0, 0, 0])\n', (9519, 9546), True, 'import numpy.testing as nt\n'), ((9552, 9593), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[:2, 2]', '[1, 2]'], {}), '(sk[:2, 2], [1, 2])\n', (9574, 9593), True, 'import numpy.testing as nt\n'), ((9996, 10022), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (10004, 10022), True, 'import numpy as np\n'), ((10096, 10113), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""x y"""'], {}), "('x y')\n", (10106, 10113), False, 'from spatialmath.base import sym\n'), ((10126, 10152), 'numpy.array', 'np.array', (['[[x, y], [y, x]]'], {}), '([[x, y], [y, x]])\n', (10134, 10152), True, 'import numpy as np\n'), ((2143, 2152), 'spatialmath.base.transforms3d.transl', 'transl', (['T'], {}), '(T)\n', (2149, 2152), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2154, 2165), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2162, 2165), True, 'import numpy as np\n'), ((2336, 2346), 'spatialmath.base.transforms2d.transl2', 'transl2', (['T'], {}), '(T)\n', (2343, 2346), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((2348, 2359), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2356, 2359), True, 'import numpy as np\n'), ((2692, 2701), 'spatialmath.base.transforms3d.transl', 'transl', (['T'], {}), '(T)\n', (2698, 2701), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2703, 2714), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2711, 2714), True, 'import numpy as np\n'), ((2985, 2995), 'spatialmath.base.transforms2d.transl2', 'transl2', (['T'], {}), '(T)\n', (2992, 2995), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((2997, 3008), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (3005, 3008), True, 'import numpy as np\n'), ((3941, 3950), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R'], {}), '(R)\n', (3947, 3950), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3976, 3984), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R'], {}), '(R)\n', (3981, 3984), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4011, 4019), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R'], {}), '(R)\n', (4016, 4019), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4045, 4054), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R'], {}), '(R)\n', (4051, 4054), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4081, 4096), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R', '(True)'], {}), '(R, True)\n', (4087, 4096), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4122, 4136), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R', '(True)'], {}), '(R, True)\n', (4127, 4136), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4163, 4177), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R', '(True)'], {}), '(R, True)\n', (4168, 4177), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4203, 4218), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R', '(True)'], {}), '(R, True)\n', (4209, 4218), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4364, 4373), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R'], {}), '(R)\n', (4370, 4373), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4399, 4407), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R'], {}), '(R)\n', (4404, 4407), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4434, 4442), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R'], {}), '(R)\n', (4439, 4442), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4468, 4477), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R'], {}), '(R)\n', (4474, 4477), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4504, 4519), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R', '(True)'], {}), '(R, True)\n', (4510, 4519), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4546, 4560), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R', '(True)'], {}), '(R, True)\n', (4551, 4560), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4587, 4601), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R', '(True)'], {}), '(R, True)\n', (4592, 4601), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4628, 4643), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R', '(True)'], {}), '(R, True)\n', (4634, 4643), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4823, 4832), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T'], {}), '(T)\n', (4829, 4832), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4859, 4867), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T'], {}), '(T)\n', (4864, 4867), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4893, 4901), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T'], {}), '(T)\n', (4898, 4901), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4928, 4937), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T'], {}), '(T)\n', (4934, 4937), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4964, 4979), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T', '(True)'], {}), '(T, True)\n', (4970, 4979), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5006, 5020), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T', '(True)'], {}), '(T, True)\n', (5011, 5020), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5046, 5060), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T', '(True)'], {}), '(T, True)\n', (5051, 5060), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5087, 5102), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T', '(True)'], {}), '(T, True)\n', (5093, 5102), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5285, 5294), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T'], {}), '(T)\n', (5291, 5294), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5321, 5329), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T'], {}), '(T)\n', (5326, 5329), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5355, 5363), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T'], {}), '(T)\n', (5360, 5363), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5391, 5400), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T'], {}), '(T)\n', (5397, 5400), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5427, 5442), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T', '(True)'], {}), '(T, True)\n', (5433, 5442), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5469, 5483), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T', '(True)'], {}), '(T, True)\n', (5474, 5483), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5510, 5524), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T', '(True)'], {}), '(T, True)\n', (5515, 5524), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5551, 5566), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T', '(True)'], {}), '(T, True)\n', (5557, 5566), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5744, 5753), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T'], {}), '(T)\n', (5750, 5753), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5780, 5788), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T'], {}), '(T)\n', (5785, 5788), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5814, 5822), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T'], {}), '(T)\n', (5819, 5822), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5849, 5858), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T'], {}), '(T)\n', (5855, 5858), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5885, 5900), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T', '(True)'], {}), '(T, True)\n', (5891, 5900), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5927, 5941), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T', '(True)'], {}), '(T, True)\n', (5932, 5941), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5968, 5982), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T', '(True)'], {}), '(T, True)\n', (5973, 5982), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((6009, 6024), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T', '(True)'], {}), '(T, True)\n', (6015, 6024), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((7246, 7272), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(pi / 2)'], {'t': '[1, 2, 3]'}), '(pi / 2, t=[1, 2, 3])\n', (7251, 7272), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((7477, 7493), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (7485, 7493), True, 'import numpy as np\n'), ((7786, 7802), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (7794, 7802), True, 'import numpy as np\n'), ((10054, 10070), 'numpy.linalg.det', 'np.linalg.det', (['a'], {}), '(a)\n', (10067, 10070), True, 'import numpy as np\n'), ((595, 604), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (601, 604), True, 'import numpy as np\n'), ((637, 646), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (643, 646), True, 'import numpy as np\n'), ((679, 688), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (685, 688), True, 'import numpy as np\n'), ((721, 730), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (727, 730), True, 'import numpy as np\n'), ((856, 888), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0]]'], {}), '([[1, 0, 0], [0, 1, 0]])\n', (864, 888), True, 'import numpy as np\n'), ((922, 941), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (930, 941), True, 'import numpy as np\n'), ((1940, 1952), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (1946, 1952), True, 'import numpy as np\n'), ((2486, 2498), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (2492, 2498), True, 'import numpy as np\n'), ((3247, 3259), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (3253, 3259), True, 'import numpy as np\n'), ((3780, 3792), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (3786, 3792), True, 'import numpy as np\n'), ((769, 778), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (775, 778), True, 'import numpy as np\n'), ((813, 822), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (819, 822), True, 'import numpy as np\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jan 27 16:40:49 2020 @author: krugefr1 """ import numpy as np import os try: import arthor except ImportError: arthor = None from rdkit import Chem from rdkit.Chem import rdSubstructLibrary import pickle import random import pandas as pd import copy from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep class Classification: def __init__(self, proj, datapath, dbpath, filename, chembldb, flimit=1e-3, MinClusterSize=20, clustering='UPGMA', calcDists=True, calcScores=False, smilesCol='Smiles', idCol='ID', onlyCompleteRings=False, useArthor=True): global arthor if not useArthor: arthor = None self.useArthor = useArthor self.proj = proj self.datapath = datapath self.dbpath = dbpath self.chembldb = chembldb self.flimit = flimit self.MinClusterSize = MinClusterSize self.clustering = clustering self.calcScores = calcScores self.calcDists = calcDists self.smilesCol = smilesCol self.idCol = idCol self.onlyCompleteRings = onlyCompleteRings # load data self.moldata_proj, self.distdata_proj = utilsDataPrep.PrepareData( self.proj, self.datapath, filename, distMeasure='Tanimoto', FP='Morgan2', calcDists=self.calcDists, smilesCol=smilesCol) if arthor is not None: if not os.path.isdir(dbpath): os.mkdir(dbpath) # set up project database for arthor substructure matching df = self.moldata_proj[[smilesCol, idCol]] df.to_csv('./arthor/{0}.smi'.format(self.proj), header=None, index=None, sep=' ') os.system('smi2atdb -j 0 -l {0}{1}.smi {0}{1}.atdb'.format( self.dbpath, self.proj)) os.system('atdb2fp -j 0 {0}{1}.atdb'.format( self.dbpath, self.proj)) self.proj_db = arthor.SubDb('{0}{1}.atdb'.format( self.dbpath, self.proj)) else: if type(dbpath) == rdSubstructLibrary.SubstructLibrary: self.proj_db = dbpath self.db_size = len(self.proj_db) else: if not os.path.exists(dbpath): print("creating database") mols = rdSubstructLibrary.CachedTrustedSmilesMolHolder() fps = rdSubstructLibrary.PatternHolder() for smi in self.moldata_proj[smilesCol]: m = Chem.MolFromSmiles(smi) mols.AddSmiles(Chem.MolToSmiles(m)) fps.AddFingerprint(Chem.PatternFingerprint(m)) self.proj_db = rdSubstructLibrary.SubstructLibrary( mols, fps) self.db_size = len(mols) pickle.dump(self.proj_db, open(dbpath, 'wb+')) else: self.proj_db = pickle.load(open(dbpath, 'rb')) self.db_size = len(self.proj_db) def AssignSeriesToMCS(self, MCSdict): # assign series to MCS of selected clusters smartslist = [v[2] for v in MCSdict.values()] MolAssign_prel = {} MolAssignment = {} for s in range(len(smartslist)): if arthor is not None: res = self.proj_db.search(smartslist[s]) mols = [int(i) for i in res.to_array()] else: mols = self.proj_db.GetMatches(Chem.MolFromSmarts( smartslist[s]), maxResults=self.db_size) MolAssign_prel[list(MCSdict.keys())[s]] = list(mols) # remove all series that are entirely in another series for key1 in MolAssign_prel.keys(): add = 1 for key2 in MolAssign_prel.keys(): if key2 != key1: if set(MolAssign_prel[key1]).issubset( set(MolAssign_prel[key2])): if set(MolAssign_prel[key2]).issubset( set(MolAssign_prel[key1])) and ( MCSdict[key1][0] >= MCSdict[key2][0]): add = 1 else: add = 0 break if add == 1 and MolAssign_prel[key1] not in MolAssignment.values(): MolAssignment[key1] = MolAssign_prel[key1] MolAssignment = { k: MolAssignment[k] for k in MolAssignment.keys() if len(MolAssignment[k]) > self.MinClusterSize } if self.calcScores: MCSdict = { k: (MCSdict[k][0], len(MolAssignment[k]), MCSdict[k][2], MCSdict[k][3], MolAssignment[k]) for k in MolAssignment.keys() } else: MCSdict = { k: (MCSdict[k][0], len(MolAssignment[k]), MCSdict[k][2], MolAssignment[k]) for k in MolAssignment.keys() } return MolAssignment, MCSdict def ApplyClustering(self): # apply custering and calculate MCS if self.clustering == 'UPGMA': MCSdict = UPGMAclustering.ApplyUPGMA( self.distdata_proj, self.moldata_proj, self.chembldb, self.flimit, self.MinClusterSize, self.calcScores, onlyCompleteRings=self.onlyCompleteRings, useArthor=self.useArthor) elif self.clustering == 'Butina': distdata = copy.deepcopy(self.distdata_proj) MCSdict = Butinaclustering.ApplyButina(distdata, self.moldata_proj, self.chembldb, self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor) else: print('Clustering algorithm not implemented.') return # assign series through substructure matching and filtering self.MolAssignment, self.MCSdict = self.AssignSeriesToMCS(MCSdict) # prepare and save output self.moldata_proj['ClusterID'] = [ list() for x in range(self.moldata_proj.shape[0]) ] for k, vs in self.MolAssignment.items(): for v in vs: self.moldata_proj['ClusterID'].iloc[v].append(k) if self.clustering == 'UPGMA': self.moldata_proj.to_csv('{0}moldata_UPGMA.csv'.format( self.datapath)) with open('{0}ClusterData_UPGMA.pkl'.format(self.datapath), 'wb') as fileout: pickle.dump(self.MCSdict, fileout) elif self.clustering == 'Butina': self.moldata_proj.to_csv('{0}moldata_Butina.csv'.format( self.datapath)) with open('{0}ClusterData_Butina.pkl'.format(self.datapath), 'wb') as fileout: pickle.dump(self.MCSdict, fileout) else: print('Clustering algorithm not implemented.') return def CalculatePerformance(self, seriescolumn='series assignment'): # benchmark the automated classification against a different (probably human-defined) classification # human-defined compound assignment is specified in the column "seriescolumn" of the dataframe "moldata" # automated classification assignment specified in dict "MolAssignment" # calculates F1 score of automatically-identified series w.r.t. to all human-defined series, then links # each automatically-identified series to the human-defined series with highest F1 score scaflist = list(set(self.moldata_proj['scaffold'].tolist())) scaflist.sort() intersect_matrix = np.zeros((len(scaflist), len(self.MolAssignment))) NMatchScaf = [] NMatchCluster = np.array([len(v) for v in self.MolAssignment.values()]) for scaf_ind in range(len(scaflist)): mollist = self.moldata_proj[self.idCol].loc[self.moldata_proj[ seriescolumn].map(lambda x: scaflist[scaf_ind] in x)].tolist() intersect_scaf = np.array([ len(list(set(mollist) & set(clusterlist))) for clusterlist in self.MolAssignment.values() ]) intersect_matrix[scaf_ind, :] = intersect_scaf NMatchScaf.append(len(mollist)) NMatchScaf = np.array(NMatchScaf) RecallMatrix = intersect_matrix / NMatchScaf[:, None] PrecMatrix = intersect_matrix / NMatchCluster[None, :] Fscore = (2 * RecallMatrix * PrecMatrix) / (RecallMatrix + PrecMatrix + 1e-9) maxscore = np.argmax(Fscore, axis=0) PrecVector = np.zeros(len(self.MolAssignment)) RecallVector = np.zeros(len(self.MolAssignment)) FscoreVector = np.zeros(len(self.MolAssignment)) LinkVector = [] for col in range(len(self.MolAssignment)): PrecVector[col] = PrecMatrix[maxscore[col], col] RecallVector[col] = RecallMatrix[maxscore[col], col] FscoreVector[col] = Fscore[maxscore[col], col] LinkVector.append((list(self.MolAssignment.keys())[col], scaflist[maxscore[col]])) LinkVector = np.array(LinkVector) self.PerformanceClusters = { 'recall': RecallVector, 'precision': PrecVector, 'Fscore': FscoreVector, 'linked series': LinkVector } if self.clustering == 'UPGMA': with open('{0}PerformanceData_UPGMA.pkl'.format(self.datapath), 'wb') as fileout: pickle.dump(self.PerformanceClusters, fileout) elif self.clustering == 'Butina': with open('{0}PerformanceData_Butina.pkl'.format(self.datapath), 'wb') as fileout: pickle.dump(self.PerformanceClusters, fileout) else: print('Clustering algorithm not implemented.') return def ClassificationCrossValidation(self, fraction_sample, N_sample): samplerange = np.arange(len(self.moldata_proj)) invfrac = 1 / fraction_sample self.SampledSeries = {} for i in range(N_sample): # random sampling random.seed((i + 1) * 10) molinds = random.sample(population=samplerange.tolist(), k=int( len(samplerange.tolist()) // invfrac)) moldata_sample = self.moldata_proj.iloc[molinds] distdata_sample = self.distdata_proj[molinds, :] distdata_sample = distdata_sample[:, molinds] # apply custering and calculate MCS if self.clustering == 'UPGMA': MCSdict_sampled = UPGMAclustering.ApplyUPGMA( distdata_sample, moldata_sample, self.chembldb, self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor) elif self.clustering == 'Butina': MCSdict_sampled = Butinaclustering.ApplyButina( distdata_sample, moldata_sample, self.chembldb, self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor) else: print('Clustering algorithm not implemented.') return # assign series through substructure matching and filtering MolAssignment_sampled, MCSdict_sampled = self.AssignSeriesToMCS( MCSdict_sampled) self.SampledSeries[i] = MCSdict_sampled if self.clustering == 'UPGMA': with open( '{0}SampledSeries{1}_UPGMA.pkl'.format( self.datapath, int(fraction_sample * 100)), 'wb') as fileout: pickle.dump(self.SampledSeries, fileout) elif self.clustering == 'Butina': with open( '{0}SampledSeries{1}_Butina.pkl'.format( self.datapath, int(fraction_sample * 100)), 'wb') as fileout: pickle.dump(self.SampledSeries, fileout) else: print('Clustering algorithm not implemented.') return return def EvaluationCrossValidation(self): # Compare the classification obtained from sampling ("SampledSeries") against the original classification ("MCSdict") self.EvalCrossval = pd.DataFrame( columns=['series id', 'repetition', 'fscore']) for rep in self.SampledSeries.keys(): rep_dict = self.SampledSeries[rep] keylist = [k for k in rep_dict.keys()] for k in self.MCSdict.keys(): intersect = [ len(set(self.MCSdict[k][-1]) & set(v[-1])) for v in rep_dict.values() ] recall = np.array([ intersect[i] / len(rep_dict[keylist[i]][-1]) for i in range(len(keylist)) ]) precision = np.array(intersect) / len(self.MCSdict[k][-1]) fscore = max(2 * recall * precision / (recall + precision + 1e-9)) row = [int(k), int(rep), fscore] self.EvalCrossval.loc[len(self.EvalCrossval)] = row self.EvalCrossval['series id'] = self.EvalCrossval['series id'].apply( int)
[ "rdkit.Chem.PatternFingerprint", "os.mkdir", "pickle.dump", "numpy.argmax", "rdkit.Chem.MolToSmiles", "pandas.DataFrame", "os.path.exists", "random.seed", "rdkit.Chem.rdSubstructLibrary.PatternHolder", "copy.deepcopy", "automated_series_classification.Butinaclustering.ApplyButina", "automated_series_classification.utilsDataPrep.PrepareData", "rdkit.Chem.rdSubstructLibrary.CachedTrustedSmilesMolHolder", "os.path.isdir", "rdkit.Chem.MolFromSmarts", "rdkit.Chem.rdSubstructLibrary.SubstructLibrary", "numpy.array", "automated_series_classification.UPGMAclustering.ApplyUPGMA", "rdkit.Chem.MolFromSmiles" ]
[((1502, 1653), 'automated_series_classification.utilsDataPrep.PrepareData', 'utilsDataPrep.PrepareData', (['self.proj', 'self.datapath', 'filename'], {'distMeasure': '"""Tanimoto"""', 'FP': '"""Morgan2"""', 'calcDists': 'self.calcDists', 'smilesCol': 'smilesCol'}), "(self.proj, self.datapath, filename, distMeasure=\n 'Tanimoto', FP='Morgan2', calcDists=self.calcDists, smilesCol=smilesCol)\n", (1527, 1653), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((9204, 9224), 'numpy.array', 'np.array', (['NMatchScaf'], {}), '(NMatchScaf)\n', (9212, 9224), True, 'import numpy as np\n'), ((9507, 9532), 'numpy.argmax', 'np.argmax', (['Fscore'], {'axis': '(0)'}), '(Fscore, axis=0)\n', (9516, 9532), True, 'import numpy as np\n'), ((10112, 10132), 'numpy.array', 'np.array', (['LinkVector'], {}), '(LinkVector)\n', (10120, 10132), True, 'import numpy as np\n'), ((13547, 13606), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['series id', 'repetition', 'fscore']"}), "(columns=['series id', 'repetition', 'fscore'])\n", (13559, 13606), True, 'import pandas as pd\n'), ((5708, 5916), 'automated_series_classification.UPGMAclustering.ApplyUPGMA', 'UPGMAclustering.ApplyUPGMA', (['self.distdata_proj', 'self.moldata_proj', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'onlyCompleteRings': 'self.onlyCompleteRings', 'useArthor': 'self.useArthor'}), '(self.distdata_proj, self.moldata_proj, self.\n chembldb, self.flimit, self.MinClusterSize, self.calcScores,\n onlyCompleteRings=self.onlyCompleteRings, useArthor=self.useArthor)\n', (5734, 5916), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((11138, 11163), 'random.seed', 'random.seed', (['((i + 1) * 10)'], {}), '((i + 1) * 10)\n', (11149, 11163), False, 'import random\n'), ((1784, 1805), 'os.path.isdir', 'os.path.isdir', (['dbpath'], {}), '(dbpath)\n', (1797, 1805), False, 'import os\n'), ((1823, 1839), 'os.mkdir', 'os.mkdir', (['dbpath'], {}), '(dbpath)\n', (1831, 1839), False, 'import os\n'), ((6102, 6135), 'copy.deepcopy', 'copy.deepcopy', (['self.distdata_proj'], {}), '(self.distdata_proj)\n', (6115, 6135), False, 'import copy\n'), ((6158, 6316), 'automated_series_classification.Butinaclustering.ApplyButina', 'Butinaclustering.ApplyButina', (['distdata', 'self.moldata_proj', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'useArthor': 'self.useArthor'}), '(distdata, self.moldata_proj, self.chembldb,\n self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor\n )\n', (6186, 6316), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((7407, 7441), 'pickle.dump', 'pickle.dump', (['self.MCSdict', 'fileout'], {}), '(self.MCSdict, fileout)\n', (7418, 7441), False, 'import pickle\n'), ((10501, 10547), 'pickle.dump', 'pickle.dump', (['self.PerformanceClusters', 'fileout'], {}), '(self.PerformanceClusters, fileout)\n', (10512, 10547), False, 'import pickle\n'), ((11661, 11821), 'automated_series_classification.UPGMAclustering.ApplyUPGMA', 'UPGMAclustering.ApplyUPGMA', (['distdata_sample', 'moldata_sample', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'useArthor': 'self.useArthor'}), '(distdata_sample, moldata_sample, self.chembldb,\n self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor\n )\n', (11687, 11821), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((12913, 12953), 'pickle.dump', 'pickle.dump', (['self.SampledSeries', 'fileout'], {}), '(self.SampledSeries, fileout)\n', (12924, 12953), False, 'import pickle\n'), ((2650, 2672), 'os.path.exists', 'os.path.exists', (['dbpath'], {}), '(dbpath)\n', (2664, 2672), False, 'import os\n'), ((2748, 2797), 'rdkit.Chem.rdSubstructLibrary.CachedTrustedSmilesMolHolder', 'rdSubstructLibrary.CachedTrustedSmilesMolHolder', ([], {}), '()\n', (2795, 2797), False, 'from rdkit.Chem import rdSubstructLibrary\n'), ((2824, 2858), 'rdkit.Chem.rdSubstructLibrary.PatternHolder', 'rdSubstructLibrary.PatternHolder', ([], {}), '()\n', (2856, 2858), False, 'from rdkit.Chem import rdSubstructLibrary\n'), ((3138, 3184), 'rdkit.Chem.rdSubstructLibrary.SubstructLibrary', 'rdSubstructLibrary.SubstructLibrary', (['mols', 'fps'], {}), '(mols, fps)\n', (3173, 3184), False, 'from rdkit.Chem import rdSubstructLibrary\n'), ((3922, 3955), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['smartslist[s]'], {}), '(smartslist[s])\n', (3940, 3955), False, 'from rdkit import Chem\n'), ((7714, 7748), 'pickle.dump', 'pickle.dump', (['self.MCSdict', 'fileout'], {}), '(self.MCSdict, fileout)\n', (7725, 7748), False, 'import pickle\n'), ((10723, 10769), 'pickle.dump', 'pickle.dump', (['self.PerformanceClusters', 'fileout'], {}), '(self.PerformanceClusters, fileout)\n', (10734, 10769), False, 'import pickle\n'), ((12034, 12196), 'automated_series_classification.Butinaclustering.ApplyButina', 'Butinaclustering.ApplyButina', (['distdata_sample', 'moldata_sample', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'useArthor': 'self.useArthor'}), '(distdata_sample, moldata_sample, self.chembldb,\n self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor\n )\n', (12062, 12196), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((13202, 13242), 'pickle.dump', 'pickle.dump', (['self.SampledSeries', 'fileout'], {}), '(self.SampledSeries, fileout)\n', (13213, 13242), False, 'import pickle\n'), ((14161, 14180), 'numpy.array', 'np.array', (['intersect'], {}), '(intersect)\n', (14169, 14180), True, 'import numpy as np\n'), ((2948, 2971), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (2966, 2971), False, 'from rdkit import Chem\n'), ((3011, 3030), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {}), '(m)\n', (3027, 3030), False, 'from rdkit import Chem\n'), ((3075, 3101), 'rdkit.Chem.PatternFingerprint', 'Chem.PatternFingerprint', (['m'], {}), '(m)\n', (3098, 3101), False, 'from rdkit import Chem\n')]
import openliveq as olq import pytest import os from .test_base import TestBase class TestCollection(TestBase): def test_df(self, c): result = c.df assert result["社会保険事務所"] == 1 assert result["国民年金"] == 4 def test_cf(self, c): result = c.cf assert result["社会保険事務所"] > 1 assert result["国民年金"] > 4 @pytest.fixture def c(self, parsed_questions): result = olq.Collection() for ws in parsed_questions: result.add(ws) return result @pytest.fixture def ff(self): return olq.FeatureFactory() @pytest.fixture def parsed_questions(self, ff, questions): result = [] for q in questions: result.append(ff.parse_question(q)) return result
[ "openliveq.Collection", "openliveq.FeatureFactory" ]
[((427, 443), 'openliveq.Collection', 'olq.Collection', ([], {}), '()\n', (441, 443), True, 'import openliveq as olq\n'), ((583, 603), 'openliveq.FeatureFactory', 'olq.FeatureFactory', ([], {}), '()\n', (601, 603), True, 'import openliveq as olq\n')]
# Copyright (c) 2010 Resolver Systems Ltd. # All Rights Reserved # try: import unittest2 as unittest except ImportError: import unittest from functionaltest import FunctionalTest import key_codes from textwrap import dedent class Test_2734_ClearCells(FunctionalTest): def test_delete_key_clears_selected_cells(self): self.assert_key_deletes_cells(key_codes.DELETE) def test_backspace_key_clears_selected_cells(self): self.assert_key_deletes_cells(key_codes.BACKSPACE) def assert_key_deletes_cells(self, key_code): # * Harold logs in and creates a new sheet self.login_and_create_new_sheet() # * He enters some data in A1:A3 self.enter_cell_text(1, 1, 'a1') self.enter_cell_text(1, 2, 'a2') self.enter_cell_text(1, 3, 'a3') self.wait_for_cell_value(1, 3, 'a3') # * He clicks on A1 and hits delete self.click_on_cell(1, 1) self.human_key_press(key_code) # * He sees the value in A1 disappear while the others remain self.wait_for_cell_value(1, 1, '') self.wait_for_cell_value(1, 2, 'a2') self.wait_for_cell_value(1, 3, 'a3') # * He selects the range a2:a3 self.select_range_with_shift_click((1, 2), (1, 3)) # He hits delete self.human_key_press(key_code) # * He sees that all the cells are now cleared self.wait_for_cell_value(1, 1, '') self.wait_for_cell_value(1, 2, '') self.wait_for_cell_value(1, 3, '') def test_delete_key_while_editing_still_does_what_it_should(self): # * Harold logs in and creates a new sheet self.login_and_create_new_sheet() # * He enters three characters in A1 self.open_cell_for_editing(1, 1) self.human_key_press(key_codes.NUMBER_1) self.human_key_press(key_codes.NUMBER_2) self.human_key_press(key_codes.NUMBER_3) # * He moves left twice self.human_key_press(key_codes.LEFT) self.human_key_press(key_codes.LEFT) # He hits delete self.human_key_press(key_codes.DELETE) # the middle character is now missing self.wait_for_cell_editor_content('13') def test_backspace_key_while_editing_still_does_what_it_should(self): # * Harold logs in and creates a new sheet self.login_and_create_new_sheet() # * He enters three characters in A1 self.open_cell_for_editing(1, 1) self.human_key_press(key_codes.NUMBER_1) self.human_key_press(key_codes.NUMBER_2) self.human_key_press(key_codes.NUMBER_3) # * He moves left once self.human_key_press(key_codes.LEFT) # He hits backspace self.human_key_press(key_codes.BACKSPACE) # the middle character is now missing self.wait_for_cell_editor_content('13') def test_can_clear_cell_from_usercode(self): # * Harold logs in and creates a new sheet self.login_and_create_new_sheet() # * He enters some data in A1:A3 self.enter_cell_text(1, 1, 'a1') self.enter_cell_text(1, 2, 'a2') self.enter_cell_text(1, 3, 'a3') self.wait_for_cell_value(1, 3, 'a3') # * He tries to use the clear() function from usercode on a cell # and then tries to access some of the supposedly cleared attributes of the cell self.prepend_usercode(dedent(''' worksheet.a1.error = 'harold puts a deliberate pointless error in' worksheet.a1.clear() worksheet.b1.formula = str(worksheet.a1.value) worksheet.b2.formula = str(worksheet.a1.formula) worksheet.b3.formula = str(worksheet.a1.formatted_value) worksheet.b4.formula = str(worksheet.a1.error) ''')) # * He sees the value in a1 disappear self.wait_for_cell_value(1, 1, '') self.wait_for_cell_value(1, 2, 'a2') self.wait_for_cell_value(1, 3, 'a3') # * He sees his little investigations also produce the expected results self.wait_for_cell_value(2, 1, '<undefined>') self.wait_for_cell_value(2, 2, 'None') self.wait_for_cell_value(2, 3, '') self.wait_for_cell_value(2, 4, 'None') def test_can_clear_cell_range_from_usercode(self): # * Harold logs in and creates a new sheet self.login_and_create_new_sheet() # * He enters some data in A1:A3 self.enter_cell_text(1, 1, 'a1') self.enter_cell_text(1, 2, 'a2') self.enter_cell_text(1, 3, 'a3') self.wait_for_cell_value(1, 3, 'a3') # * He tries to use the clear() function from usercode on a cell range self.prepend_usercode(dedent(''' worksheet.a1.error = 'harold puts a deliberate pointless error in' worksheet.a2.error = 'harold puts another deliberate pointless error in' worksheet.cell_range("a1:a2").clear() worksheet.b1.formula = str(worksheet.a1.value) worksheet.b2.formula = str(worksheet.a1.formula) worksheet.b3.formula = str(worksheet.a1.formatted_value) worksheet.b4.formula = str(worksheet.a1.error) worksheet.c1.formula = str(worksheet.a2.value) worksheet.c2.formula = str(worksheet.a2.formula) worksheet.c3.formula = str(worksheet.a2.formatted_value) worksheet.c4.formula = str(worksheet.a2.error) ''')) # * He sees the value in a1 and a2 disappear self.wait_for_cell_value(1, 1, '') self.wait_for_cell_value(1, 2, '') self.wait_for_cell_value(1, 3, 'a3') # * He sees his little investigations also produce the expected results self.wait_for_cell_value(2, 1, '<undefined>') self.wait_for_cell_value(2, 2, 'None') self.wait_for_cell_value(2, 3, '') self.wait_for_cell_value(2, 4, 'None') self.wait_for_cell_value(3, 1, '<undefined>') self.wait_for_cell_value(3, 2, 'None') self.wait_for_cell_value(3, 3, '') self.wait_for_cell_value(3, 4, 'None')
[ "textwrap.dedent" ]
[((3424, 3819), 'textwrap.dedent', 'dedent', (['"""\n worksheet.a1.error = \'harold puts a deliberate pointless error in\'\n\n worksheet.a1.clear()\n\n worksheet.b1.formula = str(worksheet.a1.value)\n worksheet.b2.formula = str(worksheet.a1.formula)\n worksheet.b3.formula = str(worksheet.a1.formatted_value)\n worksheet.b4.formula = str(worksheet.a1.error)\n """'], {}), '(\n """\n worksheet.a1.error = \'harold puts a deliberate pointless error in\'\n\n worksheet.a1.clear()\n\n worksheet.b1.formula = str(worksheet.a1.value)\n worksheet.b2.formula = str(worksheet.a1.formula)\n worksheet.b3.formula = str(worksheet.a1.formatted_value)\n worksheet.b4.formula = str(worksheet.a1.error)\n """\n )\n', (3430, 3819), False, 'from textwrap import dedent\n'), ((4733, 5478), 'textwrap.dedent', 'dedent', (['"""\n worksheet.a1.error = \'harold puts a deliberate pointless error in\'\n worksheet.a2.error = \'harold puts another deliberate pointless error in\'\n\n worksheet.cell_range("a1:a2").clear()\n\n worksheet.b1.formula = str(worksheet.a1.value)\n worksheet.b2.formula = str(worksheet.a1.formula)\n worksheet.b3.formula = str(worksheet.a1.formatted_value)\n worksheet.b4.formula = str(worksheet.a1.error)\n worksheet.c1.formula = str(worksheet.a2.value)\n worksheet.c2.formula = str(worksheet.a2.formula)\n worksheet.c3.formula = str(worksheet.a2.formatted_value)\n worksheet.c4.formula = str(worksheet.a2.error)\n """'], {}), '(\n """\n worksheet.a1.error = \'harold puts a deliberate pointless error in\'\n worksheet.a2.error = \'harold puts another deliberate pointless error in\'\n\n worksheet.cell_range("a1:a2").clear()\n\n worksheet.b1.formula = str(worksheet.a1.value)\n worksheet.b2.formula = str(worksheet.a1.formula)\n worksheet.b3.formula = str(worksheet.a1.formatted_value)\n worksheet.b4.formula = str(worksheet.a1.error)\n worksheet.c1.formula = str(worksheet.a2.value)\n worksheet.c2.formula = str(worksheet.a2.formula)\n worksheet.c3.formula = str(worksheet.a2.formatted_value)\n worksheet.c4.formula = str(worksheet.a2.error)\n """\n )\n', (4739, 5478), False, 'from textwrap import dedent\n')]
""" Helper script to create config files for BlenderProc. """ import os import yaml import random import numpy as np import binascii # these paths have to be manually set before creating a config BLENDERPROC_ROOT = '' # /path/to/BlenderProc SHAPENET_ROOT = '' # /path/to/ShapeNetCore.v2 SUNCG_ROOT = '' # /path/to/suncg DEST = '' # /path/to/output_folder def get_random_house_path(): with open(os.path.join(BLENDERPROC_ROOT, 'suncg_houses.txt'), 'r') as f: house_paths = f.readlines() return os.path.join(SUNCG_ROOT, random.choice(house_paths)).strip() def get_base_cfg(): with open(os.path.join(BLENDERPROC_ROOT, 'base_config.yaml'), 'r') as f: base_cfg = yaml.load(f) return base_cfg def get_random_obj_configs(n=10): obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs = [], [], [], [], [], [] with open(os.path.join(BLENDERPROC_ROOT, 'shapenet_objects.txt'), 'r') as f: obj_paths = f.readlines() for i in range(n): scale = np.random.uniform(0.1, 0.4) recalculate_uv = np.random.uniform(0., 1.) obj_base_cfg = { "module": "loader.CustomObjectLoader", "config": { "path": os.path.join(SHAPENET_ROOT, random.choice(obj_paths)[:-1]), "scale": [scale, scale, scale], "add_properties": { "cp_object_to_scale": True, "cp_sample_pose": True, "cp_category_id": int(i+2), "cp_coarse_grained_class": "selected_object", "cp_type": "Object", "cp_physics": True, "cp_cc_texture": True }, } } scale_base_cfg = { "module": "manipulators.EntityManipulator", "config": { "selector": { "provider": "getter.Entity", "conditions": { "cp_category_id": int(i+2), } }, "scale": [scale, scale, scale], "cf_add_modifier": { "name": "Solidify", "thickness": 0.0025 }, "cf_randomize_materials": { "randomization_level": 1., "materials_to_replace_with": { "provider": "getter.Material", "conditions": { "cp_is_cc_texture": True } } }, } } mat_base_cfg = { "module": "manipulators.MaterialManipulator", "config": { "selector": { "provider": "getter.Entity", "conditions": { "cp_category_id": int(i + 2), } }, "cf_set_Roughness": { "provider": "sampler.Value", "type": "float", "min": 0.05, "max": 0.5, }, "cf_set_Specular": { "provider": "sampler.Value", "type": "float", "min": 0.5, "max": 1.0, }, "cf_color_link_to_displacement": { "provider": "sampler.Value", "type": "float", "min": 0.001, "max": 0.15 }, "cf_set_Alpha": 1.0, "mode": "once_for_each" } } sampler_base_cfg = { "module": "object.OnSurfaceSampler", "config": { "objects_to_sample": { "provider": "getter.Entity", "conditions": { "cp_category_id": int(i+2) } }, "surface": { "provider": "getter.Entity", "index": 0, "conditions": { "name": "selected_table" } }, "pos_sampler": { "provider": "sampler.UpperRegionSampler", "to_sample_on": { "provider": "getter.Entity", "index": 0, "conditions": { "name": "selected_table" } }, "min_height": 1, "max_height": 4, "face_sample_range": [0.4, 0.6], "use_ray_trace_check": False, }, "min_distance": 0.1, "max_distance": 1.5, "rot_sampler": { "provider": "sampler.Uniform3d", "min": [0, 0, 0], "max": [6.28, 6.28, 6.28] } } } physics_base_cfg = { "module": "object.PhysicsPositioning", "config": { "min_simulation_time": 0.5, "max_simulation_time": 2, "check_object_interval": 1, } } grav_off_cfg = { "module": "manipulators.EntityManipulator", "config": { "selector": { "provider": "getter.Entity", "conditions": { "cp_category_id": int(i + 2), } }, "cp_physics": False, } } scale_base_cfg["config"]["cf_add_uv_mapping"] = { "projection": "cylinder", "forced_recalc_of_uv_maps": True if recalculate_uv > 0.5 else False } mat_base_cfg["config"]["cf_add_uv_mapping"] = { "projection": "cylinder", "forced_recalc_of_uv_maps": True if recalculate_uv > 0.5 else False } obj_configs.append(obj_base_cfg) scale_configs.append(scale_base_cfg) mat_configs.append(mat_base_cfg) sample_configs.append(sampler_base_cfg) physic_configs.append(physics_base_cfg) gravoff_configs.append(grav_off_cfg) return obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs def create_config(): base_cfg = get_base_cfg() baseline = 0.065 focal_length_x = 541.14 focal_length_y = 541.14 base_cfg['modules'][8]['config']['intrinsics']['interocular_distance'] = baseline base_cfg['modules'][8]['config']['intrinsics']['cam_K'] = [focal_length_x, 0.0, 320.0, 0.0, focal_length_y, 240.0, 0.0, 0.0, 1.0] # add objects num_objs = np.random.randint(5, 12) obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs = get_random_obj_configs(n=num_objs) for obj_config, scale_config, mat_config, sample_config, physics_config, gravoff_config in zip(obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs): base_cfg['modules'].insert(6, obj_config) base_cfg['modules'].insert(7, scale_config) base_cfg['modules'].insert(8, sample_config) base_cfg['modules'].insert(9, physics_config) base_cfg['modules'].insert(10, gravoff_config) # set house path base_cfg['modules'][1]['config']['path'] = get_random_house_path() # replace house with cctextures house_cc_texture_config = { "module": "manipulators.EntityManipulator", "config": { "selector": { "provider": "getter.Entity", "conditions": { "type": "MESH" } }, "cf_randomize_materials": { "randomization_level": 0.4, "materials_to_replace_with": { "provider": "getter.Material", "random_samples": 1, "conditions": { "cp_is_cc_texture": True # this will return one random loaded cc textures } } } } } base_cfg['modules'].insert(4, house_cc_texture_config) # set output dir output_prefix = os.urandom(20) output_prefix = binascii.hexlify(output_prefix) output_prefix = str(output_prefix)[2:-1] output_path = os.path.join(DEST, output_prefix) os.makedirs(output_path) base_cfg['modules'][0]['config']['global']['output_dir'] = output_path with open(os.path.join(DEST, output_prefix + '/config.yaml'), 'w') as f: yaml.dump(base_cfg, f) return os.path.join(DEST, output_prefix + '/config.yaml') if __name__ == '__main__': path = create_config() print(path)
[ "numpy.random.uniform", "yaml.load", "os.makedirs", "binascii.hexlify", "yaml.dump", "random.choice", "numpy.random.randint", "os.path.join", "os.urandom" ]
[((7027, 7051), 'numpy.random.randint', 'np.random.randint', (['(5)', '(12)'], {}), '(5, 12)\n', (7044, 7051), True, 'import numpy as np\n'), ((8462, 8476), 'os.urandom', 'os.urandom', (['(20)'], {}), '(20)\n', (8472, 8476), False, 'import os\n'), ((8497, 8528), 'binascii.hexlify', 'binascii.hexlify', (['output_prefix'], {}), '(output_prefix)\n', (8513, 8528), False, 'import binascii\n'), ((8592, 8625), 'os.path.join', 'os.path.join', (['DEST', 'output_prefix'], {}), '(DEST, output_prefix)\n', (8604, 8625), False, 'import os\n'), ((8630, 8654), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (8641, 8654), False, 'import os\n'), ((8850, 8900), 'os.path.join', 'os.path.join', (['DEST', "(output_prefix + '/config.yaml')"], {}), "(DEST, output_prefix + '/config.yaml')\n", (8862, 8900), False, 'import os\n'), ((695, 707), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (704, 707), False, 'import yaml\n'), ((1036, 1063), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(0.4)'], {}), '(0.1, 0.4)\n', (1053, 1063), True, 'import numpy as np\n'), ((1089, 1116), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1106, 1116), True, 'import numpy as np\n'), ((8816, 8838), 'yaml.dump', 'yaml.dump', (['base_cfg', 'f'], {}), '(base_cfg, f)\n', (8825, 8838), False, 'import yaml\n'), ((406, 456), 'os.path.join', 'os.path.join', (['BLENDERPROC_ROOT', '"""suncg_houses.txt"""'], {}), "(BLENDERPROC_ROOT, 'suncg_houses.txt')\n", (418, 456), False, 'import os\n'), ((613, 663), 'os.path.join', 'os.path.join', (['BLENDERPROC_ROOT', '"""base_config.yaml"""'], {}), "(BLENDERPROC_ROOT, 'base_config.yaml')\n", (625, 663), False, 'import os\n'), ((896, 950), 'os.path.join', 'os.path.join', (['BLENDERPROC_ROOT', '"""shapenet_objects.txt"""'], {}), "(BLENDERPROC_ROOT, 'shapenet_objects.txt')\n", (908, 950), False, 'import os\n'), ((8745, 8795), 'os.path.join', 'os.path.join', (['DEST', "(output_prefix + '/config.yaml')"], {}), "(DEST, output_prefix + '/config.yaml')\n", (8757, 8795), False, 'import os\n'), ((541, 567), 'random.choice', 'random.choice', (['house_paths'], {}), '(house_paths)\n', (554, 567), False, 'import random\n'), ((1267, 1291), 'random.choice', 'random.choice', (['obj_paths'], {}), '(obj_paths)\n', (1280, 1291), False, 'import random\n')]
import sys sys.setrecursionlimit(10000000) class LowestCommonAncedtor: def __init__(self, G, root): self.n = len(G) self.tour = [0] * (2 * self.n - 1) self.depth_list = [0] * (2 * self.n - 1) self.id = [0] * self.n self.visit_id = 0 self.dfs(G, root, -1, 0) self._rmq_init(self.depth_list) def _rmq_init(self, arr): n = len(arr) self.N0 = 1 << (n - 1).bit_length() self.dat = [self.n] * (self.N0 - 1) + arr + [self.n ] * (self.N0 - n + 1) self.index = [0] * (self.N0 - 1) + list( range(n)) + [0] * (self.N0 - n + 1) dat = self.dat index = self.index for i in range(self.N0 - 2, -1, -1): if dat[2 * i + 1] > dat[2 * i + 2]: dat[i] = dat[2 * i + 2] index[i] = index[2 * i + 2] else: dat[i] = dat[2 * i + 1] index[i] = index[2 * i + 1] def _rmq_query(self, l, r): """最小値となるindexを返す""" l += self.N0 r += self.N0 s = self.n dat = self.dat index = self.index while l < r: if r & 1: r -= 1 if s > dat[r - 1]: s = dat[r - 1] res = index[r - 1] if l & 1: if s > dat[l - 1]: s = dat[l - 1] res = index[l - 1] l += 1 l >>= 1 r >>= 1 return res def dfs(self, G, vertex, parent, depth): self.id[vertex] = self.visit_id self.tour[self.visit_id] = vertex self.depth_list[self.visit_id] = depth self.visit_id += 1 for element in G[vertex]: if element != parent: self.dfs(G, element, vertex, depth + 1) self.tour[self.visit_id] = vertex self.depth_list[self.visit_id] = depth self.visit_id += 1 def get(self, u, v): l, r = self.id[u], self.id[v] if r < l: l, r = r, l q = self._rmq_query(l, r + 1) return self.tour[q] def dist(self, u, v): """点u,点vの距離""" lca = self.get(u, v) depth_u = self.depth_list[self.id[u]] depth_v = self.depth_list[self.id[v]] depth_lca = self.depth_list[self.id[lca]] return depth_u + depth_v - 2 * depth_lca #問.任意の2頂点の距離 n, q = map(int, input().split()) e = [[] for _ in range(n)] for _ in range(n - 1): a, b = map(int, input().split()) a -= 1 b -= 1 e[a].append(b) e[b].append(a) lca = LowestCommonAncedtor(e, 0) for _ in range(q): u, v = map(int, input().split()) u -= 1 v -= 1 if lca.dist(u, v) % 2 == 1: print("Road") else: print("Town")
[ "sys.setrecursionlimit" ]
[((12, 43), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000000)'], {}), '(10000000)\n', (33, 43), False, 'import sys\n')]
import h5py import numpy as np def load_data(fname): # load in an hdf5 file and return the X and y values data_file = h5py.File(fname) # load in X and y training data, fully into memory X = data_file['X'][:].reshape(-1, 1) # each row is a data point y = data_file['y'][:] return X, y def eval_fit(y_pred, y_true): # compute mean absolute error mae = np.mean(np.abs(y_pred - y_true)) return mae # don't normalize
[ "h5py.File", "numpy.abs" ]
[((127, 143), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (136, 143), False, 'import h5py\n'), ((395, 418), 'numpy.abs', 'np.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (401, 418), True, 'import numpy as np\n')]
import sys import os import urllib.parse import urllib.request import xml.etree.ElementTree as ET import shutil import sqlite3 def fetch_database(filename): r = urllib.request.urlopen('https://nzsl-assets.vuw.ac.nz/dnzsl/freelex/publicsearch?xmldump=1') with open(filename, "wb") as f: f.write(r.read()) def fetch_assets(root): for entry in root.iter("entry"): print(entry.find("headword").text) for asset in entry.find("ASSET"): if ("picture" == asset.tag): fn = os.path.join(asset.tag, asset.text) if not os.path.exists(fn): try: os.makedirs(os.path.dirname(fn)) except IOError: pass r = urllib.request.urlopen("https://nzsl-assets.vuw.ac.nz/dnzsl/freelex/assets/" + urllib.parse.quote(asset.text)) with open(fn, "wb") as f: f.write(r.read()) # Modify filenames to match the Android requirements (lowercase a-z and _ only) # Since iOS uses the same data source (the .dat file), update iOS to use the same image names. def normalize_image_filename(filename): normalized_filename = filename.replace('-', '_').lower() num_of_periods = normalized_filename.count('.') if (num_of_periods > 1): normalized_filename = normalized_filename.replace('.', '_', num_of_periods - 1) return normalized_filename def rename_assets(root): for entry in root.iter("entry"): for asset in entry.find("ASSET"): if ("picture" == asset.tag): old_filename = os.path.join(asset.tag, asset.text) if not os.path.isfile(old_filename): print("Picture {} does not exist!", old_filename) continue new_filename = normalize_image_filename(old_filename) os.rename(old_filename, new_filename) asset.text = new_filename.replace('picture/', '', 1) def write_datfile(root): with open("nzsl.dat", "w") as f: for entry in root.iter("entry"): headword = entry.attrib["id"], entry.find("headword").text sec = entry.find("glosssecondary") maori = entry.find("glossmaori") picture = entry.find("ASSET/picture") video = entry.find("ASSET/glossmain") handshape = entry.find("handshape") if picture is None: print("{} missing picture".format(headword)) if video is None: print("{} missing video".format(headword)) if handshape is None: print("{} missing handshape".format(headword)) print("\t".join([ entry.find("glossmain").text, sec.text if sec is not None else "", maori.text if maori is not None else "", os.path.basename(normalize_image_filename(picture.text)) if picture is not None else "", "https://nzsl-assets.vuw.ac.nz/dnzsl/freelex/assets/"+video.text.replace(".webm", ".mp4") if video is not None else "", handshape.text if handshape is not None else "", entry.find("location").text, ]), file=f) def write_sqlitefile(): if os.path.exists("nzsl.db"): os.unlink("nzsl.db") db = sqlite3.connect("nzsl.db") db.execute("create table words (gloss, minor, maori, picture, video, handshape, location, target)") with open("nzsl.dat") as f: for s in f: a = s.strip().split("\t") a.append("{}|{}|{}".format(normalise(a[0]), normalise(a[1]), normalise(a[2]))) assert all(32 <= ord(x) < 127 for x in a[-1]), a[-1] db.execute("insert into words values (?, ?, ?, ?, ?, ?, ?, ?)", a) db.commit() db.close() def copy_images_to_one_folder(): if (os.path.isdir("assets")): shutil.rmtree("assets") os.makedirs("assets") os.system("cp picture/*/*.png assets/ 2>/dev/null") # Helper functions def normalise(s): return (s.lower() .replace("ā", "a") .replace("ē", "e") .replace("é", "e") .replace("ī", "i") .replace("ō", "o") .replace("ū", "u"))
[ "os.makedirs", "os.unlink", "os.path.isdir", "os.rename", "os.path.dirname", "os.path.exists", "os.system", "os.path.isfile", "sqlite3.connect", "shutil.rmtree", "os.path.join" ]
[((3322, 3347), 'os.path.exists', 'os.path.exists', (['"""nzsl.db"""'], {}), "('nzsl.db')\n", (3336, 3347), False, 'import os\n'), ((3387, 3413), 'sqlite3.connect', 'sqlite3.connect', (['"""nzsl.db"""'], {}), "('nzsl.db')\n", (3402, 3413), False, 'import sqlite3\n'), ((3916, 3939), 'os.path.isdir', 'os.path.isdir', (['"""assets"""'], {}), "('assets')\n", (3929, 3939), False, 'import os\n'), ((3978, 3999), 'os.makedirs', 'os.makedirs', (['"""assets"""'], {}), "('assets')\n", (3989, 3999), False, 'import os\n'), ((4004, 4055), 'os.system', 'os.system', (['"""cp picture/*/*.png assets/ 2>/dev/null"""'], {}), "('cp picture/*/*.png assets/ 2>/dev/null')\n", (4013, 4055), False, 'import os\n'), ((3357, 3377), 'os.unlink', 'os.unlink', (['"""nzsl.db"""'], {}), "('nzsl.db')\n", (3366, 3377), False, 'import os\n'), ((3950, 3973), 'shutil.rmtree', 'shutil.rmtree', (['"""assets"""'], {}), "('assets')\n", (3963, 3973), False, 'import shutil\n'), ((531, 566), 'os.path.join', 'os.path.join', (['asset.tag', 'asset.text'], {}), '(asset.tag, asset.text)\n', (543, 566), False, 'import os\n'), ((1646, 1681), 'os.path.join', 'os.path.join', (['asset.tag', 'asset.text'], {}), '(asset.tag, asset.text)\n', (1658, 1681), False, 'import os\n'), ((1925, 1962), 'os.rename', 'os.rename', (['old_filename', 'new_filename'], {}), '(old_filename, new_filename)\n', (1934, 1962), False, 'import os\n'), ((590, 608), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (604, 608), False, 'import os\n'), ((1706, 1734), 'os.path.isfile', 'os.path.isfile', (['old_filename'], {}), '(old_filename)\n', (1720, 1734), False, 'import os\n'), ((671, 690), 'os.path.dirname', 'os.path.dirname', (['fn'], {}), '(fn)\n', (686, 690), False, 'import os\n')]
from setuptools import find_packages, setup setup( name='src', packages=find_packages(), version='0.1.0', description='tweet analyzer', author='<NAME>', license='', )
[ "setuptools.find_packages" ]
[((81, 96), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (94, 96), False, 'from setuptools import find_packages, setup\n')]
import time import logging import cv2 import numpy as np from deep_sort_realtime.deep_sort import nn_matching from deep_sort_realtime.deep_sort.detection import Detection from deep_sort_realtime.deep_sort.tracker import Tracker from deep_sort_realtime.utils.nms import non_max_suppression log_level = logging.DEBUG default_logger = logging.getLogger('DeepSORT') default_logger.setLevel(log_level) handler = logging.StreamHandler() handler.setLevel(log_level) formatter = logging.Formatter('[%(levelname)s] [%(name)s] %(message)s') handler.setFormatter(formatter) default_logger.addHandler(handler) class DeepSort(object): def __init__(self, max_age = 30, nms_max_overlap=1.0, max_cosine_distance=0.2, nn_budget=None, override_track_class=None, clock=None, embedder=True, half=True, bgr=True, logger=None, polygon=False): ''' Parameters ---------- max_age : Optional[int] = 30 Maximum number of missed misses before a track is deleted. nms_max_overlap : Optional[float] = 1.0 Non-maxima suppression threshold: Maximum detection overlap, if is 1.0, nms will be disabled max_cosine_distance : Optional[float] = 0.2 Gating threshold for cosine distance nn_budget : Optional[int] = None Maximum size of the appearance descriptors, if None, no budget is enforced override_track_class : Optional[object] = None Giving this will override default Track class, this must inherit Track clock : Optional[object] = None Clock custom object provides date for track naming and facilitates track id reset every day, preventing overflow and overly large track ids. For example clock class, please see `utils/clock.py` embedder : Optional[bool] = True Whether to use in-built embedder or not. If False, then embeddings must be given during update half : Optional[bool] = True Whether to use half precision for deep embedder bgr : Optional[bool] = True Whether frame given to embedder is expected to be BGR or not (RGB) logger : Optional[object] = None logger object polygon: Optional[bool] = False Whether detections are polygons (e.g. oriented bounding boxes) ''' if logger is None: self.logger = default_logger else: self.logger = logger # self.video_info = video_info # assert clock is not None self.nms_max_overlap = nms_max_overlap metric = nn_matching.NearestNeighborDistanceMetric( "cosine", max_cosine_distance, nn_budget) self.tracker = Tracker(metric, max_age = max_age, override_track_class=override_track_class, clock=clock, logger=self.logger) if embedder: from deep_sort_realtime.embedder.embedder_pytorch import MobileNetv2_Embedder as Embedder self.embedder = Embedder(half=half, max_batch_size=16, bgr=bgr) else: self.embedder = None self.polygon = polygon self.logger.info('DeepSort Tracker initialised') self.logger.info(f'- max age: {max_age}') self.logger.info(f'- appearance threshold: {max_cosine_distance}') self.logger.info(f'- nms threshold: {"OFF" if self.nms_max_overlap==1.0 else self.nms_max_overlap }') self.logger.info(f'- max num of appearance features: {nn_budget}') self.logger.info(f'- overriding track class : {"No" if override_track_class is None else "Yes"}' ) self.logger.info(f'- clock : {"No" if clock is None else "Yes"}' ) self.logger.info(f'- in-build embedder : {"No" if self.embedder is None else "Yes"}' ) self.logger.info(f'- polygon detections : {"No" if polygon is False else "Yes"}' ) def update_tracks(self, raw_detections, embeds=None, frame=None): """Run multi-target tracker on a particular sequence. Parameters ---------- raw_detections (horizontal bb) : List[ Tuple[ List[float or int], float, str ] ] List of detections, each in tuples of ( [left,top,w,h] , confidence, detection_class) raw_detections (polygon) : List[ List[float], List[int or str], List[float] ] List of Polygons, Classes, Confidences. All 3 sublists of the same length. A polygon defined as a ndarray-like [x1,y1,x2,y2,...]. embeds : Optional[ List[] ] = None List of appearance features corresponding to detections frame : Optional [ np.ndarray ] = None if embeds not given, Image frame must be given here, in [H,W,C]. Returns ------- list of track objects (Look into track.py for more info or see "main" section below in this script to see simple example) """ if embeds is None: if self.embedder is None: raise Exception('Embedder not created during init so embeddings must be given now!') if frame is None: raise Exception('either embeddings or frame must be given!') if not self.polygon: raw_detections = [ d for d in raw_detections if d[0][2] > 0 and d[0][3] > 0] if embeds is None: embeds = self.generate_embeds(frame, raw_detections) # Proper deep sort detection objects that consist of bbox, confidence and embedding. detections = self.create_detections(raw_detections, embeds) else: polygons, bounding_rects = self.process_polygons(raw_detections[0]) if embeds is None: embeds = self.generate_embeds_poly(frame, polygons, bounding_rects) # Proper deep sort detection objects that consist of bbox, confidence and embedding. detections = self.create_detections_poly(raw_detections, embeds, bounding_rects) # Run non-maxima suppression. boxes = np.array([d.ltwh for d in detections]) scores = np.array([d.confidence for d in detections]) if self.nms_max_overlap < 1.0: # nms_tic = time.perf_counter() indices = non_max_suppression( boxes, self.nms_max_overlap, scores) # nms_toc = time.perf_counter() # logger.debug(f'nms time: {nms_toc-nms_tic}s') detections = [detections[i] for i in indices] # Update tracker. self.tracker.predict() self.tracker.update(detections) return self.tracker.tracks def refresh_track_ids(self): self.tracker._next_id def generate_embeds(self, frame, raw_dets): crops = self.crop_bb(frame, raw_dets) return self.embedder.predict(crops) def generate_embeds_poly(self, frame, polygons, bounding_rects): crops = self.crop_poly_pad_black(frame, polygons, bounding_rects) return self.embedder.predict(crops) def create_detections(self, raw_dets, embeds): detection_list = [] for raw_det, embed in zip(raw_dets,embeds): detection_list.append(Detection(raw_det[0], raw_det[1], embed, class_name=raw_det[2])) #raw_det = [bbox, conf_score, class] return detection_list def create_detections_poly(self, dets, embeds, bounding_rects): detection_list = [] dets.extend([embeds, bounding_rects]) for raw_polygon, cl, score, embed, bounding_rect in zip(*dets): x,y,w,h = bounding_rect x = max(0, x) y = max(0, y) bbox = [x,y,w,h] detection_list.append(Detection(bbox, score, embed, class_name=cl, others=raw_polygon)) return detection_list @staticmethod def process_polygons(raw_polygons): polygons = [ [ polygon[x:x+2] for x in range(0, len(polygon), 2) ]for polygon in raw_polygons ] bounding_rects = [ cv2.boundingRect(np.array([polygon]).astype(int)) for polygon in polygons ] return polygons, bounding_rects @staticmethod def crop_bb(frame, raw_dets): crops = [] im_height, im_width = frame.shape[:2] for detection in raw_dets: l,t,w,h = [int(x) for x in detection[0]] r = l + w b = t + h crop_l = max(0, l) crop_r = min(im_width, r) crop_t = max(0, t) crop_b = min(im_height, b) crops.append(frame[crop_t:crop_b, crop_l:crop_r]) return crops @staticmethod def crop_poly_pad_black(frame, polygons, bounding_rects): masked_polys = [] im_height, im_width = frame.shape[:2] for polygon, bounding_rect in zip(polygons, bounding_rects): mask = np.zeros(frame.shape, dtype=np.uint8) polygon_mask = np.array([polygon]).astype(int) cv2.fillPoly(mask, polygon_mask, color=(255,255,255)) # apply the mask masked_image = cv2.bitwise_and(frame, mask) # crop masked image x,y,w,h = bounding_rect crop_l = max(0, x) crop_r = min(im_width, x+w) crop_t = max(0, y) crop_b = min(im_height, y+h) cropped = masked_image[crop_t:crop_b, crop_l:crop_r].copy() masked_polys.append(np.array(cropped)) return masked_polys
[ "deep_sort_realtime.deep_sort.tracker.Tracker", "deep_sort_realtime.utils.nms.non_max_suppression", "cv2.bitwise_and", "deep_sort_realtime.deep_sort.detection.Detection", "logging.StreamHandler", "numpy.zeros", "cv2.fillPoly", "logging.Formatter", "deep_sort_realtime.embedder.embedder_pytorch.MobileNetv2_Embedder", "numpy.array", "logging.getLogger", "deep_sort_realtime.deep_sort.nn_matching.NearestNeighborDistanceMetric" ]
[((336, 365), 'logging.getLogger', 'logging.getLogger', (['"""DeepSORT"""'], {}), "('DeepSORT')\n", (353, 365), False, 'import logging\n'), ((411, 434), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (432, 434), False, 'import logging\n'), ((475, 534), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s] [%(name)s] %(message)s"""'], {}), "('[%(levelname)s] [%(name)s] %(message)s')\n", (492, 534), False, 'import logging\n'), ((2574, 2661), 'deep_sort_realtime.deep_sort.nn_matching.NearestNeighborDistanceMetric', 'nn_matching.NearestNeighborDistanceMetric', (['"""cosine"""', 'max_cosine_distance', 'nn_budget'], {}), "('cosine', max_cosine_distance,\n nn_budget)\n", (2615, 2661), False, 'from deep_sort_realtime.deep_sort import nn_matching\n'), ((2694, 2806), 'deep_sort_realtime.deep_sort.tracker.Tracker', 'Tracker', (['metric'], {'max_age': 'max_age', 'override_track_class': 'override_track_class', 'clock': 'clock', 'logger': 'self.logger'}), '(metric, max_age=max_age, override_track_class=override_track_class,\n clock=clock, logger=self.logger)\n', (2701, 2806), False, 'from deep_sort_realtime.deep_sort.tracker import Tracker\n'), ((5969, 6007), 'numpy.array', 'np.array', (['[d.ltwh for d in detections]'], {}), '([d.ltwh for d in detections])\n', (5977, 6007), True, 'import numpy as np\n'), ((6025, 6069), 'numpy.array', 'np.array', (['[d.confidence for d in detections]'], {}), '([d.confidence for d in detections])\n', (6033, 6069), True, 'import numpy as np\n'), ((2956, 3003), 'deep_sort_realtime.embedder.embedder_pytorch.MobileNetv2_Embedder', 'Embedder', ([], {'half': 'half', 'max_batch_size': '(16)', 'bgr': 'bgr'}), '(half=half, max_batch_size=16, bgr=bgr)\n', (2964, 3003), True, 'from deep_sort_realtime.embedder.embedder_pytorch import MobileNetv2_Embedder as Embedder\n'), ((6175, 6231), 'deep_sort_realtime.utils.nms.non_max_suppression', 'non_max_suppression', (['boxes', 'self.nms_max_overlap', 'scores'], {}), '(boxes, self.nms_max_overlap, scores)\n', (6194, 6231), False, 'from deep_sort_realtime.utils.nms import non_max_suppression\n'), ((8720, 8757), 'numpy.zeros', 'np.zeros', (['frame.shape'], {'dtype': 'np.uint8'}), '(frame.shape, dtype=np.uint8)\n', (8728, 8757), True, 'import numpy as np\n'), ((8829, 8884), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'polygon_mask'], {'color': '(255, 255, 255)'}), '(mask, polygon_mask, color=(255, 255, 255))\n', (8841, 8884), False, 'import cv2\n'), ((8940, 8968), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'mask'], {}), '(frame, mask)\n', (8955, 8968), False, 'import cv2\n'), ((7102, 7165), 'deep_sort_realtime.deep_sort.detection.Detection', 'Detection', (['raw_det[0]', 'raw_det[1]', 'embed'], {'class_name': 'raw_det[2]'}), '(raw_det[0], raw_det[1], embed, class_name=raw_det[2])\n', (7111, 7165), False, 'from deep_sort_realtime.deep_sort.detection import Detection\n'), ((7600, 7664), 'deep_sort_realtime.deep_sort.detection.Detection', 'Detection', (['bbox', 'score', 'embed'], {'class_name': 'cl', 'others': 'raw_polygon'}), '(bbox, score, embed, class_name=cl, others=raw_polygon)\n', (7609, 7664), False, 'from deep_sort_realtime.deep_sort.detection import Detection\n'), ((9285, 9302), 'numpy.array', 'np.array', (['cropped'], {}), '(cropped)\n', (9293, 9302), True, 'import numpy as np\n'), ((8785, 8804), 'numpy.array', 'np.array', (['[polygon]'], {}), '([polygon])\n', (8793, 8804), True, 'import numpy as np\n'), ((7903, 7922), 'numpy.array', 'np.array', (['[polygon]'], {}), '([polygon])\n', (7911, 7922), True, 'import numpy as np\n')]
import pymysql conn = pymysql.connect(host='127.0.0.1', user='root', passwd='<PASSWORD>', db='all0504') def get_user_set(): user_set = set() with open('../facebook/KOL_audience') as input_user_file: for line in input_user_file: if line.strip() == '': continue words = line.strip().split(';') user_ids = set(words[1].split(' ')) user_set = user_set.union(user_ids) return user_set def get_item_set(num=100): item_set = set() count = 0 with open('../facebook/{}_list'.format(SCENARIO)) as input_item_file: for line in input_item_file: if line.strip() == '': continue count += 1 item_info = line.strip().split() item_set.add(item_info[0]) if count == num: break return item_set def read_user_item_preference(user_set): user_item_likes = {} x = conn.cursor() x.execute("SELECT iduser, {}str FROM user".format(SCENARIO)) results = x.fetchall() for result in results: user_id = result[0] moviestr_items = result[1].split(';') if user_id in user_set or len(user_set) == 0: user_item_likes[user_id] = set() for movie_i in moviestr_items: user_item_likes[user_id].add(movie_i) return user_item_likes, set(user_item_likes.keys()) def read_user_relationship(user_set): user_relationship = {} for user in user_set: user_relationship[user] = set() x = conn.cursor() x.execute("SELECT iduser, friendstr FROM user") results = x.fetchall() for result in results: user_id = result[0] if user_id not in user_set: continue friends = result[1].split(';') for friend in friends: user_relationship[user_id].add(friend) return user_relationship def read_item_similarity_from_file(): SIM = {} with open("../facebook/{}_similarity".format(SCENARIO)) as inputfile: for line in inputfile: line = line.strip() if len(line) > 0: words = line.split() item1 = words[0] item2 = words[1] similarity = float(words[2]) if item1 not in SIM: SIM[item1] = {} SIM[item1][item2] = similarity return SIM def calculate_user_similairity(user1, user2): if user1 not in USER_PREF or user2 not in USER_PREF: return 0 num_co_liked_item = len(USER_PREF[user1] & USER_PREF[user2]) if num_co_liked_item == 0: user_sim = 0 else: user_sim = num_co_liked_item*1.0 / (len(USER_PREF[user2])+1) return user_sim def get_item_similarity(item1, item2): if item1 not in ITEM_SIM or item2 not in ITEM_SIM[item1]: return 0 return ITEM_SIM[item1][item2] def user_item_affinity(user_id, target_item, consider_item=True, consider_friend=True, indirect_friend=False, inindirect_friend=False): #indirect_friends: whether consider indirect friends score = 0 for item in ITEM_SET: if item in USER_PREF[user_id]: score += get_item_similarity(target_item, item) if score == 0: return 0 # early stop the users whith no item similarity (if continue, too slow for the algorithm) if not consider_item: score = 0.0001 considered_f = set() if consider_friend: for friend in USER_RELATION[user_id]: if friend in USER_PREF and target_item in USER_PREF[friend]: score += calculate_user_similairity(user_id, friend) considered_f.add(friend) # if counting indirect friends if indirect_friend and (friend in USER_RELATION): friends_of_f = USER_RELATION[friend] for friend_of_f in friends_of_f: if (friend_of_f in USER_PREF) and (friend_of_f not in considered_f) and target_item in USER_PREF[friend_of_f]: # score += calculate_user_similairity(user_id, friend) * calculate_user_similairity(friend, friend_of_f) score += calculate_user_similairity(user_id, friend_of_f) considered_f.add(friend_of_f) # if counting inindirect friends if inindirect_friend and (friend_of_f in USER_RELATION): for fff in USER_RELATION[friend_of_f]: if (fff in USER_PREF) and (fff not in considered_f) and target_item in USER_PREF[fff]: # score += calculate_user_similairity(user_id, friend) * calculate_user_similairity(friend, friend_of_f) * calculate_user_similairity(friend_of_f, fff) score += calculate_user_similairity(user_id, fff) considered_f.add(fff) return score def output_user_item_aff(): with open("user_{}_aff_score_100_both".format(SCENARIO), "w") as outputfile: outputfile.write('user {} score truth\n'.format(SCENARIO)) for user in USER_SET: if user not in USER_PREF: continue for item in ITEM_SET: score = user_item_affinity(user, item, consider_item=True, consider_friend=True) isTrue = 1 if item in USER_PREF[user] else 0 if score > 0: outputfile.write('{} {} {} {}\n'.format(user, item, score, isTrue)) def output_user_item_aff_only_item(): with open("user_{}_aff_score_100_only_item".format(SCENARIO), "w") as outputfile: outputfile.write('user {} score truth\n'.format(SCENARIO)) for user in USER_SET: if user not in USER_PREF: continue for item in ITEM_SET: score = user_item_affinity(user, item, consider_item=True, consider_friend=False) isTrue = 1 if item in USER_PREF[user] else 0 if score > 0: outputfile.write('{} {} {} {}\n'.format(user, item, score, isTrue)) def output_user_item_aff_only_friend(): with open("user_{}_aff_score_100_only_friend".format(SCENARIO), "w") as outputfile: outputfile.write('user {} score truth\n'.format(SCENARIO)) for user in USER_SET: if user not in USER_PREF: continue for item in ITEM_SET: score = user_item_affinity(user, item, consider_item=False, consider_friend=True) isTrue = 1 if item in USER_PREF[user] else 0 if score > 0: outputfile.write('{} {} {} {}\n'.format(user, item, score, isTrue)) if __name__ == '__main__': SCENARIO = 'book' print('reading user set...') USER_SET = get_user_set() print('reading item set...') ITEM_SET = get_item_set() print('reading user preference...') USER_PREF, USER_SET = read_user_item_preference(USER_SET) print(len(USER_PREF.keys())) print('reading user relationship...') USER_RELATION = read_user_relationship(USER_SET) print('reading item similarity...') ITEM_SIM = read_item_similarity_from_file() print('outputing to file...') output_user_item_aff() #output_user_item_aff_only_item() #output_user_item_aff_only_friend()
[ "pymysql.connect" ]
[((23, 109), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""127.0.0.1"""', 'user': '"""root"""', 'passwd': '"""<PASSWORD>"""', 'db': '"""all0504"""'}), "(host='127.0.0.1', user='root', passwd='<PASSWORD>', db=\n 'all0504')\n", (38, 109), False, 'import pymysql\n')]
import pytest from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging def test_help(): with pytest.raises(SystemExit) as e: main(["--help"]) assert e.value.args[0] == 0 def test_parse_cutoffs(): assert parse_cutoffs("5") == (0, 5) assert parse_cutoffs("6,7") == (6, 7) with pytest.raises(CommandLineError): parse_cutoffs("a,7") with pytest.raises(CommandLineError): parse_cutoffs("a") with pytest.raises(CommandLineError): parse_cutoffs("a,7") with pytest.raises(CommandLineError): parse_cutoffs("1,2,3") def test_parse_lengths(): assert parse_lengths("25") == (25, ) assert parse_lengths("17:25") == (17, 25) assert parse_lengths("25:") == (25, None) assert parse_lengths(":25") == (None, 25) with pytest.raises(CommandLineError): parse_lengths("1:2:3") with pytest.raises(CommandLineError): parse_lengths("a:2") with pytest.raises(CommandLineError): parse_lengths("a") with pytest.raises(CommandLineError): parse_lengths("2:a") with pytest.raises(CommandLineError): parse_lengths(":") def test_setup_logging(): import logging logger = logging.getLogger(__name__) setup_logging(logger, log_to_stderr=False, quiet=False, minimal=False, debug=False) logger.info("Log message") setup_logging(logger, log_to_stderr=False, debug=1) setup_logging(logger, log_to_stderr=False, quiet=True) setup_logging(logger, log_to_stderr=False, minimal=True)
[ "cutadapt.__main__.parse_cutoffs", "cutadapt.__main__.main", "cutadapt.__main__.parse_lengths", "pytest.raises", "cutadapt.__main__.setup_logging", "logging.getLogger" ]
[((1244, 1271), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1261, 1271), False, 'import logging\n'), ((1276, 1363), 'cutadapt.__main__.setup_logging', 'setup_logging', (['logger'], {'log_to_stderr': '(False)', 'quiet': '(False)', 'minimal': '(False)', 'debug': '(False)'}), '(logger, log_to_stderr=False, quiet=False, minimal=False,\n debug=False)\n', (1289, 1363), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((1395, 1446), 'cutadapt.__main__.setup_logging', 'setup_logging', (['logger'], {'log_to_stderr': '(False)', 'debug': '(1)'}), '(logger, log_to_stderr=False, debug=1)\n', (1408, 1446), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((1451, 1505), 'cutadapt.__main__.setup_logging', 'setup_logging', (['logger'], {'log_to_stderr': '(False)', 'quiet': '(True)'}), '(logger, log_to_stderr=False, quiet=True)\n', (1464, 1505), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((1510, 1566), 'cutadapt.__main__.setup_logging', 'setup_logging', (['logger'], {'log_to_stderr': '(False)', 'minimal': '(True)'}), '(logger, log_to_stderr=False, minimal=True)\n', (1523, 1566), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((141, 166), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (154, 166), False, 'import pytest\n'), ((181, 197), 'cutadapt.__main__.main', 'main', (["['--help']"], {}), "(['--help'])\n", (185, 197), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((269, 287), 'cutadapt.__main__.parse_cutoffs', 'parse_cutoffs', (['"""5"""'], {}), "('5')\n", (282, 287), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((309, 329), 'cutadapt.__main__.parse_cutoffs', 'parse_cutoffs', (['"""6,7"""'], {}), "('6,7')\n", (322, 329), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((349, 380), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (362, 380), False, 'import pytest\n'), ((390, 410), 'cutadapt.__main__.parse_cutoffs', 'parse_cutoffs', (['"""a,7"""'], {}), "('a,7')\n", (403, 410), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((420, 451), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (433, 451), False, 'import pytest\n'), ((461, 479), 'cutadapt.__main__.parse_cutoffs', 'parse_cutoffs', (['"""a"""'], {}), "('a')\n", (474, 479), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((489, 520), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (502, 520), False, 'import pytest\n'), ((530, 550), 'cutadapt.__main__.parse_cutoffs', 'parse_cutoffs', (['"""a,7"""'], {}), "('a,7')\n", (543, 550), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((560, 591), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (573, 591), False, 'import pytest\n'), ((601, 623), 'cutadapt.__main__.parse_cutoffs', 'parse_cutoffs', (['"""1,2,3"""'], {}), "('1,2,3')\n", (614, 623), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((663, 682), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['"""25"""'], {}), "('25')\n", (676, 682), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((704, 726), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['"""17:25"""'], {}), "('17:25')\n", (717, 726), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((750, 770), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['"""25:"""'], {}), "('25:')\n", (763, 770), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((796, 816), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['""":25"""'], {}), "(':25')\n", (809, 816), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((840, 871), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (853, 871), False, 'import pytest\n'), ((881, 903), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['"""1:2:3"""'], {}), "('1:2:3')\n", (894, 903), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((913, 944), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (926, 944), False, 'import pytest\n'), ((954, 974), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['"""a:2"""'], {}), "('a:2')\n", (967, 974), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((984, 1015), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (997, 1015), False, 'import pytest\n'), ((1025, 1043), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['"""a"""'], {}), "('a')\n", (1038, 1043), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((1053, 1084), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (1066, 1084), False, 'import pytest\n'), ((1094, 1114), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['"""2:a"""'], {}), "('2:a')\n", (1107, 1114), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n'), ((1124, 1155), 'pytest.raises', 'pytest.raises', (['CommandLineError'], {}), '(CommandLineError)\n', (1137, 1155), False, 'import pytest\n'), ((1165, 1183), 'cutadapt.__main__.parse_lengths', 'parse_lengths', (['""":"""'], {}), "(':')\n", (1178, 1183), False, 'from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging\n')]
from pdf417 import encode, render_image, render_svg import io class BarcodeGen(): #OWN CLASS - BarcodeGen def generateBarcode(self, text): codes = encode(text, columns=7, security_level=4) image = render_image(codes, scale=4, ratio=3, fg_color="black", bg_color="#FFFFFF") image.show() def generateBarcodeForWeb(self, text): codes = encode(text, columns=7, security_level=4) image = render_image(codes, scale=4, ratio=3, fg_color="black", bg_color="#FFFFFF") return image
[ "pdf417.render_image", "pdf417.encode" ]
[((152, 193), 'pdf417.encode', 'encode', (['text'], {'columns': '(7)', 'security_level': '(4)'}), '(text, columns=7, security_level=4)\n', (158, 193), False, 'from pdf417 import encode, render_image, render_svg\n'), ((204, 279), 'pdf417.render_image', 'render_image', (['codes'], {'scale': '(4)', 'ratio': '(3)', 'fg_color': '"""black"""', 'bg_color': '"""#FFFFFF"""'}), "(codes, scale=4, ratio=3, fg_color='black', bg_color='#FFFFFF')\n", (216, 279), False, 'from pdf417 import encode, render_image, render_svg\n'), ((346, 387), 'pdf417.encode', 'encode', (['text'], {'columns': '(7)', 'security_level': '(4)'}), '(text, columns=7, security_level=4)\n', (352, 387), False, 'from pdf417 import encode, render_image, render_svg\n'), ((398, 473), 'pdf417.render_image', 'render_image', (['codes'], {'scale': '(4)', 'ratio': '(3)', 'fg_color': '"""black"""', 'bg_color': '"""#FFFFFF"""'}), "(codes, scale=4, ratio=3, fg_color='black', bg_color='#FFFFFF')\n", (410, 473), False, 'from pdf417 import encode, render_image, render_svg\n')]
import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.style.use('seaborn-deep') # Importing the dataset dataset = pd.read_csv('Salary_Data.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, 1].values # Training/testing from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=0) # Regressor from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train, y_train) y_pred = regressor.predict(X_test) # Visualisation plt.scatter(X_train, y_train, color = '#31b254') plt.plot(X_train, regressor.predict(X_train)) plt.title("Relationship between years of experience and salary") plt.xlabel("Years of experience") plt.ylabel("Salary") plt.show()
[ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "pandas.read_csv", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ]
[((71, 100), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-deep"""'], {}), "('seaborn-deep')\n", (84, 100), True, 'import matplotlib.pyplot as plt\n'), ((137, 167), 'pandas.read_csv', 'pd.read_csv', (['"""Salary_Data.csv"""'], {}), "('Salary_Data.csv')\n", (148, 167), True, 'import pandas as pd\n'), ((339, 394), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(1 / 3)', 'random_state': '(0)'}), '(X, y, test_size=1 / 3, random_state=0)\n', (355, 394), False, 'from sklearn.model_selection import train_test_split\n'), ((521, 539), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (537, 539), False, 'from sklearn.linear_model import LinearRegression\n'), ((624, 670), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_train', 'y_train'], {'color': '"""#31b254"""'}), "(X_train, y_train, color='#31b254')\n", (635, 670), True, 'import matplotlib.pyplot as plt\n'), ((719, 783), 'matplotlib.pyplot.title', 'plt.title', (['"""Relationship between years of experience and salary"""'], {}), "('Relationship between years of experience and salary')\n", (728, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years of experience"""'], {}), "('Years of experience')\n", (794, 817), True, 'import matplotlib.pyplot as plt\n'), ((818, 838), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary"""'], {}), "('Salary')\n", (828, 838), True, 'import matplotlib.pyplot as plt\n'), ((839, 849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (847, 849), True, 'import matplotlib.pyplot as plt\n')]
"""Generated client library for firestore version v1beta1.""" # NOTE: This file is autogenerated and should not be edited by hand. from apitools.base.py import base_api from googlecloudsdk.third_party.apis.firestore.v1beta1 import firestore_v1beta1_messages as messages class FirestoreV1beta1(base_api.BaseApiClient): """Generated client library for service firestore version v1beta1.""" MESSAGES_MODULE = messages BASE_URL = u'https://firestore.googleapis.com/' _PACKAGE = u'firestore' _SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/datastore'] _VERSION = u'v1beta1' _CLIENT_ID = '1042881264118.apps.googleusercontent.com' _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b' _USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b' _CLIENT_CLASS_NAME = u'FirestoreV1beta1' _URL_VERSION = u'v1beta1' _API_KEY = None def __init__(self, url='', credentials=None, get_credentials=True, http=None, model=None, log_request=False, log_response=False, credentials_args=None, default_global_params=None, additional_http_headers=None, response_encoding=None): """Create a new firestore handle.""" url = url or self.BASE_URL super(FirestoreV1beta1, self).__init__( url, credentials=credentials, get_credentials=get_credentials, http=http, model=model, log_request=log_request, log_response=log_response, credentials_args=credentials_args, default_global_params=default_global_params, additional_http_headers=additional_http_headers, response_encoding=response_encoding) self.projects_databases_documents = self.ProjectsDatabasesDocumentsService(self) self.projects_databases_indexes = self.ProjectsDatabasesIndexesService(self) self.projects_databases = self.ProjectsDatabasesService(self) self.projects = self.ProjectsService(self) class ProjectsDatabasesDocumentsService(base_api.BaseApiService): """Service class for the projects_databases_documents resource.""" _NAME = u'projects_databases_documents' def __init__(self, client): super(FirestoreV1beta1.ProjectsDatabasesDocumentsService, self).__init__(client) self._upload_configs = { } def BatchGet(self, request, global_params=None): r"""Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Args: request: (FirestoreProjectsDatabasesDocumentsBatchGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BatchGetDocumentsResponse) The response message. """ config = self.GetMethodConfig('BatchGet') return self._RunMethod( config, request, global_params=global_params) BatchGet.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:batchGet', http_method=u'POST', method_id=u'firestore.projects.databases.documents.batchGet', ordered_params=[u'database'], path_params=[u'database'], query_params=[], relative_path=u'v1beta1/{+database}/documents:batchGet', request_field=u'batchGetDocumentsRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsBatchGetRequest', response_type_name=u'BatchGetDocumentsResponse', supports_download=False, ) def BeginTransaction(self, request, global_params=None): r"""Starts a new transaction. Args: request: (FirestoreProjectsDatabasesDocumentsBeginTransactionRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BeginTransactionResponse) The response message. """ config = self.GetMethodConfig('BeginTransaction') return self._RunMethod( config, request, global_params=global_params) BeginTransaction.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction', http_method=u'POST', method_id=u'firestore.projects.databases.documents.beginTransaction', ordered_params=[u'database'], path_params=[u'database'], query_params=[], relative_path=u'v1beta1/{+database}/documents:beginTransaction', request_field=u'beginTransactionRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsBeginTransactionRequest', response_type_name=u'BeginTransactionResponse', supports_download=False, ) def Commit(self, request, global_params=None): r"""Commits a transaction, while optionally updating documents. Args: request: (FirestoreProjectsDatabasesDocumentsCommitRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (CommitResponse) The response message. """ config = self.GetMethodConfig('Commit') return self._RunMethod( config, request, global_params=global_params) Commit.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:commit', http_method=u'POST', method_id=u'firestore.projects.databases.documents.commit', ordered_params=[u'database'], path_params=[u'database'], query_params=[], relative_path=u'v1beta1/{+database}/documents:commit', request_field=u'commitRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsCommitRequest', response_type_name=u'CommitResponse', supports_download=False, ) def CreateDocument(self, request, global_params=None): r"""Creates a new document. Args: request: (FirestoreProjectsDatabasesDocumentsCreateDocumentRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Document) The response message. """ config = self.GetMethodConfig('CreateDocument') return self._RunMethod( config, request, global_params=global_params) CreateDocument.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}', http_method=u'POST', method_id=u'firestore.projects.databases.documents.createDocument', ordered_params=[u'parent', u'collectionId'], path_params=[u'collectionId', u'parent'], query_params=[u'documentId', u'mask_fieldPaths'], relative_path=u'v1beta1/{+parent}/{collectionId}', request_field=u'document', request_type_name=u'FirestoreProjectsDatabasesDocumentsCreateDocumentRequest', response_type_name=u'Document', supports_download=False, ) def Delete(self, request, global_params=None): r"""Deletes a document. Args: request: (FirestoreProjectsDatabasesDocumentsDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. """ config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}', http_method=u'DELETE', method_id=u'firestore.projects.databases.documents.delete', ordered_params=[u'name'], path_params=[u'name'], query_params=[u'currentDocument_exists', u'currentDocument_updateTime'], relative_path=u'v1beta1/{+name}', request_field='', request_type_name=u'FirestoreProjectsDatabasesDocumentsDeleteRequest', response_type_name=u'Empty', supports_download=False, ) def Get(self, request, global_params=None): r"""Gets a single document. Args: request: (FirestoreProjectsDatabasesDocumentsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Document) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}', http_method=u'GET', method_id=u'firestore.projects.databases.documents.get', ordered_params=[u'name'], path_params=[u'name'], query_params=[u'mask_fieldPaths', u'readTime', u'transaction'], relative_path=u'v1beta1/{+name}', request_field='', request_type_name=u'FirestoreProjectsDatabasesDocumentsGetRequest', response_type_name=u'Document', supports_download=False, ) def List(self, request, global_params=None): r"""Lists documents. Args: request: (FirestoreProjectsDatabasesDocumentsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListDocumentsResponse) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}', http_method=u'GET', method_id=u'firestore.projects.databases.documents.list', ordered_params=[u'parent', u'collectionId'], path_params=[u'collectionId', u'parent'], query_params=[u'mask_fieldPaths', u'orderBy', u'pageSize', u'pageToken', u'readTime', u'showMissing', u'transaction'], relative_path=u'v1beta1/{+parent}/{collectionId}', request_field='', request_type_name=u'FirestoreProjectsDatabasesDocumentsListRequest', response_type_name=u'ListDocumentsResponse', supports_download=False, ) def ListCollectionIds(self, request, global_params=None): r"""Lists all the collection IDs underneath a document. Args: request: (FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListCollectionIdsResponse) The response message. """ config = self.GetMethodConfig('ListCollectionIds') return self._RunMethod( config, request, global_params=global_params) ListCollectionIds.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds', http_method=u'POST', method_id=u'firestore.projects.databases.documents.listCollectionIds', ordered_params=[u'parent'], path_params=[u'parent'], query_params=[], relative_path=u'v1beta1/{+parent}:listCollectionIds', request_field=u'listCollectionIdsRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest', response_type_name=u'ListCollectionIdsResponse', supports_download=False, ) def Listen(self, request, global_params=None): r"""Listens to changes. Args: request: (FirestoreProjectsDatabasesDocumentsListenRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListenResponse) The response message. """ config = self.GetMethodConfig('Listen') return self._RunMethod( config, request, global_params=global_params) Listen.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:listen', http_method=u'POST', method_id=u'firestore.projects.databases.documents.listen', ordered_params=[u'database'], path_params=[u'database'], query_params=[], relative_path=u'v1beta1/{+database}/documents:listen', request_field=u'listenRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsListenRequest', response_type_name=u'ListenResponse', supports_download=False, ) def Patch(self, request, global_params=None): r"""Updates or inserts a document. Args: request: (FirestoreProjectsDatabasesDocumentsPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Document) The response message. """ config = self.GetMethodConfig('Patch') return self._RunMethod( config, request, global_params=global_params) Patch.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}', http_method=u'PATCH', method_id=u'firestore.projects.databases.documents.patch', ordered_params=[u'name'], path_params=[u'name'], query_params=[u'currentDocument_exists', u'currentDocument_updateTime', u'mask_fieldPaths', u'updateMask_fieldPaths'], relative_path=u'v1beta1/{+name}', request_field=u'document', request_type_name=u'FirestoreProjectsDatabasesDocumentsPatchRequest', response_type_name=u'Document', supports_download=False, ) def Rollback(self, request, global_params=None): r"""Rolls back a transaction. Args: request: (FirestoreProjectsDatabasesDocumentsRollbackRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. """ config = self.GetMethodConfig('Rollback') return self._RunMethod( config, request, global_params=global_params) Rollback.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:rollback', http_method=u'POST', method_id=u'firestore.projects.databases.documents.rollback', ordered_params=[u'database'], path_params=[u'database'], query_params=[], relative_path=u'v1beta1/{+database}/documents:rollback', request_field=u'rollbackRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsRollbackRequest', response_type_name=u'Empty', supports_download=False, ) def RunQuery(self, request, global_params=None): r"""Runs a query. Args: request: (FirestoreProjectsDatabasesDocumentsRunQueryRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (RunQueryResponse) The response message. """ config = self.GetMethodConfig('RunQuery') return self._RunMethod( config, request, global_params=global_params) RunQuery.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery', http_method=u'POST', method_id=u'firestore.projects.databases.documents.runQuery', ordered_params=[u'parent'], path_params=[u'parent'], query_params=[], relative_path=u'v1beta1/{+parent}:runQuery', request_field=u'runQueryRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsRunQueryRequest', response_type_name=u'RunQueryResponse', supports_download=False, ) def Write(self, request, global_params=None): r"""Streams batches of document updates and deletes, in order. Args: request: (FirestoreProjectsDatabasesDocumentsWriteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (WriteResponse) The response message. """ config = self.GetMethodConfig('Write') return self._RunMethod( config, request, global_params=global_params) Write.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:write', http_method=u'POST', method_id=u'firestore.projects.databases.documents.write', ordered_params=[u'database'], path_params=[u'database'], query_params=[], relative_path=u'v1beta1/{+database}/documents:write', request_field=u'writeRequest', request_type_name=u'FirestoreProjectsDatabasesDocumentsWriteRequest', response_type_name=u'WriteResponse', supports_download=False, ) class ProjectsDatabasesIndexesService(base_api.BaseApiService): """Service class for the projects_databases_indexes resource.""" _NAME = u'projects_databases_indexes' def __init__(self, client): super(FirestoreV1beta1.ProjectsDatabasesIndexesService, self).__init__(client) self._upload_configs = { } def Create(self, request, global_params=None): r"""Creates the specified index. A newly created index's initial state is `CREATING`. On completion of the returned google.longrunning.Operation, the state will be `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status. During creation, the process could result in an error, in which case the index will move to the `ERROR` state. The process can be recovered by fixing the data that caused the error, removing the index with delete, then re-creating the index with create. Indexes with a single field cannot be created. Args: request: (FirestoreProjectsDatabasesIndexesCreateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('Create') return self._RunMethod( config, request, global_params=global_params) Create.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes', http_method=u'POST', method_id=u'firestore.projects.databases.indexes.create', ordered_params=[u'parent'], path_params=[u'parent'], query_params=[], relative_path=u'v1beta1/{+parent}/indexes', request_field=u'googleFirestoreAdminV1beta1Index', request_type_name=u'FirestoreProjectsDatabasesIndexesCreateRequest', response_type_name=u'GoogleLongrunningOperation', supports_download=False, ) def Delete(self, request, global_params=None): r"""Deletes an index. Args: request: (FirestoreProjectsDatabasesIndexesDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. """ config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}', http_method=u'DELETE', method_id=u'firestore.projects.databases.indexes.delete', ordered_params=[u'name'], path_params=[u'name'], query_params=[], relative_path=u'v1beta1/{+name}', request_field='', request_type_name=u'FirestoreProjectsDatabasesIndexesDeleteRequest', response_type_name=u'Empty', supports_download=False, ) def Get(self, request, global_params=None): r"""Gets an index. Args: request: (FirestoreProjectsDatabasesIndexesGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleFirestoreAdminV1beta1Index) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}', http_method=u'GET', method_id=u'firestore.projects.databases.indexes.get', ordered_params=[u'name'], path_params=[u'name'], query_params=[], relative_path=u'v1beta1/{+name}', request_field='', request_type_name=u'FirestoreProjectsDatabasesIndexesGetRequest', response_type_name=u'GoogleFirestoreAdminV1beta1Index', supports_download=False, ) def List(self, request, global_params=None): r"""Lists the indexes that match the specified filters. Args: request: (FirestoreProjectsDatabasesIndexesListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleFirestoreAdminV1beta1ListIndexesResponse) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes', http_method=u'GET', method_id=u'firestore.projects.databases.indexes.list', ordered_params=[u'parent'], path_params=[u'parent'], query_params=[u'filter', u'pageSize', u'pageToken'], relative_path=u'v1beta1/{+parent}/indexes', request_field='', request_type_name=u'FirestoreProjectsDatabasesIndexesListRequest', response_type_name=u'GoogleFirestoreAdminV1beta1ListIndexesResponse', supports_download=False, ) class ProjectsDatabasesService(base_api.BaseApiService): """Service class for the projects_databases resource.""" _NAME = u'projects_databases' def __init__(self, client): super(FirestoreV1beta1.ProjectsDatabasesService, self).__init__(client) self._upload_configs = { } def ExportDocuments(self, request, global_params=None): r"""Exports a copy of all or a subset of documents from Google Cloud Firestore. to another storage system, such as Google Cloud Storage. Recent updates to documents may not be reflected in the export. The export occurs in the background and its progress can be monitored and managed via the Operation resource that is created. The output of an export may only be used once the associated operation is done. If an export operation is cancelled before completion it may leave partial data behind in Google Cloud Storage. Args: request: (FirestoreProjectsDatabasesExportDocumentsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('ExportDocuments') return self._RunMethod( config, request, global_params=global_params) ExportDocuments.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}:exportDocuments', http_method=u'POST', method_id=u'firestore.projects.databases.exportDocuments', ordered_params=[u'name'], path_params=[u'name'], query_params=[], relative_path=u'v1beta1/{+name}:exportDocuments', request_field=u'googleFirestoreAdminV1beta1ExportDocumentsRequest', request_type_name=u'FirestoreProjectsDatabasesExportDocumentsRequest', response_type_name=u'GoogleLongrunningOperation', supports_download=False, ) def ImportDocuments(self, request, global_params=None): r"""Imports documents into Google Cloud Firestore. Existing documents with the. same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore. Args: request: (FirestoreProjectsDatabasesImportDocumentsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('ImportDocuments') return self._RunMethod( config, request, global_params=global_params) ImportDocuments.method_config = lambda: base_api.ApiMethodInfo( flat_path=u'v1beta1/projects/{projectsId}/databases/{databasesId}:importDocuments', http_method=u'POST', method_id=u'firestore.projects.databases.importDocuments', ordered_params=[u'name'], path_params=[u'name'], query_params=[], relative_path=u'v1beta1/{+name}:importDocuments', request_field=u'googleFirestoreAdminV1beta1ImportDocumentsRequest', request_type_name=u'FirestoreProjectsDatabasesImportDocumentsRequest', response_type_name=u'GoogleLongrunningOperation', supports_download=False, ) class ProjectsService(base_api.BaseApiService): """Service class for the projects resource.""" _NAME = u'projects' def __init__(self, client): super(FirestoreV1beta1.ProjectsService, self).__init__(client) self._upload_configs = { }
[ "apitools.base.py.base_api.ApiMethodInfo" ]
[((2906, 3458), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents:batchGet"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.batchGet"""', 'ordered_params': "[u'database']", 'path_params': "[u'database']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+database}/documents:batchGet"""', 'request_field': 'u"""batchGetDocumentsRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsBatchGetRequest"""', 'response_type_name': 'u"""BatchGetDocumentsResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:batchGet'\n , http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.batchGet', ordered_params=[\n u'database'], path_params=[u'database'], query_params=[], relative_path\n =u'v1beta1/{+database}/documents:batchGet', request_field=\n u'batchGetDocumentsRequest', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsBatchGetRequest',\n response_type_name=u'BatchGetDocumentsResponse', supports_download=False)\n", (2928, 3458), False, 'from apitools.base.py import base_api\n'), ((4069, 4649), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.beginTransaction"""', 'ordered_params': "[u'database']", 'path_params': "[u'database']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+database}/documents:beginTransaction"""', 'request_field': 'u"""beginTransactionRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsBeginTransactionRequest"""', 'response_type_name': 'u"""BeginTransactionResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction'\n , http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.beginTransaction',\n ordered_params=[u'database'], path_params=[u'database'], query_params=[\n ], relative_path=u'v1beta1/{+database}/documents:beginTransaction',\n request_field=u'beginTransactionRequest', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsBeginTransactionRequest',\n response_type_name=u'BeginTransactionResponse', supports_download=False)\n", (4091, 4649), False, 'from apitools.base.py import base_api\n'), ((5246, 5768), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents:commit"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.commit"""', 'ordered_params': "[u'database']", 'path_params': "[u'database']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+database}/documents:commit"""', 'request_field': 'u"""commitRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsCommitRequest"""', 'response_type_name': 'u"""CommitResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:commit',\n http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.commit', ordered_params=[\n u'database'], path_params=[u'database'], query_params=[], relative_path\n =u'v1beta1/{+database}/documents:commit', request_field=\n u'commitRequest', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsCommitRequest', response_type_name\n =u'CommitResponse', supports_download=False)\n", (5268, 5768), False, 'from apitools.base.py import base_api\n'), ((6353, 6963), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.createDocument"""', 'ordered_params': "[u'parent', u'collectionId']", 'path_params': "[u'collectionId', u'parent']", 'query_params': "[u'documentId', u'mask_fieldPaths']", 'relative_path': 'u"""v1beta1/{+parent}/{collectionId}"""', 'request_field': 'u"""document"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsCreateDocumentRequest"""', 'response_type_name': 'u"""Document"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}'\n , http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.createDocument',\n ordered_params=[u'parent', u'collectionId'], path_params=[\n u'collectionId', u'parent'], query_params=[u'documentId',\n u'mask_fieldPaths'], relative_path=u'v1beta1/{+parent}/{collectionId}',\n request_field=u'document', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsCreateDocumentRequest',\n response_type_name=u'Document', supports_download=False)\n", (6375, 6963), False, 'from apitools.base.py import base_api\n'), ((7507, 8057), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}"""', 'http_method': 'u"""DELETE"""', 'method_id': 'u"""firestore.projects.databases.documents.delete"""', 'ordered_params': "[u'name']", 'path_params': "[u'name']", 'query_params': "[u'currentDocument_exists', u'currentDocument_updateTime']", 'relative_path': 'u"""v1beta1/{+name}"""', 'request_field': '""""""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsDeleteRequest"""', 'response_type_name': 'u"""Empty"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}'\n , http_method=u'DELETE', method_id=\n u'firestore.projects.databases.documents.delete', ordered_params=[\n u'name'], path_params=[u'name'], query_params=[\n u'currentDocument_exists', u'currentDocument_updateTime'],\n relative_path=u'v1beta1/{+name}', request_field='', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsDeleteRequest', response_type_name\n =u'Empty', supports_download=False)\n", (7529, 8057), False, 'from apitools.base.py import base_api\n'), ((8598, 9125), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}"""', 'http_method': 'u"""GET"""', 'method_id': 'u"""firestore.projects.databases.documents.get"""', 'ordered_params': "[u'name']", 'path_params': "[u'name']", 'query_params': "[u'mask_fieldPaths', u'readTime', u'transaction']", 'relative_path': 'u"""v1beta1/{+name}"""', 'request_field': '""""""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsGetRequest"""', 'response_type_name': 'u"""Document"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}'\n , http_method=u'GET', method_id=\n u'firestore.projects.databases.documents.get', ordered_params=[u'name'],\n path_params=[u'name'], query_params=[u'mask_fieldPaths', u'readTime',\n u'transaction'], relative_path=u'v1beta1/{+name}', request_field='',\n request_type_name=u'FirestoreProjectsDatabasesDocumentsGetRequest',\n response_type_name=u'Document', supports_download=False)\n", (8620, 9125), False, 'from apitools.base.py import base_api\n'), ((9684, 10361), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}"""', 'http_method': 'u"""GET"""', 'method_id': 'u"""firestore.projects.databases.documents.list"""', 'ordered_params': "[u'parent', u'collectionId']", 'path_params': "[u'collectionId', u'parent']", 'query_params': "[u'mask_fieldPaths', u'orderBy', u'pageSize', u'pageToken', u'readTime',\n u'showMissing', u'transaction']", 'relative_path': 'u"""v1beta1/{+parent}/{collectionId}"""', 'request_field': '""""""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsListRequest"""', 'response_type_name': 'u"""ListDocumentsResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}'\n , http_method=u'GET', method_id=\n u'firestore.projects.databases.documents.list', ordered_params=[\n u'parent', u'collectionId'], path_params=[u'collectionId', u'parent'],\n query_params=[u'mask_fieldPaths', u'orderBy', u'pageSize', u'pageToken',\n u'readTime', u'showMissing', u'transaction'], relative_path=\n u'v1beta1/{+parent}/{collectionId}', request_field='',\n request_type_name=u'FirestoreProjectsDatabasesDocumentsListRequest',\n response_type_name=u'ListDocumentsResponse', supports_download=False)\n", (9706, 10361), False, 'from apitools.base.py import base_api\n'), ((11001, 11600), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.listCollectionIds"""', 'ordered_params': "[u'parent']", 'path_params': "[u'parent']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+parent}:listCollectionIds"""', 'request_field': 'u"""listCollectionIdsRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest"""', 'response_type_name': 'u"""ListCollectionIdsResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds'\n , http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.listCollectionIds',\n ordered_params=[u'parent'], path_params=[u'parent'], query_params=[],\n relative_path=u'v1beta1/{+parent}:listCollectionIds', request_field=\n u'listCollectionIdsRequest', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest',\n response_type_name=u'ListCollectionIdsResponse', supports_download=False)\n", (11023, 11600), False, 'from apitools.base.py import base_api\n'), ((12157, 12679), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents:listen"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.listen"""', 'ordered_params': "[u'database']", 'path_params': "[u'database']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+database}/documents:listen"""', 'request_field': 'u"""listenRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsListenRequest"""', 'response_type_name': 'u"""ListenResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:listen',\n http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.listen', ordered_params=[\n u'database'], path_params=[u'database'], query_params=[], relative_path\n =u'v1beta1/{+database}/documents:listen', request_field=\n u'listenRequest', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsListenRequest', response_type_name\n =u'ListenResponse', supports_download=False)\n", (12179, 12679), False, 'from apitools.base.py import base_api\n'), ((13235, 13845), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}"""', 'http_method': 'u"""PATCH"""', 'method_id': 'u"""firestore.projects.databases.documents.patch"""', 'ordered_params': "[u'name']", 'path_params': "[u'name']", 'query_params': "[u'currentDocument_exists', u'currentDocument_updateTime',\n u'mask_fieldPaths', u'updateMask_fieldPaths']", 'relative_path': 'u"""v1beta1/{+name}"""', 'request_field': 'u"""document"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsPatchRequest"""', 'response_type_name': 'u"""Document"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}'\n , http_method=u'PATCH', method_id=\n u'firestore.projects.databases.documents.patch', ordered_params=[\n u'name'], path_params=[u'name'], query_params=[\n u'currentDocument_exists', u'currentDocument_updateTime',\n u'mask_fieldPaths', u'updateMask_fieldPaths'], relative_path=\n u'v1beta1/{+name}', request_field=u'document', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsPatchRequest', response_type_name=\n u'Document', supports_download=False)\n", (13257, 13845), False, 'from apitools.base.py import base_api\n'), ((14400, 14923), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents:rollback"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.rollback"""', 'ordered_params': "[u'database']", 'path_params': "[u'database']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+database}/documents:rollback"""', 'request_field': 'u"""rollbackRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsRollbackRequest"""', 'response_type_name': 'u"""Empty"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:rollback'\n , http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.rollback', ordered_params=[\n u'database'], path_params=[u'database'], query_params=[], relative_path\n =u'v1beta1/{+database}/documents:rollback', request_field=\n u'rollbackRequest', request_type_name=\n u'FirestoreProjectsDatabasesDocumentsRollbackRequest',\n response_type_name=u'Empty', supports_download=False)\n", (14422, 14923), False, 'from apitools.base.py import base_api\n'), ((15482, 16023), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.runQuery"""', 'ordered_params': "[u'parent']", 'path_params': "[u'parent']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+parent}:runQuery"""', 'request_field': 'u"""runQueryRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsRunQueryRequest"""', 'response_type_name': 'u"""RunQueryResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery'\n , http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.runQuery', ordered_params=[\n u'parent'], path_params=[u'parent'], query_params=[], relative_path=\n u'v1beta1/{+parent}:runQuery', request_field=u'runQueryRequest',\n request_type_name=u'FirestoreProjectsDatabasesDocumentsRunQueryRequest',\n response_type_name=u'RunQueryResponse', supports_download=False)\n", (15504, 16023), False, 'from apitools.base.py import base_api\n'), ((16618, 17127), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/documents:write"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.documents.write"""', 'ordered_params': "[u'database']", 'path_params': "[u'database']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+database}/documents:write"""', 'request_field': 'u"""writeRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesDocumentsWriteRequest"""', 'response_type_name': 'u"""WriteResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/documents:write',\n http_method=u'POST', method_id=\n u'firestore.projects.databases.documents.write', ordered_params=[\n u'database'], path_params=[u'database'], query_params=[], relative_path\n =u'v1beta1/{+database}/documents:write', request_field=u'writeRequest',\n request_type_name=u'FirestoreProjectsDatabasesDocumentsWriteRequest',\n response_type_name=u'WriteResponse', supports_download=False)\n", (16640, 17127), False, 'from apitools.base.py import base_api\n'), ((18569, 19094), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/indexes"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.indexes.create"""', 'ordered_params': "[u'parent']", 'path_params': "[u'parent']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+parent}/indexes"""', 'request_field': 'u"""googleFirestoreAdminV1beta1Index"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesIndexesCreateRequest"""', 'response_type_name': 'u"""GoogleLongrunningOperation"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes',\n http_method=u'POST', method_id=\n u'firestore.projects.databases.indexes.create', ordered_params=[\n u'parent'], path_params=[u'parent'], query_params=[], relative_path=\n u'v1beta1/{+parent}/indexes', request_field=\n u'googleFirestoreAdminV1beta1Index', request_type_name=\n u'FirestoreProjectsDatabasesIndexesCreateRequest', response_type_name=\n u'GoogleLongrunningOperation', supports_download=False)\n", (18591, 19094), False, 'from apitools.base.py import base_api\n'), ((19636, 20103), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}"""', 'http_method': 'u"""DELETE"""', 'method_id': 'u"""firestore.projects.databases.indexes.delete"""', 'ordered_params': "[u'name']", 'path_params': "[u'name']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+name}"""', 'request_field': '""""""', 'request_type_name': 'u"""FirestoreProjectsDatabasesIndexesDeleteRequest"""', 'response_type_name': 'u"""Empty"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}'\n , http_method=u'DELETE', method_id=\n u'firestore.projects.databases.indexes.delete', ordered_params=[u'name'\n ], path_params=[u'name'], query_params=[], relative_path=\n u'v1beta1/{+name}', request_field='', request_type_name=\n u'FirestoreProjectsDatabasesIndexesDeleteRequest', response_type_name=\n u'Empty', supports_download=False)\n", (19658, 20103), False, 'from apitools.base.py import base_api\n'), ((20661, 21145), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}"""', 'http_method': 'u"""GET"""', 'method_id': 'u"""firestore.projects.databases.indexes.get"""', 'ordered_params': "[u'name']", 'path_params': "[u'name']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+name}"""', 'request_field': '""""""', 'request_type_name': 'u"""FirestoreProjectsDatabasesIndexesGetRequest"""', 'response_type_name': 'u"""GoogleFirestoreAdminV1beta1Index"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes/{indexesId}'\n , http_method=u'GET', method_id=\n u'firestore.projects.databases.indexes.get', ordered_params=[u'name'],\n path_params=[u'name'], query_params=[], relative_path=\n u'v1beta1/{+name}', request_field='', request_type_name=\n u'FirestoreProjectsDatabasesIndexesGetRequest', response_type_name=\n u'GoogleFirestoreAdminV1beta1Index', supports_download=False)\n", (20683, 21145), False, 'from apitools.base.py import base_api\n'), ((21759, 22300), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}/indexes"""', 'http_method': 'u"""GET"""', 'method_id': 'u"""firestore.projects.databases.indexes.list"""', 'ordered_params': "[u'parent']", 'path_params': "[u'parent']", 'query_params': "[u'filter', u'pageSize', u'pageToken']", 'relative_path': 'u"""v1beta1/{+parent}/indexes"""', 'request_field': '""""""', 'request_type_name': 'u"""FirestoreProjectsDatabasesIndexesListRequest"""', 'response_type_name': 'u"""GoogleFirestoreAdminV1beta1ListIndexesResponse"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}/indexes',\n http_method=u'GET', method_id=\n u'firestore.projects.databases.indexes.list', ordered_params=[u'parent'\n ], path_params=[u'parent'], query_params=[u'filter', u'pageSize',\n u'pageToken'], relative_path=u'v1beta1/{+parent}/indexes',\n request_field='', request_type_name=\n u'FirestoreProjectsDatabasesIndexesListRequest', response_type_name=\n u'GoogleFirestoreAdminV1beta1ListIndexesResponse', supports_download=False)\n", (21781, 22300), False, 'from apitools.base.py import base_api\n'), ((23701, 24255), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}:exportDocuments"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.exportDocuments"""', 'ordered_params': "[u'name']", 'path_params': "[u'name']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+name}:exportDocuments"""', 'request_field': 'u"""googleFirestoreAdminV1beta1ExportDocumentsRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesExportDocumentsRequest"""', 'response_type_name': 'u"""GoogleLongrunningOperation"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}:exportDocuments',\n http_method=u'POST', method_id=\n u'firestore.projects.databases.exportDocuments', ordered_params=[\n u'name'], path_params=[u'name'], query_params=[], relative_path=\n u'v1beta1/{+name}:exportDocuments', request_field=\n u'googleFirestoreAdminV1beta1ExportDocumentsRequest', request_type_name\n =u'FirestoreProjectsDatabasesExportDocumentsRequest',\n response_type_name=u'GoogleLongrunningOperation', supports_download=False)\n", (23723, 24255), False, 'from apitools.base.py import base_api\n'), ((25192, 25746), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'flat_path': 'u"""v1beta1/projects/{projectsId}/databases/{databasesId}:importDocuments"""', 'http_method': 'u"""POST"""', 'method_id': 'u"""firestore.projects.databases.importDocuments"""', 'ordered_params': "[u'name']", 'path_params': "[u'name']", 'query_params': '[]', 'relative_path': 'u"""v1beta1/{+name}:importDocuments"""', 'request_field': 'u"""googleFirestoreAdminV1beta1ImportDocumentsRequest"""', 'request_type_name': 'u"""FirestoreProjectsDatabasesImportDocumentsRequest"""', 'response_type_name': 'u"""GoogleLongrunningOperation"""', 'supports_download': '(False)'}), "(flat_path=\n u'v1beta1/projects/{projectsId}/databases/{databasesId}:importDocuments',\n http_method=u'POST', method_id=\n u'firestore.projects.databases.importDocuments', ordered_params=[\n u'name'], path_params=[u'name'], query_params=[], relative_path=\n u'v1beta1/{+name}:importDocuments', request_field=\n u'googleFirestoreAdminV1beta1ImportDocumentsRequest', request_type_name\n =u'FirestoreProjectsDatabasesImportDocumentsRequest',\n response_type_name=u'GoogleLongrunningOperation', supports_download=False)\n", (25214, 25746), False, 'from apitools.base.py import base_api\n')]
from django.conf.urls import url from .views import (subjectpool_index, manage_experiment_session, get_session_events, manage_participant_attendance, send_invitations, get_invitations_count, invite_email_preview, experiment_session_signup, submit_experiment_session_signup, cancel_experiment_session_signup, download_experiment_session, add_participant) urlpatterns = [ url(r'^$', subjectpool_index, name='subjectpool_index'), url(r'^session/manage/(?P<pk>\-?\d+)$', manage_experiment_session, name='manage_experiment_session'), url(r'^session/detail/event/(?P<pk>\d+)$', manage_participant_attendance, name='session_event_detail'), url(r'^session/(?P<pk>\d+)/participant/add/$', add_participant, name='add_participant'), url(r'^session/invite$', send_invitations, name='send_invites'), url(r'^session/invite/count$', get_invitations_count, name='get_invitations_count'), url(r'^session/email-preview$', invite_email_preview, name='invite_email_preview'), url(r'^session/(?P<pk>\d+)/download/$', download_experiment_session, name='download_experiment_session'), url(r'^session/events$', get_session_events, name='session_events'), url(r'^signup/$', experiment_session_signup, name='experiment_session_signup'), url(r'^signup/submit/$', submit_experiment_session_signup, name='submit_experiment_session_signup'), url(r'^signup/cancel/$', cancel_experiment_session_signup, name='cancel_experiment_session_signup'), ]
[ "django.conf.urls.url" ]
[((436, 490), 'django.conf.urls.url', 'url', (['"""^$"""', 'subjectpool_index'], {'name': '"""subjectpool_index"""'}), "('^$', subjectpool_index, name='subjectpool_index')\n", (439, 490), False, 'from django.conf.urls import url\n'), ((497, 603), 'django.conf.urls.url', 'url', (['"""^session/manage/(?P<pk>\\\\-?\\\\d+)$"""', 'manage_experiment_session'], {'name': '"""manage_experiment_session"""'}), "('^session/manage/(?P<pk>\\\\-?\\\\d+)$', manage_experiment_session, name=\n 'manage_experiment_session')\n", (500, 603), False, 'from django.conf.urls import url\n'), ((603, 709), 'django.conf.urls.url', 'url', (['"""^session/detail/event/(?P<pk>\\\\d+)$"""', 'manage_participant_attendance'], {'name': '"""session_event_detail"""'}), "('^session/detail/event/(?P<pk>\\\\d+)$', manage_participant_attendance,\n name='session_event_detail')\n", (606, 709), False, 'from django.conf.urls import url\n'), ((711, 803), 'django.conf.urls.url', 'url', (['"""^session/(?P<pk>\\\\d+)/participant/add/$"""', 'add_participant'], {'name': '"""add_participant"""'}), "('^session/(?P<pk>\\\\d+)/participant/add/$', add_participant, name=\n 'add_participant')\n", (714, 803), False, 'from django.conf.urls import url\n'), ((804, 866), 'django.conf.urls.url', 'url', (['"""^session/invite$"""', 'send_invitations'], {'name': '"""send_invites"""'}), "('^session/invite$', send_invitations, name='send_invites')\n", (807, 866), False, 'from django.conf.urls import url\n'), ((873, 960), 'django.conf.urls.url', 'url', (['"""^session/invite/count$"""', 'get_invitations_count'], {'name': '"""get_invitations_count"""'}), "('^session/invite/count$', get_invitations_count, name=\n 'get_invitations_count')\n", (876, 960), False, 'from django.conf.urls import url\n'), ((962, 1048), 'django.conf.urls.url', 'url', (['"""^session/email-preview$"""', 'invite_email_preview'], {'name': '"""invite_email_preview"""'}), "('^session/email-preview$', invite_email_preview, name=\n 'invite_email_preview')\n", (965, 1048), False, 'from django.conf.urls import url\n'), ((1050, 1159), 'django.conf.urls.url', 'url', (['"""^session/(?P<pk>\\\\d+)/download/$"""', 'download_experiment_session'], {'name': '"""download_experiment_session"""'}), "('^session/(?P<pk>\\\\d+)/download/$', download_experiment_session, name=\n 'download_experiment_session')\n", (1053, 1159), False, 'from django.conf.urls import url\n'), ((1160, 1226), 'django.conf.urls.url', 'url', (['"""^session/events$"""', 'get_session_events'], {'name': '"""session_events"""'}), "('^session/events$', get_session_events, name='session_events')\n", (1163, 1226), False, 'from django.conf.urls import url\n'), ((1234, 1311), 'django.conf.urls.url', 'url', (['"""^signup/$"""', 'experiment_session_signup'], {'name': '"""experiment_session_signup"""'}), "('^signup/$', experiment_session_signup, name='experiment_session_signup')\n", (1237, 1311), False, 'from django.conf.urls import url\n'), ((1318, 1421), 'django.conf.urls.url', 'url', (['"""^signup/submit/$"""', 'submit_experiment_session_signup'], {'name': '"""submit_experiment_session_signup"""'}), "('^signup/submit/$', submit_experiment_session_signup, name=\n 'submit_experiment_session_signup')\n", (1321, 1421), False, 'from django.conf.urls import url\n'), ((1423, 1526), 'django.conf.urls.url', 'url', (['"""^signup/cancel/$"""', 'cancel_experiment_session_signup'], {'name': '"""cancel_experiment_session_signup"""'}), "('^signup/cancel/$', cancel_experiment_session_signup, name=\n 'cancel_experiment_session_signup')\n", (1426, 1526), False, 'from django.conf.urls import url\n')]
import math from vectors import Vector2 from vectors import Vector3 def get_car_facing_vector(car): pitch = float(car.rotation.pitch) yaw = float(car.rotation.yaw) facing_x = math.cos(pitch) * math.cos(yaw) facing_y = math.cos(pitch) * math.sin(yaw) return Vector2(facing_x, facing_y) def get_own_goal(agent): car = agent.car field_info = agent.get_field_info() team = 0 if field_info.goals[team].team_num != car.team: team = 1 return Vector3(field_info.goals[team].location) def get_opponents_goal(agent): car = agent.car field_info = agent.get_field_info() goal = Vector2(0, 0) team = 1 if field_info.goals[team].team_num == car.team: team = 0 return Vector3(field_info.goals[team].location) def time_needed_for_car(agent, car_to): car = agent.car difference = car.pos - car_to length = difference.magnitude() speed = get_xy_speed(agent) if speed == 0: speed = 0.00000000000000001 duration = length/speed return duration def predict_time_needed_for_car(agent, car_to): car = agent.car difference = car.pos - car_to length = difference.magnitude() speed = 1500 if speed == 0: speed = 0.00000000000000001 duration = length/speed return duration def own_color(self, packet): # get right color if packet.game_cars[self.index].team: color = self.renderer.create_color(255, 255, 127, 80) else: color = self.renderer.create_color(255, 22, 138, 255) return color def get_xy_speed(agent): car = agent.car car_xy_velocity = Vector3(car.velocity).to_2d() car_xy_velocity_magnitude = car_xy_velocity.magnitude() return car_xy_velocity_magnitude def difference_angles(angle1, angle2): angle1 = math.degrees(angle1) angle2 = math.degrees(angle2) angle1 = normalize_angle(angle1) angle2 = normalize_angle(angle2) difference = angle1 - angle2 if difference > 180: difference = 360 - difference return math.radians(difference) def normalize_angle(angle): while angle < 0: angle += 360 while angle >= 360: angle -= 360 return angle def get_car_speed(self, packet): my_car = packet.game_cars[self.index] def aim_to(agent, to, plus=0): car = agent.car car_direction = get_car_facing_vector(car) magnitude = Vector3(car.pos - to).magnitude() steer_correction = car_direction.correction_to(to.to_2d() - car.pos.to_2d()) z_correction = Vector3(car.pos - to).angle('z') draw_text(agent, str(math.degrees(z_correction)), 100) steer_correction *= -5 steer_correction += plus # aerial if to.z - car.pos.z > 500 and car.boost > 50 and agent.car_status != 'dribble': if math.degrees(z_correction) > 10 and to.z - car.pos.z > 500: # jump if still on ground if car.pos.z < 17.1: agent.jumps.append(1) print(car.pos.x, car.pos.y) # enable boost agent.controller_state.boost = True # sigmoid and correct agent.controller_state.pitch = cap_num((z_correction-car.rotation.pitch)+0.9, -1, 1) # if close to going to fly stop boost elif math.degrees(z_correction) > 4: agent.controller_state.boost = False # Drift if needs to steer much if abs(steer_correction) > 7: agent.controller_state.handbrake = True agent.controller_state.steer = cap_num(steer_correction, -1, 1) def double_jump(self): self.jumps.append(1) self.jumps.append(3) return self def more_colors(agent, color): if color == 'black': color = [255,255,255] if color == 'white': color = [0,0,0] if color == 'red': color = [255,0,0] if color == 'blue': color = [0,0,255] if color == 'green': color = [0,255,0] if color == 'own': if agent.car.team: color = [255, 127, 80] else: color = [22, 138, 255] return color def draw_text(agent, text, y): agent.renderer.draw_string_2d(0, y, 1, 1, text,agent.renderer.create_color(255, 255, 255, 255)) def sigmoid(x): return (1 / (1 + math.exp(-x)))*2 - 1 def cap_num(x, mini, maxi): if x > maxi: x = maxi if x < mini: x = mini return x def black(agent): return agent.renderer.create_color(255, 0, 0, 0) def white(agent): return agent.renderer.create_color(255, 255, 255, 255)
[ "math.exp", "vectors.Vector2", "math.radians", "math.sin", "math.cos", "math.degrees", "vectors.Vector3" ]
[((280, 307), 'vectors.Vector2', 'Vector2', (['facing_x', 'facing_y'], {}), '(facing_x, facing_y)\n', (287, 307), False, 'from vectors import Vector2\n'), ((479, 519), 'vectors.Vector3', 'Vector3', (['field_info.goals[team].location'], {}), '(field_info.goals[team].location)\n', (486, 519), False, 'from vectors import Vector3\n'), ((623, 636), 'vectors.Vector2', 'Vector2', (['(0)', '(0)'], {}), '(0, 0)\n', (630, 636), False, 'from vectors import Vector2\n'), ((722, 762), 'vectors.Vector3', 'Vector3', (['field_info.goals[team].location'], {}), '(field_info.goals[team].location)\n', (729, 762), False, 'from vectors import Vector3\n'), ((1766, 1786), 'math.degrees', 'math.degrees', (['angle1'], {}), '(angle1)\n', (1778, 1786), False, 'import math\n'), ((1800, 1820), 'math.degrees', 'math.degrees', (['angle2'], {}), '(angle2)\n', (1812, 1820), False, 'import math\n'), ((1994, 2018), 'math.radians', 'math.radians', (['difference'], {}), '(difference)\n', (2006, 2018), False, 'import math\n'), ((189, 204), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (197, 204), False, 'import math\n'), ((207, 220), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (215, 220), False, 'import math\n'), ((236, 251), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (244, 251), False, 'import math\n'), ((254, 267), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (262, 267), False, 'import math\n'), ((1586, 1607), 'vectors.Vector3', 'Vector3', (['car.velocity'], {}), '(car.velocity)\n', (1593, 1607), False, 'from vectors import Vector3\n'), ((2329, 2350), 'vectors.Vector3', 'Vector3', (['(car.pos - to)'], {}), '(car.pos - to)\n', (2336, 2350), False, 'from vectors import Vector3\n'), ((2463, 2484), 'vectors.Vector3', 'Vector3', (['(car.pos - to)'], {}), '(car.pos - to)\n', (2470, 2484), False, 'from vectors import Vector3\n'), ((2521, 2547), 'math.degrees', 'math.degrees', (['z_correction'], {}), '(z_correction)\n', (2533, 2547), False, 'import math\n'), ((2720, 2746), 'math.degrees', 'math.degrees', (['z_correction'], {}), '(z_correction)\n', (2732, 2746), False, 'import math\n'), ((3198, 3224), 'math.degrees', 'math.degrees', (['z_correction'], {}), '(z_correction)\n', (3210, 3224), False, 'import math\n'), ((4122, 4134), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (4130, 4134), False, 'import math\n')]
import cv2 import os import numpy as np from PIL import Image import picamera.array from picamera import PiCamera class Face(object): training_count = 5 threshold = 30 def __init__(self, casc_path, path="./passwords", camera_port=0): self.path = path self._cascade = cv2.CascadeClassifier(casc_path) self._port = camera_port def __del__(self): cv2.destroyAllWindows() def _capture_image(self): """ Throw away frames so we can let the camera adjust :return: list(list()) """ with PiCamera() as camera: with picamera.array.PiRGBArray(camera) as stream: camera.resolution = (640, 480) camera.capture(stream, 'bgr', use_video_port=True) return cv2.cv.cvtColor(stream.array, cv2.COLOR_BGR2GRAY) def _get_faces_and_frames(self): frame = self._capture_image() faces = self._cascade.detectMultiScale( frame, scaleFactor=1.2, minNeighbors=5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE ) return faces, frame def _get_training_faf(self): """ :yield: faces, frame Gets all images required for training. * Note Won't stop getting images unless there is only one face per image. """ count = 0 error_count = 0 while count < self.training_count: # Ensures that we get at least self.training_count images error_count += 1 faces, frame = self._get_faces_and_frames() if len(faces) == 1: yield faces, frame count += 1 elif error_count >= 10: break def can_unlock(self): """ Will return false under the following conditions: 1. More than one face in the image 2. No images in password file 3. Face is not recognized :return: True if face is recognized False if face is not recognized """ face, frame = self._get_faces_and_frames() # Don't allow more than 1 face in the image if len(face) != 1: return False x, y, w, h = face[0] face = frame[y: y + h, x: x + w] recognizer = cv2.face.createLBPHFaceRecognizer() paths = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith("bmp")] if not paths: return False # Return since there are no images saved as a password # images will contains face images images = [] # labels will contains the label that is assigned to the image labels = [] nbr = 0 for image_path in paths: # Read the image image_pil = Image.open(image_path) # Convert the image format into numpy array image = np.array(image_pil, 'uint8') images.append(image) labels.append(nbr) nbr += 1 cv2.destroyAllWindows() # Perform the tranining recognizer.train(images, np.array(labels)) nbr_predicted, conf = recognizer.predict(face) if conf < self.threshold: return True return False def new_pass(self): count = 0 for face, frame in self._get_training_faf(): filename = "".join(["passwords/", str(count), ".bmp"]) x, y, w, h = face[0] frame = frame[y: y + h, x: x + w] count += 1 cv2.imwrite(filename, frame) def secure_new_pass(self): if self.can_unlock(): self.new_pass()
[ "cv2.cv.cvtColor", "cv2.imwrite", "PIL.Image.open", "cv2.face.createLBPHFaceRecognizer", "numpy.array", "cv2.CascadeClassifier", "cv2.destroyAllWindows", "os.path.join", "os.listdir", "picamera.PiCamera" ]
[((301, 333), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['casc_path'], {}), '(casc_path)\n', (322, 333), False, 'import cv2\n'), ((399, 422), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (420, 422), False, 'import cv2\n'), ((2335, 2370), 'cv2.face.createLBPHFaceRecognizer', 'cv2.face.createLBPHFaceRecognizer', ([], {}), '()\n', (2368, 2370), False, 'import cv2\n'), ((3054, 3077), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3075, 3077), False, 'import cv2\n'), ((579, 589), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (587, 589), False, 'from picamera import PiCamera\n'), ((2389, 2415), 'os.path.join', 'os.path.join', (['self.path', 'f'], {}), '(self.path, f)\n', (2401, 2415), False, 'import os\n'), ((2831, 2853), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (2841, 2853), False, 'from PIL import Image\n'), ((2930, 2958), 'numpy.array', 'np.array', (['image_pil', '"""uint8"""'], {}), "(image_pil, 'uint8')\n", (2938, 2958), True, 'import numpy as np\n'), ((3143, 3159), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3151, 3159), True, 'import numpy as np\n'), ((3577, 3605), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'frame'], {}), '(filename, frame)\n', (3588, 3605), False, 'import cv2\n'), ((800, 849), 'cv2.cv.cvtColor', 'cv2.cv.cvtColor', (['stream.array', 'cv2.COLOR_BGR2GRAY'], {}), '(stream.array, cv2.COLOR_BGR2GRAY)\n', (815, 849), False, 'import cv2\n'), ((2425, 2446), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (2435, 2446), False, 'import os\n')]
# -*- coding: utf-8 -*- ''' Mrknow TV Add-on Copyright (C) 2016 mrknow This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import urlparse,base64,urllib import re, time, datetime import json from resources.lib.lib import control from resources.lib.lib import client from resources.lib.lib import stale def get(url, params={}): try: params['api_id'] = stale.pierwszatv_apiid params['checksum'] = stale.pierwszatv_checksum url = urlparse.urljoin('http://pierwsza.tv', url) url = url + '?' + urllib.urlencode(params) headers = {'Content-Type': 'application/json'} result = client.request(url, headers=headers, output='response', error=True) if not (result[0] == '401' or result[0] == '405'): return result[1] result = client.request(url, headers=headers) #control.log('ZZZZZZZZ PIerwsza result: %s' % result) return result except: pass def getstream(id): try: control.set_setting('pierwszatv.tokenExpireIn', '') control.set_setting('pierwszatv.serverId', '') control.set_setting('pierwszatv.streamId', '') control.set_setting('pierwszatv.token', '') if getPierwszaCredentialsInfo() == False: if control.yesnoDialog(control.lang(40003).encode('utf-8'), control.lang(30481).encode('utf-8'), '', 'Trakt', control.lang(30483).encode('utf-8'), control.lang(30482).encode('utf-8')): control.set_setting('pierwszatv.user', '') control.set_setting('pierwszatv.password', '') control.openSettings('1.4') raise Exception() url = '/api/stream/create' params = {} params['id'] =id params['user'] =control.setting('pierwszatv.user').strip() params['password'] = urllib.quote_plus(control.setting('pierwszatv.password')) result = get(url, params) control.log('x1x1x1: %s' % result) result = json.loads(result) if result['status'] == 'ok': #time.sleep(1) expirein = int(int(result['tokenExpireIn'])*0.75) expirewhen = datetime.datetime.now() + datetime.timedelta(seconds=expirein) control.set_setting('pierwszatv.tokenExpireIn', str(int(time.mktime(expirewhen.timetuple())))) control.set_setting('pierwszatv.serverId', result['serverId']) control.set_setting('pierwszatv.streamId', result['streamId']) control.set_setting('pierwszatv.token', result['token']) for i in range(0, 5): try: r = get('/api/stream/status', {'serverId': result['serverId'] , 'streamId': result['streamId'], 'token': result['token']}) r = json.loads(r) if r['status'] == 'ok': #control.infoDialog(control.lang(30489).encode('utf-8'), time=6000) for j in range(0, 20): time.sleep(1) control.infoDialog(control.lang(30489).encode('utf-8'), time=500) try: result2 = client.request(r['source']+'?token='+result['token'],safe=True, timeout='2') control.log('Pierwsza link check nr: %s: result:%s' % (j,result2)) if result2 == None: raise Exception() else: return r['source']+'?token='+result['token'] except: pass return r['source']+'?token='+result['token'] time.sleep(3) except: pass if result['status'] == 'error': control.infoDialog('%s' % result['message'].encode('utf-8')) control.dialog.ok(control.addonInfo('name'), result['message'].encode('utf-8'), '') return None except Exception as e: control.log('Error pierwsza.getstream %s' % e ) def getPierwszaCredentialsInfo(): user = control.setting('pierwszatv.user').strip() password = control.setting('pierwszatv.password') if (user == '' or password == ''): return False return True def streamrefresh(): try: #mynow = int(datetime.datetime.now().strftime('%s')) mynow = int(str(int(time.mktime(datetime.datetime.now().timetuple())))) expired = int(control.get_setting('pierwszatv.tokenExpireIn')) #control.log('XXXX Exp:%s Now:%s' % (expired, mynow)) if mynow>expired: control.log('Pierwsza refresh') url = '/api/stream/refresh' params = {} params['serverId'] =control.get_setting('pierwszatv.serverId') params['streamId'] =control.get_setting('pierwszatv.streamId') params['token'] = control.get_setting('pierwszatv.token') result = get(url, params) result = json.loads(result) expirein = int(int(result['tokenExpireIn'])*0.75) expirewhen = datetime.datetime.now() + datetime.timedelta(seconds=expirein) control.set_setting('pierwszatv.tokenExpireIn', str(int(time.mktime(expirewhen.timetuple())))) except Exception as e: control.log('Error pierwsza.refresh %s' % e ) raise Exception() def chanels(): items = [] try: result = get('/api/channels') result = json.loads(result) for i in result['channels']: try: items.append(i) except: pass if len(items) == 0: items = result except: control.log('Error pierwsza.chanels' ) pass return items
[ "resources.lib.lib.control.setting", "resources.lib.lib.control.log", "json.loads", "resources.lib.lib.control.addonInfo", "resources.lib.lib.control.lang", "resources.lib.lib.control.get_setting", "datetime.datetime.now", "time.sleep", "urlparse.urljoin", "datetime.timedelta", "urllib.urlencode", "resources.lib.lib.control.set_setting", "resources.lib.lib.client.request", "resources.lib.lib.control.openSettings" ]
[((4747, 4785), 'resources.lib.lib.control.setting', 'control.setting', (['"""pierwszatv.password"""'], {}), "('pierwszatv.password')\n", (4762, 4785), False, 'from resources.lib.lib import control\n'), ((1075, 1118), 'urlparse.urljoin', 'urlparse.urljoin', (['"""http://pierwsza.tv"""', 'url'], {}), "('http://pierwsza.tv', url)\n", (1091, 1118), False, 'import urlparse, base64, urllib\n'), ((1243, 1310), 'resources.lib.lib.client.request', 'client.request', (['url'], {'headers': 'headers', 'output': '"""response"""', 'error': '(True)'}), "(url, headers=headers, output='response', error=True)\n", (1257, 1310), False, 'from resources.lib.lib import client\n'), ((1405, 1441), 'resources.lib.lib.client.request', 'client.request', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1419, 1441), False, 'from resources.lib.lib import client\n'), ((1589, 1640), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.tokenExpireIn"""', '""""""'], {}), "('pierwszatv.tokenExpireIn', '')\n", (1608, 1640), False, 'from resources.lib.lib import control\n'), ((1649, 1695), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.serverId"""', '""""""'], {}), "('pierwszatv.serverId', '')\n", (1668, 1695), False, 'from resources.lib.lib import control\n'), ((1704, 1750), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.streamId"""', '""""""'], {}), "('pierwszatv.streamId', '')\n", (1723, 1750), False, 'from resources.lib.lib import control\n'), ((1759, 1802), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.token"""', '""""""'], {}), "('pierwszatv.token', '')\n", (1778, 1802), False, 'from resources.lib.lib import control\n'), ((2525, 2559), 'resources.lib.lib.control.log', 'control.log', (["('x1x1x1: %s' % result)"], {}), "('x1x1x1: %s' % result)\n", (2536, 2559), False, 'from resources.lib.lib import control\n'), ((2577, 2595), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (2587, 2595), False, 'import json\n'), ((6052, 6070), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (6062, 6070), False, 'import json\n'), ((1145, 1169), 'urllib.urlencode', 'urllib.urlencode', (['params'], {}), '(params)\n', (1161, 1169), False, 'import urlparse, base64, urllib\n'), ((2442, 2480), 'resources.lib.lib.control.setting', 'control.setting', (['"""pierwszatv.password"""'], {}), "('pierwszatv.password')\n", (2457, 2480), False, 'from resources.lib.lib import control\n'), ((2932, 2994), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.serverId"""', "result['serverId']"], {}), "('pierwszatv.serverId', result['serverId'])\n", (2951, 2994), False, 'from resources.lib.lib import control\n'), ((3007, 3069), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.streamId"""', "result['streamId']"], {}), "('pierwszatv.streamId', result['streamId'])\n", (3026, 3069), False, 'from resources.lib.lib import control\n'), ((3082, 3138), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.token"""', "result['token']"], {}), "('pierwszatv.token', result['token'])\n", (3101, 3138), False, 'from resources.lib.lib import control\n'), ((4594, 4640), 'resources.lib.lib.control.log', 'control.log', (["('Error pierwsza.getstream %s' % e)"], {}), "('Error pierwsza.getstream %s' % e)\n", (4605, 4640), False, 'from resources.lib.lib import control\n'), ((4689, 4723), 'resources.lib.lib.control.setting', 'control.setting', (['"""pierwszatv.user"""'], {}), "('pierwszatv.user')\n", (4704, 4723), False, 'from resources.lib.lib import control\n'), ((5048, 5095), 'resources.lib.lib.control.get_setting', 'control.get_setting', (['"""pierwszatv.tokenExpireIn"""'], {}), "('pierwszatv.tokenExpireIn')\n", (5067, 5095), False, 'from resources.lib.lib import control\n'), ((5198, 5229), 'resources.lib.lib.control.log', 'control.log', (['"""Pierwsza refresh"""'], {}), "('Pierwsza refresh')\n", (5209, 5229), False, 'from resources.lib.lib import control\n'), ((5326, 5368), 'resources.lib.lib.control.get_setting', 'control.get_setting', (['"""pierwszatv.serverId"""'], {}), "('pierwszatv.serverId')\n", (5345, 5368), False, 'from resources.lib.lib import control\n'), ((5401, 5443), 'resources.lib.lib.control.get_setting', 'control.get_setting', (['"""pierwszatv.streamId"""'], {}), "('pierwszatv.streamId')\n", (5420, 5443), False, 'from resources.lib.lib import control\n'), ((5474, 5513), 'resources.lib.lib.control.get_setting', 'control.get_setting', (['"""pierwszatv.token"""'], {}), "('pierwszatv.token')\n", (5493, 5513), False, 'from resources.lib.lib import control\n'), ((5573, 5591), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (5583, 5591), False, 'import json\n'), ((5884, 5928), 'resources.lib.lib.control.log', 'control.log', (["('Error pierwsza.refresh %s' % e)"], {}), "('Error pierwsza.refresh %s' % e)\n", (5895, 5928), False, 'from resources.lib.lib import control\n'), ((6273, 6310), 'resources.lib.lib.control.log', 'control.log', (['"""Error pierwsza.chanels"""'], {}), "('Error pierwsza.chanels')\n", (6284, 6310), False, 'from resources.lib.lib import control\n'), ((2067, 2109), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.user"""', '""""""'], {}), "('pierwszatv.user', '')\n", (2086, 2109), False, 'from resources.lib.lib import control\n'), ((2126, 2172), 'resources.lib.lib.control.set_setting', 'control.set_setting', (['"""pierwszatv.password"""', '""""""'], {}), "('pierwszatv.password', '')\n", (2145, 2172), False, 'from resources.lib.lib import control\n'), ((2189, 2216), 'resources.lib.lib.control.openSettings', 'control.openSettings', (['"""1.4"""'], {}), "('1.4')\n", (2209, 2216), False, 'from resources.lib.lib import control\n'), ((2352, 2386), 'resources.lib.lib.control.setting', 'control.setting', (['"""pierwszatv.user"""'], {}), "('pierwszatv.user')\n", (2367, 2386), False, 'from resources.lib.lib import control\n'), ((2750, 2773), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2771, 2773), False, 'import re, time, datetime\n'), ((2776, 2812), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'expirein'}), '(seconds=expirein)\n', (2794, 2812), False, 'import re, time, datetime\n'), ((4471, 4496), 'resources.lib.lib.control.addonInfo', 'control.addonInfo', (['"""name"""'], {}), "('name')\n", (4488, 4496), False, 'from resources.lib.lib import control\n'), ((5679, 5702), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5700, 5702), False, 'import re, time, datetime\n'), ((5705, 5741), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'expirein'}), '(seconds=expirein)\n', (5723, 5741), False, 'import re, time, datetime\n'), ((3362, 3375), 'json.loads', 'json.loads', (['r'], {}), '(r)\n', (3372, 3375), False, 'import json\n'), ((4264, 4277), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4274, 4277), False, 'import re, time, datetime\n'), ((1889, 1908), 'resources.lib.lib.control.lang', 'control.lang', (['(40003)'], {}), '(40003)\n', (1901, 1908), False, 'from resources.lib.lib import control\n'), ((1926, 1945), 'resources.lib.lib.control.lang', 'control.lang', (['(30481)'], {}), '(30481)\n', (1938, 1945), False, 'from resources.lib.lib import control\n'), ((1976, 1995), 'resources.lib.lib.control.lang', 'control.lang', (['(30483)'], {}), '(30483)\n', (1988, 1995), False, 'from resources.lib.lib import control\n'), ((2013, 2032), 'resources.lib.lib.control.lang', 'control.lang', (['(30482)'], {}), '(30482)\n', (2025, 2032), False, 'from resources.lib.lib import control\n'), ((3587, 3600), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3597, 3600), False, 'import re, time, datetime\n'), ((3770, 3855), 'resources.lib.lib.client.request', 'client.request', (["(r['source'] + '?token=' + result['token'])"], {'safe': '(True)', 'timeout': '"""2"""'}), "(r['source'] + '?token=' + result['token'], safe=True,\n timeout='2')\n", (3784, 3855), False, 'from resources.lib.lib import client\n'), ((3879, 3946), 'resources.lib.lib.control.log', 'control.log', (["('Pierwsza link check nr: %s: result:%s' % (j, result2))"], {}), "('Pierwsza link check nr: %s: result:%s' % (j, result2))\n", (3890, 3946), False, 'from resources.lib.lib import control\n'), ((4986, 5009), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5007, 5009), False, 'import re, time, datetime\n'), ((3648, 3667), 'resources.lib.lib.control.lang', 'control.lang', (['(30489)'], {}), '(30489)\n', (3660, 3667), False, 'from resources.lib.lib import control\n')]
# Generated by Django 3.2.11 on 2022-02-02 01:04 import django.contrib.gis.db.models.fields from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Property', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('direction', models.CharField(max_length=180)), ('number', models.PositiveIntegerField(blank=True, null=True)), ('location', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)), ], ), ]
[ "django.db.models.BigAutoField", "django.db.models.PositiveIntegerField", "django.db.models.CharField" ]
[((348, 444), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (367, 444), False, 'from django.db import migrations, models\n'), ((468, 499), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (484, 499), False, 'from django.db import migrations, models\n'), ((532, 564), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(180)'}), '(max_length=180)\n', (548, 564), False, 'from django.db import migrations, models\n'), ((594, 644), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (621, 644), False, 'from django.db import migrations, models\n')]
#!/usr/bin/env python import requests import re import sys import dlrnapi_client import influxdb_utils import json from promoter_utils import get_dlrn_instance_for_release from diskcache import Cache cache = Cache('/tmp/skipped_promotions_cache') cache.expire() promoter_skipping_regex = re.compile( ('.*promoter Skipping promotion of (.*) from (.*) to (.*), ' 'missing successful jobs: (.*)') ) html_link = "<a href='{}' target='_blank' >{}</a>" def get_failing_jobs_html(dlrn_hashes, release_name): failing_jobs_html = "" # If any of the jobs is still in progress in_progress = False try: dlrn = get_dlrn_instance_for_release(release_name) if dlrn: params = dlrnapi_client.Params2() params.commit_hash = dlrn_hashes['commit_hash'] params.distro_hash = dlrn_hashes['distro_hash'] params.success = str(False) failing_jobs = dlrn.api_repo_status_get(params) if len(failing_jobs) > 0: for i, failing_job in enumerate(failing_jobs): if failing_job.in_progress: in_progress = True failing_job_ln = html_link.format( failing_job.url, failing_job.job_id) if i > 0: failing_job_ln += "<br>" failing_jobs_html += failing_job_ln else: failing_jobs_html = ("<font color='red'>WARNING</font> " "expected perodic jobs have not run") except Exception as e: print(e) pass return (in_progress, failing_jobs_html) # FIXME: Use a decorator ? def get_cached_failing_jobs_html(dlrn_hashes, release_name): cache_key = "failing_jobs_html_{timestamp}_{repo_hash}".format( **dlrn_hashes) if cache_key not in cache: in_progress, failing_jobs_html = get_failing_jobs_html( dlrn_hashes, release_name) # Only chache if jobs have finished if not in_progress: cache.add(cache_key, failing_jobs_html, expire=259200) return cache[cache_key] def parse_skipped_promotions(release_name): skipped_promotions = [] promoter_logs = requests.get( "http://38.145.34.55/{}.log".format(release_name)) def get_log_time(log_line): log_line_splitted = log_line.split() log_time = "{} {}".format(log_line_splitted[0], log_line_splitted[1]) log_time = log_time.split(',')[0] return log_time for log_line in promoter_logs.iter_lines(): matched_regex = promoter_skipping_regex.match(log_line) if matched_regex: promotion = json.loads(matched_regex.group(1).replace("'", '"')) repo_hash = promotion['full_hash'] failing_jobs = matched_regex.group(3) skipped_promotion = { 'repo_hash': repo_hash, 'from_name': matched_regex.group(2), 'to_name': matched_regex.group(3), 'failing_jobs': failing_jobs, 'timestamp': get_log_time(log_line), 'release': release_name } skipped_promotions.append(skipped_promotion) return skipped_promotions def to_influxdb(skipped_promotions): influxdb_lines = [] influxdb_format = ( "skipped-promotions,repo_hash={repo_hash}" ",release={release},from_name={from_name}," "to_name={to_name} failing_jobs=\"{failing_jobs}\" " "{timestamp}") for skipped_promotion in skipped_promotions: skipped_promotion['timestamp'] = influxdb_utils.format_ts_from_str( skipped_promotion['timestamp']) influxdb_lines.append(influxdb_format.format(**skipped_promotion)) return influxdb_lines def main(): release = sys.argv[1] influxdb_lines = to_influxdb(parse_skipped_promotions(release)) print('\n'.join(influxdb_lines)) if __name__ == '__main__': main()
[ "promoter_utils.get_dlrn_instance_for_release", "influxdb_utils.format_ts_from_str", "dlrnapi_client.Params2", "diskcache.Cache", "re.compile" ]
[((210, 248), 'diskcache.Cache', 'Cache', (['"""/tmp/skipped_promotions_cache"""'], {}), "('/tmp/skipped_promotions_cache')\n", (215, 248), False, 'from diskcache import Cache\n'), ((291, 401), 're.compile', 're.compile', (['""".*promoter Skipping promotion of (.*) from (.*) to (.*), missing successful jobs: (.*)"""'], {}), "(\n '.*promoter Skipping promotion of (.*) from (.*) to (.*), missing successful jobs: (.*)'\n )\n", (301, 401), False, 'import re\n'), ((638, 681), 'promoter_utils.get_dlrn_instance_for_release', 'get_dlrn_instance_for_release', (['release_name'], {}), '(release_name)\n', (667, 681), False, 'from promoter_utils import get_dlrn_instance_for_release\n'), ((3650, 3715), 'influxdb_utils.format_ts_from_str', 'influxdb_utils.format_ts_from_str', (["skipped_promotion['timestamp']"], {}), "(skipped_promotion['timestamp'])\n", (3683, 3715), False, 'import influxdb_utils\n'), ((720, 744), 'dlrnapi_client.Params2', 'dlrnapi_client.Params2', ([], {}), '()\n', (742, 744), False, 'import dlrnapi_client\n')]
from __future__ import print_function import argparse import os import csv import numpy as np import random import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader from data_utils.data_util import PointcloudScaleAndTranslate from data_utils.ModelNetDataLoader import ModelNetDataLoader from models.pointnet import PointNetCls, feature_transform_regularizer from models.pointnet2 import PointNet2ClsMsg from models.dgcnn import DGCNN from models.pointcnn import PointCNNCls from utils import progress_bar, log_row import sys sys.path.append("./emd/") import emd_module as emd def gen_train_log(args): if not os.path.isdir('logs_train'): os.mkdir('logs_train') logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name)) if os.path.exists(logname): with open(logname, 'a') as logfile: log_row(logname, ['']) log_row(logname, ['']) with open(logname, 'a') as logfile: logwriter = csv.writer(logfile, delimiter=',') logwriter.writerow(['model type', 'data set', 'seed', 'train batch size', 'number of points in one batch', 'number of epochs', 'optimizer', 'learning rate', 'resume checkpoint path', 'feature transform', 'lambda for feature transform regularizer', 'data augment']) logwriter.writerow([args.model, args.data, args.seed, args.batch_size, args.num_points, args.epochs, args.optimizer, args.lr, args.resume, args.feature_transform, args.lambda_ft, args.augment]) logwriter.writerow(['Note', args.note]) logwriter.writerow(['']) def save_ckpt(args, epoch, model, optimizer, acc_list): if not os.path.isdir('checkpoints'): os.mkdir('checkpoints') if not os.path.isdir('checkpoints/%s_%s_%s' % (args.data, args.model, args.name)): os.mkdir('checkpoints/%s_%s_%s' % (args.data, args.model, args.name)) if acc_list[-1] > max(acc_list[:-1]): print('=====> Saving checkpoint...') print('the best test acc is', acc_list[-1]) state = { 'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'args': args, 'acc_list': acc_list, } torch.save(state, 'checkpoints/%s_%s_%s/best.pth' % (args.data, args.model, args.name)) print('Successfully save checkpoint at epoch %d' % epoch) def cal_loss(pred, gold, smoothing=True): ''' Calculate cross entropy loss, apply label smoothing if needed. ''' gold = gold.contiguous().view(-1) if smoothing: eps = 0.2 n_class = pred.size(1) one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1) one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1) log_prb = F.log_softmax(pred, dim=1) loss = -(one_hot * log_prb).sum(dim=1).mean() else: loss = F.cross_entropy(pred, gold, reduction='mean') return loss def test(model, test_loader, criterion): model.eval() correct = 0 total = 0 for j, data in enumerate(test_loader, 0): points, label = data points, label = points.to(device), label.to(device)[:, 0] if args.model == 'rscnn_kcutmix': fps_idx = pointnet2_utils.furthest_point_sample(points, args.num_points) # (B, npoint) points = pointnet2_utils.gather_operation(points.transpose(1, 2).contiguous(), fps_idx).transpose(1, 2).contiguous() # (B, N, 3) points = points.transpose(2, 1) # to be shape batch_size*3*N pred, trans_feat = model(points) loss = criterion(pred, label.long()) pred_choice = pred.data.max(1)[1] correct += pred_choice.eq(label.data).cpu().sum() total += label.size(0) progress_bar(j, len(test_loader), 'Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)' % (loss.item() / (j + 1), 100. * correct.item() / total, correct, total)) return loss.item() / (j + 1), 100. * correct.item() / total if __name__ == '__main__': ######################################## ## Set hypeparameters ######################################## parser = argparse.ArgumentParser() parser.add_argument('--model', type=str, default='pointnet', help='choose model type') parser.add_argument('--data', type=str, default='modelnet40', help='choose data set') parser.add_argument('--seed', type=int, default=0, help='manual random seed') parser.add_argument('--batch_size', type=int, default=16, help='input batch size') parser.add_argument('--num_points', type=int, default=1024, help='input batch size') parser.add_argument('--epochs', type=int, default=300, help='number of epochs to train for') parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training') parser.add_argument('--lr', default=0.001, type=float, help='learning rate in training') parser.add_argument('--resume', type=str, default='/', help='resume path') parser.add_argument('--feature_transform', type=int, default=1, help="use feature transform") parser.add_argument('--lambda_ft', type=float, default=0.001, help="lambda for feature transform") parser.add_argument('--augment', type=int, default=1, help='data argment to increase robustness') parser.add_argument('--name', type=str, default='train', help='name of the experiment') parser.add_argument('--note', type=str, default='', help='notation of the experiment') parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]') parser.add_argument('--beta', default=1, type=float, help='hyperparameter beta') parser.add_argument('--cutmix_prob', default=0.5, type=float, help='cutmix probability') args = parser.parse_args() args.feature_transform, args.augment = bool(args.feature_transform), bool(args.augment) ### Set random seed args.seed = args.seed if args.seed > 0 else random.randint(1, 10000) # dataset path DATA_PATH = './data/modelnet40_normal_resampled/' ######################################## ## Intiate model ######################################## device = torch.device("cuda" if torch.cuda.is_available() else "cpu") num_classes = 40 if args.model == 'dgcnn_kcutmix': model = DGCNN(num_classes) model = model.to(device) model = nn.DataParallel(model) optimizer = torch.optim.SGD(model.parameters(), lr=args.lr * 100, momentum=0.9, weight_decay=1e-4) scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 250, eta_min=1e-3) else: if args.model == 'pointnet_kcutmix': model = PointNetCls(num_classes, args.feature_transform) model = model.to(device) elif args.model == 'pointnet2_kcutmix': model = PointNet2ClsMsg(num_classes) model = model.to(device) model = nn.DataParallel(model) elif args.model == 'rscnn_kcutmix': from models.rscnn import RSCNN import models.rscnn_utils.pointnet2_utils as pointnet2_utils model = RSCNN(num_classes) model = model.to(device) model = nn.DataParallel(model) optimizer = torch.optim.Adam( model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4 ) scheduler_c = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5) if len(args.resume) > 1: print('=====> Loading from checkpoint...') checkpoint = torch.load('./checkpoints/%s.pth' % args.resume) args = checkpoint['args'] torch.manual_seed(args.seed) print("Random Seed: ", args.seed) """if args.optimizer == 'SGD': optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9) elif args.optimizer == 'Adam': optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))""" model.load_state_dict(checkpoint['model_state_dict']) optimizer.load_state_dict(checkpoint['optimizer_state_dict']) START_EPOCH = checkpoint['epoch'] + 1 acc_list = checkpoint['acc_list'] if args.model == 'dgcnn_kcutmix': scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 250, eta_min=1e-3) else: scheduler_c = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5) print('Successfully resumed!') else: print('=====> Building new model...') torch.manual_seed(args.seed) print("Random Seed: ", args.seed) START_EPOCH = 0 acc_list = [0] print('Successfully built!') ######################################## ## Load data ######################################## print('======> Loading data') TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_points, split='train', normal_channel=args.normal) TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_points, split='test', normal_channel=args.normal) train_loader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True) test_loader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4, drop_last=False) PointcloudScaleAndTranslate = PointcloudScaleAndTranslate() print('======> Successfully loaded!') gen_train_log(args) logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name)) ######################################## ## Train ######################################## if args.model == 'dgcnn_kcutmix': criterion = cal_loss else: criterion = F.cross_entropy # nn.CrossEntropyLoss() if args.resume == '/': log_row(logname, ['Epoch', 'Train Loss', 'Train Acc', 'Test Loss', 'Test Acc', 'learning Rate']) for epoch in range(START_EPOCH, args.epochs): print('\nEpoch: %d' % epoch) scheduler_c.step(epoch) model.train() correct = 0 total = 0 for i, data in enumerate(train_loader, 0): points, target = data points, target = points.to(device), target.to(device)[:, 0] points = PointcloudScaleAndTranslate(points) if args.model == 'rscnn_kcutmix': fps_idx = pointnet2_utils.furthest_point_sample(points, args.num_points) # (B, npoint) fps_idx = fps_idx[:, np.random.choice(args.num_points, args.num_points, False)] points = pointnet2_utils.gather_operation(points.transpose(1, 2).contiguous(), fps_idx).transpose(1,2).contiguous() # (B, N, 3) # cutmix optimizer.zero_grad() r = np.random.rand(1) if args.beta > 0 and r < args.cutmix_prob: lam = np.random.beta(args.beta, args.beta) B = points.size()[0] rand_index = torch.randperm(B).cuda() target_a = target target_b = target[rand_index] point_a = torch.zeros(B, 1024, 3) point_b = torch.zeros(B, 1024, 3) point_c = torch.zeros(B, 1024, 3) point_a = points point_b = points[rand_index] point_c = points[rand_index] point_a, point_b, point_c = point_a.to(device), point_b.to(device), point_c.to(device) remd = emd.emdModule() remd = remd.cuda() dis, ind = remd(point_a, point_b, 0.005, 300) for ass in range(B): point_c[ass, :, :] = point_c[ass, ind[ass].long(), :] int_lam = int(args.num_points * lam) int_lam = max(1, int_lam) random_point = torch.from_numpy(np.random.choice(1024, B, replace=False, p=None)) # kNN ind1 = torch.tensor(range(B)) query = point_a[ind1, random_point].view(B, 1, 3) dist = torch.sqrt(torch.sum((point_a - query.repeat(1, args.num_points, 1)) ** 2, 2)) idxs = dist.topk(int_lam, dim=1, largest=False, sorted=True).indices for i2 in range(B): points[i2, idxs[i2], :] = point_c[i2, idxs[i2], :] # adjust lambda to exactly match point ratio lam = int_lam * 1.0 / args.num_points points = points.transpose(2, 1) pred, trans_feat = model(points) loss = criterion(pred, target_a.long()) * (1. - lam) + criterion(pred, target_b.long()) * lam else: points = points.transpose(2, 1) pred, trans_feat = model(points) loss = criterion(pred, target.long()) if args.feature_transform and args.model == 'pointnet_kcutmix': loss += feature_transform_regularizer(trans_feat) * args.lambda_ft loss.backward() optimizer.step() pred_choice = pred.data.max(1)[1] correct += pred_choice.eq(target.data).cpu().sum() total += target.size(0) progress_bar(i, len(train_loader), 'Train Loss: %.3f | Train Acc: %.3f%% (%d/%d)' % (loss.item() / (i + 1), 100. * correct.item() / total, correct, total)) train_loss, train_acc = loss.item() / (i + 1), 100. * correct.item() / total ### Test in batch test_loss, test_acc = test(model, test_loader, criterion) acc_list.append(test_acc) print('the best test acc is', max(acc_list)) ### Keep tracing log_row(logname, [epoch, train_loss, train_acc, test_loss, test_acc, optimizer.param_groups[0]['lr'], max(acc_list), np.argmax(acc_list) - 1]) save_ckpt(args, epoch, model, optimizer, acc_list)
[ "os.mkdir", "torch.optim.lr_scheduler.StepLR", "argparse.ArgumentParser", "numpy.argmax", "models.rscnn.RSCNN", "data_utils.ModelNetDataLoader.ModelNetDataLoader", "models.pointnet.PointNetCls", "sys.path.append", "models.pointnet2.PointNet2ClsMsg", "random.randint", "torch.utils.data.DataLoader", "torch.load", "os.path.exists", "torch.optim.lr_scheduler.CosineAnnealingLR", "utils.log_row", "torch.nn.functional.log_softmax", "numpy.random.choice", "torch.zeros", "csv.writer", "torch.zeros_like", "numpy.random.beta", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.cuda.is_available", "torch.randperm", "data_utils.data_util.PointcloudScaleAndTranslate", "emd_module.emdModule", "models.pointnet.feature_transform_regularizer", "os.path.isdir", "torch.save", "models.dgcnn.DGCNN", "models.rscnn_utils.pointnet2_utils.furthest_point_sample", "numpy.random.rand", "torch.nn.DataParallel" ]
[((637, 662), 'sys.path.append', 'sys.path.append', (['"""./emd/"""'], {}), "('./emd/')\n", (652, 662), False, 'import sys\n'), ((882, 905), 'os.path.exists', 'os.path.exists', (['logname'], {}), '(logname)\n', (896, 905), False, 'import os\n'), ((4609, 4634), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4632, 4634), False, 'import argparse\n'), ((9632, 9737), 'data_utils.ModelNetDataLoader.ModelNetDataLoader', 'ModelNetDataLoader', ([], {'root': 'DATA_PATH', 'npoint': 'args.num_points', 'split': '"""train"""', 'normal_channel': 'args.normal'}), "(root=DATA_PATH, npoint=args.num_points, split='train',\n normal_channel=args.normal)\n", (9650, 9737), False, 'from data_utils.ModelNetDataLoader import ModelNetDataLoader\n'), ((9795, 9899), 'data_utils.ModelNetDataLoader.ModelNetDataLoader', 'ModelNetDataLoader', ([], {'root': 'DATA_PATH', 'npoint': 'args.num_points', 'split': '"""test"""', 'normal_channel': 'args.normal'}), "(root=DATA_PATH, npoint=args.num_points, split='test',\n normal_channel=args.normal)\n", (9813, 9899), False, 'from data_utils.ModelNetDataLoader import ModelNetDataLoader\n'), ((9957, 10076), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['TRAIN_DATASET'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': '(True)'}), '(TRAIN_DATASET, batch_size=args.batch_size,\n shuffle=True, num_workers=4, drop_last=True)\n', (9984, 10076), False, 'import torch\n'), ((10140, 10260), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['TEST_DATASET'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)', 'drop_last': '(False)'}), '(TEST_DATASET, batch_size=args.batch_size,\n shuffle=False, num_workers=4, drop_last=False)\n', (10167, 10260), False, 'import torch\n'), ((10341, 10370), 'data_utils.data_util.PointcloudScaleAndTranslate', 'PointcloudScaleAndTranslate', ([], {}), '()\n', (10368, 10370), False, 'from data_utils.data_util import PointcloudScaleAndTranslate\n'), ((731, 758), 'os.path.isdir', 'os.path.isdir', (['"""logs_train"""'], {}), "('logs_train')\n", (744, 758), False, 'import os\n'), ((769, 791), 'os.mkdir', 'os.mkdir', (['"""logs_train"""'], {}), "('logs_train')\n", (777, 791), False, 'import os\n'), ((1088, 1122), 'csv.writer', 'csv.writer', (['logfile'], {'delimiter': '""","""'}), "(logfile, delimiter=',')\n", (1098, 1122), False, 'import csv\n'), ((1901, 1929), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (1914, 1929), False, 'import os\n'), ((1940, 1963), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (1948, 1963), False, 'import os\n'), ((1976, 2050), 'os.path.isdir', 'os.path.isdir', (["('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))"], {}), "('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))\n", (1989, 2050), False, 'import os\n'), ((2061, 2130), 'os.mkdir', 'os.mkdir', (["('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))"], {}), "('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))\n", (2069, 2130), False, 'import os\n'), ((2517, 2608), 'torch.save', 'torch.save', (['state', "('checkpoints/%s_%s_%s/best.pth' % (args.data, args.model, args.name))"], {}), "(state, 'checkpoints/%s_%s_%s/best.pth' % (args.data, args.model,\n args.name))\n", (2527, 2608), False, 'import torch\n'), ((3078, 3104), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (3091, 3104), True, 'import torch.nn.functional as F\n'), ((3189, 3234), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['pred', 'gold'], {'reduction': '"""mean"""'}), "(pred, gold, reduction='mean')\n", (3204, 3234), True, 'import torch.nn.functional as F\n'), ((6475, 6499), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (6489, 6499), False, 'import random\n'), ((6848, 6866), 'models.dgcnn.DGCNN', 'DGCNN', (['num_classes'], {}), '(num_classes)\n', (6853, 6866), False, 'from models.dgcnn import DGCNN\n'), ((6918, 6940), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (6933, 6940), True, 'import torch.nn as nn\n'), ((7109, 7182), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer', '(250)'], {'eta_min': '(0.001)'}), '(optimizer, 250, eta_min=0.001)\n', (7151, 7182), False, 'import torch\n'), ((8104, 8171), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(20)', 'gamma': '(0.5)'}), '(optimizer, step_size=20, gamma=0.5)\n', (8135, 8171), False, 'import torch\n'), ((8276, 8324), 'torch.load', 'torch.load', (["('./checkpoints/%s.pth' % args.resume)"], {}), "('./checkpoints/%s.pth' % args.resume)\n", (8286, 8324), False, 'import torch\n'), ((8371, 8399), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8388, 8399), False, 'import torch\n'), ((9297, 9325), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9314, 9325), False, 'import torch\n'), ((10810, 10910), 'utils.log_row', 'log_row', (['logname', "['Epoch', 'Train Loss', 'Train Acc', 'Test Loss', 'Test Acc', 'learning Rate']"], {}), "(logname, ['Epoch', 'Train Loss', 'Train Acc', 'Test Loss',\n 'Test Acc', 'learning Rate'])\n", (10817, 10910), False, 'from utils import progress_bar, log_row\n'), ((965, 987), 'utils.log_row', 'log_row', (['logname', "['']"], {}), "(logname, [''])\n", (972, 987), False, 'from utils import progress_bar, log_row\n'), ((1001, 1023), 'utils.log_row', 'log_row', (['logname', "['']"], {}), "(logname, [''])\n", (1008, 1023), False, 'from utils import progress_bar, log_row\n'), ((3562, 3624), 'models.rscnn_utils.pointnet2_utils.furthest_point_sample', 'pointnet2_utils.furthest_point_sample', (['points', 'args.num_points'], {}), '(points, args.num_points)\n', (3599, 3624), True, 'import models.rscnn_utils.pointnet2_utils as pointnet2_utils\n'), ((6730, 6755), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6753, 6755), False, 'import torch\n'), ((7326, 7374), 'models.pointnet.PointNetCls', 'PointNetCls', (['num_classes', 'args.feature_transform'], {}), '(num_classes, args.feature_transform)\n', (7337, 7374), False, 'from models.pointnet import PointNetCls, feature_transform_regularizer\n'), ((9005, 9078), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer', '(250)'], {'eta_min': '(0.001)'}), '(optimizer, 250, eta_min=0.001)\n', (9047, 9078), False, 'import torch\n'), ((9120, 9187), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(20)', 'gamma': '(0.5)'}), '(optimizer, step_size=20, gamma=0.5)\n', (9151, 9187), False, 'import torch\n'), ((11280, 11315), 'data_utils.data_util.PointcloudScaleAndTranslate', 'PointcloudScaleAndTranslate', (['points'], {}), '(points)\n', (11307, 11315), False, 'from data_utils.data_util import PointcloudScaleAndTranslate\n'), ((11789, 11806), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (11803, 11806), True, 'import numpy as np\n'), ((2927, 2949), 'torch.zeros_like', 'torch.zeros_like', (['pred'], {}), '(pred)\n', (2943, 2949), False, 'import torch\n'), ((7483, 7511), 'models.pointnet2.PointNet2ClsMsg', 'PointNet2ClsMsg', (['num_classes'], {}), '(num_classes)\n', (7498, 7511), False, 'from models.pointnet2 import PointNet2ClsMsg\n'), ((7571, 7593), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (7586, 7593), True, 'import torch.nn as nn\n'), ((11392, 11454), 'models.rscnn_utils.pointnet2_utils.furthest_point_sample', 'pointnet2_utils.furthest_point_sample', (['points', 'args.num_points'], {}), '(points, args.num_points)\n', (11429, 11454), True, 'import models.rscnn_utils.pointnet2_utils as pointnet2_utils\n'), ((11886, 11922), 'numpy.random.beta', 'np.random.beta', (['args.beta', 'args.beta'], {}), '(args.beta, args.beta)\n', (11900, 11922), True, 'import numpy as np\n'), ((12145, 12168), 'torch.zeros', 'torch.zeros', (['B', '(1024)', '(3)'], {}), '(B, 1024, 3)\n', (12156, 12168), False, 'import torch\n'), ((12196, 12219), 'torch.zeros', 'torch.zeros', (['B', '(1024)', '(3)'], {}), '(B, 1024, 3)\n', (12207, 12219), False, 'import torch\n'), ((12247, 12270), 'torch.zeros', 'torch.zeros', (['B', '(1024)', '(3)'], {}), '(B, 1024, 3)\n', (12258, 12270), False, 'import torch\n'), ((12543, 12558), 'emd_module.emdModule', 'emd.emdModule', ([], {}), '()\n', (12556, 12558), True, 'import emd_module as emd\n'), ((7782, 7800), 'models.rscnn.RSCNN', 'RSCNN', (['num_classes'], {}), '(num_classes)\n', (7787, 7800), False, 'from models.rscnn import RSCNN\n'), ((7860, 7882), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (7875, 7882), True, 'import torch.nn as nn\n'), ((12921, 12969), 'numpy.random.choice', 'np.random.choice', (['(1024)', 'B'], {'replace': '(False)', 'p': 'None'}), '(1024, B, replace=False, p=None)\n', (12937, 12969), True, 'import numpy as np\n'), ((14012, 14053), 'models.pointnet.feature_transform_regularizer', 'feature_transform_regularizer', (['trans_feat'], {}), '(trans_feat)\n', (14041, 14053), False, 'from models.pointnet import PointNetCls, feature_transform_regularizer\n'), ((14929, 14948), 'numpy.argmax', 'np.argmax', (['acc_list'], {}), '(acc_list)\n', (14938, 14948), True, 'import numpy as np\n'), ((11508, 11565), 'numpy.random.choice', 'np.random.choice', (['args.num_points', 'args.num_points', '(False)'], {}), '(args.num_points, args.num_points, False)\n', (11524, 11565), True, 'import numpy as np\n'), ((11993, 12010), 'torch.randperm', 'torch.randperm', (['B'], {}), '(B)\n', (12007, 12010), False, 'import torch\n')]
import random from flask import Flask app = Flask(__name__) @app.route('/') def index(): a = random.randrange(1, 10) b = random.randrange(1, 10) return f'{a} * {b} = {a * b}'
[ "flask.Flask", "random.randrange" ]
[((46, 61), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (51, 61), False, 'from flask import Flask\n'), ((101, 124), 'random.randrange', 'random.randrange', (['(1)', '(10)'], {}), '(1, 10)\n', (117, 124), False, 'import random\n'), ((133, 156), 'random.randrange', 'random.randrange', (['(1)', '(10)'], {}), '(1, 10)\n', (149, 156), False, 'import random\n')]
"""Generate a summary of a previously trained vowel recognition model. """ import torch import wavetorch import argparse import yaml import os import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl try: from helpers.plot import mpl_set_latex mpl_set_latex() except ImportError: import warnings warnings.warn('The helpers package is unavailable', ImportWarning) COL_TRAIN = "#1f77b4" COL_TEST = "#2ca02c" parser = argparse.ArgumentParser() parser.add_argument('filename', type=str) parser.add_argument('--vmin', type=float, default=1e-3) parser.add_argument('--vmax', type=float, default=1.0) parser.add_argument('--fig', type=str, default=None) parser.add_argument('--title_off', action='store_true') parser.add_argument('--labels', action='store_true') parser.add_argument('--vowel_samples', nargs='+', type=int, default=None) if __name__ == '__main__': args = parser.parse_args() model, history, history_state, cfg = wavetorch.io.load_model(args.filename) try: if cfg['seed'] is not None: torch.manual_seed(cfg['seed']) except: pass print("Configuration for model in %s is:" % args.filename) print(yaml.dump(cfg, default_flow_style=False)) sr = cfg['data']['sr'] gender = cfg['data']['gender'] vowels = cfg['data']['vowels'] N_classes = len(vowels) fig = plt.figure( figsize=(7, 4.75), constrained_layout=True) gs = fig.add_gridspec(1, 2, width_ratios=[1, 0.4]) gs_left = gs[0].subgridspec(3, 2) gs_right = gs[1].subgridspec(N_classes+1, 1, height_ratios=[1 for i in range(0,N_classes)] + [0.05]) gs_bot = gs_left[2,:].subgridspec(1, 2) ax_cm_train0 = fig.add_subplot(gs_left[0,0]) ax_cm_test0 = fig.add_subplot(gs_left[0,1]) ax_cm_train1 = fig.add_subplot(gs_left[1,0]) ax_cm_test1 = fig.add_subplot(gs_left[1,1]) ax_loss = fig.add_subplot(gs_bot[0]) ax_acc = fig.add_subplot(gs_bot[1]) ax_fields = [fig.add_subplot(gs_right[i]) for i in range(0, N_classes+1)] history_mean = history.groupby('epoch').mean() history_std = history.groupby('epoch').std() epochs = history_mean.index.values ax_loss.fill_between(epochs, history_mean['loss_train'].values-history_std['loss_train'].values, history_mean['loss_train'].values+history_std['loss_train'].values, color=COL_TRAIN, alpha=0.15) ax_loss.plot(epochs, history_mean['loss_train'].values, "-", label="Training dataset", ms=4, color=COL_TRAIN) ax_loss.fill_between(epochs, history_mean['loss_test'].values-history_std['loss_test'].values, history_mean['loss_test'].values+history_std['loss_test'].values, color=COL_TEST, alpha=0.15) ax_loss.plot(epochs, history_mean['loss_test'].values, "-", label="Testing dataset", ms=4, color=COL_TEST) ax_loss.set_ylabel('Loss') ax_loss.set_xlabel('Training epoch \#') ax_acc.plot(epochs, history_mean['acc_train'].values*100, "-", label="Training dataset", ms=4, color=COL_TRAIN) ax_acc.fill_between(epochs, history_mean['acc_train'].values*100-history_std['acc_train'].values*100, history_mean['acc_train'].values*100+history_std['acc_train'].values*100, color=COL_TRAIN, alpha=0.15) ax_acc.plot(epochs, history_mean['acc_test'].values*100, "-", label="Testing dataset", ms=4, color=COL_TEST) ax_acc.fill_between(epochs, history_mean['acc_test'].values*100-history_std['acc_test'].values*100, history_mean['acc_test'].values*100+history_std['acc_test'].values*100, color=COL_TEST, alpha=0.15) ax_acc.set_xlabel('Training epoch \#') ax_acc.set_ylabel('Accuracy') ax_acc.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=10)) # ax_acc.set_ylim([20,100]) ax_loss.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=0.1)) # ax_loss.set_ylim([0.7,1.2]) ax_acc.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f\%%')) ax_loss.legend(fontsize='small') # ax_acc.annotate("%.1f%% training set accuracy" % (history_mean['acc_train'].tail(1).iloc[0]*100), xy=(0.1,0.1), xytext=(0,10), textcoords="offset points", xycoords="axes fraction", ha="left", va="bottom", color=COL_TRAIN) # ax_acc.annotate("%.1f%% testing set accuracy" % (history_mean['acc_test'].tail(1).iloc[0]*100), xy=(0.1,0.1), xycoords="axes fraction", ha="left", va="bottom", color=COL_TEST) ax_acc.annotate('%.1f\%%' % (history_mean['acc_train'].tail(1).iloc[0]*100), xy=(epochs[-1], history_mean['acc_train'].tail(1).iloc[0]*100), xycoords='data', xytext=(-1, 5), textcoords='offset points', ha='left', va='center', fontsize='small', color=COL_TRAIN, bbox=wavetorch.plot.bbox_white) ax_acc.annotate('%.1f\%%' % (history_mean['acc_test'].tail(1).iloc[0]*100), xy=(epochs[-1], history_mean['acc_test'].tail(1).iloc[0]*100), xycoords='data', xytext=(-1, -5), textcoords='offset points', ha='left', va='center', fontsize='small', color=COL_TEST, bbox=wavetorch.plot.bbox_white) print('Accuracy (train): %.1f%% +/- %.1f%%' % (history_mean['acc_train'].tail(1).iloc[0]*100, history_std['acc_train'].tail(1).iloc[0]*100)) print('Accuracy (test): %.1f%% +/- %.1f%%' % (history_mean['acc_test'].tail(1).iloc[0]*100, history_std['acc_test'].tail(1).iloc[0]*100)) cm_train = history.groupby('epoch')['cm_train'].apply(np.mean).head(1).iloc[0] cm_test = history.groupby('epoch')['cm_test'].apply(np.mean).head(1).iloc[0] wavetorch.plot.confusion_matrix(cm_train, title="Training dataset", normalize=True, ax=ax_cm_train0, labels=vowels) wavetorch.plot.confusion_matrix(cm_test, title="Testing dataset", normalize=True, ax=ax_cm_test0, labels=vowels) cm_train = history.groupby('epoch')['cm_train'].apply(np.mean).tail(1).iloc[0] cm_test = history.groupby('epoch')['cm_test'].apply(np.mean).tail(1).iloc[0] wavetorch.plot.confusion_matrix(cm_train, title="Training dataset", normalize=True, ax=ax_cm_train1, labels=vowels) wavetorch.plot.confusion_matrix(cm_test, title="Testing dataset", normalize=True, ax=ax_cm_test1, labels=vowels) X, Y, F = wavetorch.data.load_all_vowels(vowels, gender='both', sr=sr, random_state=0) # model.load_state_dict(history_state[cfg['training']['N_epochs']]) for i in range(N_classes): xb, yb = wavetorch.data.select_vowel_sample(X, Y, F, i, ind=args.vowel_samples[i] if args.vowel_samples is not None else None) with torch.no_grad(): field_dist = model(xb, output_fields=True) wavetorch.plot.total_field(model, field_dist, yb, ax=ax_fields[yb.argmax().item()], cbar=True, cax=ax_fields[-1], vmin=args.vmin, vmax=args.vmax) if args.labels: try: from helpers.plot import apply_panel_labels apply_panel_labels([ax_cm_train0, ax_cm_test0, ax_cm_train1, ax_cm_test1, ax_loss, ax_acc] + ax_fields[0:-1], xy=[(-35,0), (-35,0), (-35,0), (-35,0), (-25,0), (-40,0), (8,-6), (8,-6), (8,-6)], color=['k', 'k', 'k', 'k', 'k', 'k', 'w', 'w', 'w'], case='upper') except ImportError: import warnings warnings.warn('The helpers package is unavailable', ImportWarning) plt.show() if args.fig is not None: fig.savefig(args.fig, dpi=300) else: fig.savefig(os.path.splitext(args.filename)[0]+"_summary.png", dpi=300)
[ "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.show", "argparse.ArgumentParser", "torch.manual_seed", "yaml.dump", "wavetorch.io.load_model", "wavetorch.data.load_all_vowels", "helpers.plot.mpl_set_latex", "wavetorch.data.select_vowel_sample", "matplotlib.pyplot.figure", "matplotlib.ticker.FormatStrFormatter", "os.path.splitext", "warnings.warn", "torch.no_grad", "wavetorch.plot.confusion_matrix", "helpers.plot.apply_panel_labels" ]
[((457, 482), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (480, 482), False, 'import argparse\n'), ((275, 290), 'helpers.plot.mpl_set_latex', 'mpl_set_latex', ([], {}), '()\n', (288, 290), False, 'from helpers.plot import mpl_set_latex\n'), ((974, 1012), 'wavetorch.io.load_model', 'wavetorch.io.load_model', (['args.filename'], {}), '(args.filename)\n', (997, 1012), False, 'import wavetorch\n'), ((1379, 1433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 4.75)', 'constrained_layout': '(True)'}), '(figsize=(7, 4.75), constrained_layout=True)\n', (1389, 1433), True, 'import matplotlib.pyplot as plt\n'), ((5706, 5825), 'wavetorch.plot.confusion_matrix', 'wavetorch.plot.confusion_matrix', (['cm_train'], {'title': '"""Training dataset"""', 'normalize': '(True)', 'ax': 'ax_cm_train0', 'labels': 'vowels'}), "(cm_train, title='Training dataset',\n normalize=True, ax=ax_cm_train0, labels=vowels)\n", (5737, 5825), False, 'import wavetorch\n'), ((5826, 5943), 'wavetorch.plot.confusion_matrix', 'wavetorch.plot.confusion_matrix', (['cm_test'], {'title': '"""Testing dataset"""', 'normalize': '(True)', 'ax': 'ax_cm_test0', 'labels': 'vowels'}), "(cm_test, title='Testing dataset', normalize\n =True, ax=ax_cm_test0, labels=vowels)\n", (5857, 5943), False, 'import wavetorch\n'), ((6108, 6227), 'wavetorch.plot.confusion_matrix', 'wavetorch.plot.confusion_matrix', (['cm_train'], {'title': '"""Training dataset"""', 'normalize': '(True)', 'ax': 'ax_cm_train1', 'labels': 'vowels'}), "(cm_train, title='Training dataset',\n normalize=True, ax=ax_cm_train1, labels=vowels)\n", (6139, 6227), False, 'import wavetorch\n'), ((6228, 6345), 'wavetorch.plot.confusion_matrix', 'wavetorch.plot.confusion_matrix', (['cm_test'], {'title': '"""Testing dataset"""', 'normalize': '(True)', 'ax': 'ax_cm_test1', 'labels': 'vowels'}), "(cm_test, title='Testing dataset', normalize\n =True, ax=ax_cm_test1, labels=vowels)\n", (6259, 6345), False, 'import wavetorch\n'), ((6356, 6432), 'wavetorch.data.load_all_vowels', 'wavetorch.data.load_all_vowels', (['vowels'], {'gender': '"""both"""', 'sr': 'sr', 'random_state': '(0)'}), "(vowels, gender='both', sr=sr, random_state=0)\n", (6386, 6432), False, 'import wavetorch\n'), ((7518, 7528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7526, 7528), True, 'import matplotlib.pyplot as plt\n'), ((335, 401), 'warnings.warn', 'warnings.warn', (['"""The helpers package is unavailable"""', 'ImportWarning'], {}), "('The helpers package is unavailable', ImportWarning)\n", (348, 401), False, 'import warnings\n'), ((1200, 1240), 'yaml.dump', 'yaml.dump', (['cfg'], {'default_flow_style': '(False)'}), '(cfg, default_flow_style=False)\n', (1209, 1240), False, 'import yaml\n'), ((3830, 3865), 'matplotlib.ticker.MultipleLocator', 'mpl.ticker.MultipleLocator', ([], {'base': '(10)'}), '(base=10)\n', (3856, 3865), True, 'import matplotlib as mpl\n'), ((3935, 3971), 'matplotlib.ticker.MultipleLocator', 'mpl.ticker.MultipleLocator', ([], {'base': '(0.1)'}), '(base=0.1)\n', (3961, 3971), True, 'import matplotlib as mpl\n'), ((4045, 4086), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f\\\\%%"""'], {}), "('%.0f\\\\%%')\n", (4074, 4086), True, 'import matplotlib as mpl\n'), ((6554, 6675), 'wavetorch.data.select_vowel_sample', 'wavetorch.data.select_vowel_sample', (['X', 'Y', 'F', 'i'], {'ind': '(args.vowel_samples[i] if args.vowel_samples is not None else None)'}), '(X, Y, F, i, ind=args.vowel_samples[i] if\n args.vowel_samples is not None else None)\n', (6588, 6675), False, 'import wavetorch\n'), ((1071, 1101), 'torch.manual_seed', 'torch.manual_seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (1088, 1101), False, 'import torch\n'), ((6685, 6700), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6698, 6700), False, 'import torch\n'), ((7017, 7298), 'helpers.plot.apply_panel_labels', 'apply_panel_labels', (['([ax_cm_train0, ax_cm_test0, ax_cm_train1, ax_cm_test1, ax_loss, ax_acc] +\n ax_fields[0:-1])'], {'xy': '[(-35, 0), (-35, 0), (-35, 0), (-35, 0), (-25, 0), (-40, 0), (8, -6), (8, -\n 6), (8, -6)]', 'color': "['k', 'k', 'k', 'k', 'k', 'k', 'w', 'w', 'w']", 'case': '"""upper"""'}), "([ax_cm_train0, ax_cm_test0, ax_cm_train1, ax_cm_test1,\n ax_loss, ax_acc] + ax_fields[0:-1], xy=[(-35, 0), (-35, 0), (-35, 0), (\n -35, 0), (-25, 0), (-40, 0), (8, -6), (8, -6), (8, -6)], color=['k',\n 'k', 'k', 'k', 'k', 'k', 'w', 'w', 'w'], case='upper')\n", (7035, 7298), False, 'from helpers.plot import apply_panel_labels\n'), ((7441, 7507), 'warnings.warn', 'warnings.warn', (['"""The helpers package is unavailable"""', 'ImportWarning'], {}), "('The helpers package is unavailable', ImportWarning)\n", (7454, 7507), False, 'import warnings\n'), ((7627, 7658), 'os.path.splitext', 'os.path.splitext', (['args.filename'], {}), '(args.filename)\n', (7643, 7658), False, 'import os\n')]
import math import numpy as np import torch import torch.nn as nn import itertools class FuzzyLayer(nn.Module): def __init__(self, fuzzynum,channel): super(FuzzyLayer,self).__init__() self.n = fuzzynum self.channel = channel self.conv1 = nn.Conv2d(self.channel,1,3,padding=1) self.conv2 = nn.Conv2d(1,self.channel,3,padding=1) self.mu = nn.Parameter(torch.randn((self.channel,self.n))) self.sigma = nn.Parameter(torch.randn((self.channel,self.n))) self.bn1 = nn.BatchNorm2d(1, affine=True) self.bn2 = nn.BatchNorm2d(self.channel,affine=True) def forward(self, x): x = self.conv1(x) #tmp = torch.tensor(np.zeros((x.size()[0],x.size()[1],x.size()[2],x.size()[3])),dtype = torch.float).cuda() tmp = torch.tensor(np.zeros((x.size()[0], x.size()[1], x.size()[2], x.size()[3])), dtype=torch.float).to(device) for num,channel,w,h in itertools.product(range(x.size()[0]),range(x.size()[1]),range(x.size()[2]),range(x.size()[3])): for f in range(self.n): tmp[num][channel][w][h] -= ((x[num][channel][w][h]-self.mu[channel][f])/self.sigma[channel][f])**2 fNeural = self.bn2(self.conv2(self.bn1(torch.exp(tmp)))) return fNeural class FuzzyNet(nn.Module): def __init__(self, n_class=2, testing=False): super(FuzzyNet, self).__init__() self.fuzzy_4 = FuzzyLayer(fuzzynum=1,channel=512) self.fuzzy_3 = FuzzyLayer(fuzzynum=1,channel=256) self.fuzzy_2 = FuzzyLayer(fuzzynum=1,channel=128) self.fuzzy_1 = FuzzyLayer(fuzzynum=1, channel=64) self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1) self.relu1_1 = nn.ReLU(inplace=True) self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1) self.relu1_2 = nn.ReLU(inplace=True) self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/2 self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1) self.relu2_1 = nn.ReLU(inplace=True) self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1) self.relu2_2 = nn.ReLU(inplace=True) self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4 self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1) self.relu3_1 = nn.ReLU(inplace=True) self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1) self.relu3_2 = nn.ReLU(inplace=True) self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1) self.relu3_3 = nn.ReLU(inplace=True) self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/8 self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1) self.relu4_1 = nn.ReLU(inplace=True) self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1) self.relu4_2 = nn.ReLU(inplace=True) self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1) self.relu4_3 = nn.ReLU(inplace=True) self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/16 self.deconv1 = nn.ConvTranspose2d(in_channels=512,out_channels=256,kernel_size=(2,2),stride=(2,2),bias=False) self.deconv2 = nn.ConvTranspose2d(in_channels=256,out_channels=128,kernel_size=(2,2),stride=(2,2),bias=False) self.deconv3 = nn.ConvTranspose2d(in_channels=128,out_channels=64,kernel_size=(2,2),stride=(2,2),bias=False) self.deconv4 = nn.ConvTranspose2d(in_channels=64,out_channels=6,kernel_size=(2,2),stride=(2,2),bias=False) # self.fbn1 = nn.BatchNorm2d(64, affine=True) self.fbn2 = nn.BatchNorm2d(128, affine=True) self.fbn3 = nn.BatchNorm2d(256, affine=True) self.fbn4 = nn.BatchNorm2d(512, affine=True) self.bn1 = nn.BatchNorm2d(512, affine=True) self.bn2 = nn.BatchNorm2d(256, affine=True) self.bn3 = nn.BatchNorm2d(128, affine=True) self.bn4 = nn.BatchNorm2d(64, affine=True) self.testing = testing self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) #m.weight.data.zero_() if m.bias is not None: m.bias.data.zero_() if isinstance(m, nn.ConvTranspose2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels*m.in_channels m.weight.data.normal_(0,math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() if isinstance(m,nn.Linear): m.weight.data.normal_(0, 1) for param in self.parameters(): param.requires_grad = True def forward(self, x): h = x h = self.relu1_1(self.conv1_1(h)) h = self.relu1_2(self.conv1_2(h)) h = self.pool1(h) c1 = h f1 = self.fbn1(self.fuzzy_1(c1))+c1 h = self.relu2_1(self.conv2_1(h)) h = self.relu2_2(self.conv2_2(h)) h = self.pool2(h+g) c2 = h f2 = self.fbn2(self.fuzzy_2(c2))+c2 h = self.relu3_1(self.conv3_1(h)) h = self.relu3_2(self.conv3_2(h)) h = self.relu3_3(self.conv3_3(h)) h = self.pool3(h+g) c3 = h f3 = self.fbn3(self.fuzzy_3(c3))+c3 h = self.relu4_1(self.conv4_1(h)) h = self.relu4_2(self.conv4_2(h)) h = self.relu4_3(self.conv4_3(h)) h = self.pool4(h+g) c4 = h f4 = self.fbn4(self.fuzzy_4(c4)) h = self.bn1(h) h = self.bn2(self.deconv1(h)) h = self.bn3(self.deconv2(h)) h = self.bn4(self.deconv3(h)) h = self.deconv4(h) return h
[ "torch.nn.ReLU", "torch.nn.ConvTranspose2d", "math.sqrt", "torch.nn.Conv2d", "torch.randn", "torch.exp", "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d" ]
[((249, 289), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.channel', '(1)', '(3)'], {'padding': '(1)'}), '(self.channel, 1, 3, padding=1)\n', (258, 289), True, 'import torch.nn as nn\n'), ((302, 342), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.channel', '(3)'], {'padding': '(1)'}), '(1, self.channel, 3, padding=1)\n', (311, 342), True, 'import torch.nn as nn\n'), ((478, 508), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1)'], {'affine': '(True)'}), '(1, affine=True)\n', (492, 508), True, 'import torch.nn as nn\n'), ((522, 563), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.channel'], {'affine': '(True)'}), '(self.channel, affine=True)\n', (536, 563), True, 'import torch.nn as nn\n'), ((1500, 1530), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3)'], {'padding': '(1)'}), '(3, 64, 3, padding=1)\n', (1509, 1530), True, 'import torch.nn as nn\n'), ((1548, 1569), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1555, 1569), True, 'import torch.nn as nn\n'), ((1587, 1618), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'padding': '(1)'}), '(64, 64, 3, padding=1)\n', (1596, 1618), True, 'import torch.nn as nn\n'), ((1636, 1657), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1643, 1657), True, 'import torch.nn as nn\n'), ((1673, 1714), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)', 'ceil_mode': '(True)'}), '(2, stride=2, ceil_mode=True)\n', (1685, 1714), True, 'import torch.nn as nn\n'), ((1742, 1774), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)'], {'padding': '(1)'}), '(64, 128, 3, padding=1)\n', (1751, 1774), True, 'import torch.nn as nn\n'), ((1792, 1813), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1799, 1813), True, 'import torch.nn as nn\n'), ((1831, 1864), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)'}), '(128, 128, 3, padding=1)\n', (1840, 1864), True, 'import torch.nn as nn\n'), ((1882, 1903), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1889, 1903), True, 'import torch.nn as nn\n'), ((1919, 1960), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)', 'ceil_mode': '(True)'}), '(2, stride=2, ceil_mode=True)\n', (1931, 1960), True, 'import torch.nn as nn\n'), ((1987, 2020), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)'], {'padding': '(1)'}), '(128, 256, 3, padding=1)\n', (1996, 2020), True, 'import torch.nn as nn\n'), ((2038, 2059), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2045, 2059), True, 'import torch.nn as nn\n'), ((2077, 2110), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)'], {'padding': '(1)'}), '(256, 256, 3, padding=1)\n', (2086, 2110), True, 'import torch.nn as nn\n'), ((2128, 2149), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2135, 2149), True, 'import torch.nn as nn\n'), ((2167, 2200), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)'], {'padding': '(1)'}), '(256, 256, 3, padding=1)\n', (2176, 2200), True, 'import torch.nn as nn\n'), ((2218, 2239), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2225, 2239), True, 'import torch.nn as nn\n'), ((2255, 2296), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)', 'ceil_mode': '(True)'}), '(2, stride=2, ceil_mode=True)\n', (2267, 2296), True, 'import torch.nn as nn\n'), ((2323, 2356), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3)'], {'padding': '(1)'}), '(256, 512, 3, padding=1)\n', (2332, 2356), True, 'import torch.nn as nn\n'), ((2374, 2395), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2381, 2395), True, 'import torch.nn as nn\n'), ((2413, 2446), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3)'], {'padding': '(1)'}), '(512, 512, 3, padding=1)\n', (2422, 2446), True, 'import torch.nn as nn\n'), ((2464, 2485), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2471, 2485), True, 'import torch.nn as nn\n'), ((2503, 2536), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3)'], {'padding': '(1)'}), '(512, 512, 3, padding=1)\n', (2512, 2536), True, 'import torch.nn as nn\n'), ((2554, 2575), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2561, 2575), True, 'import torch.nn as nn\n'), ((2591, 2632), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)', 'ceil_mode': '(True)'}), '(2, stride=2, ceil_mode=True)\n', (2603, 2632), True, 'import torch.nn as nn\n'), ((2663, 2767), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(512)', 'out_channels': '(256)', 'kernel_size': '(2, 2)', 'stride': '(2, 2)', 'bias': '(False)'}), '(in_channels=512, out_channels=256, kernel_size=(2, 2),\n stride=(2, 2), bias=False)\n', (2681, 2767), True, 'import torch.nn as nn\n'), ((2775, 2879), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(256)', 'out_channels': '(128)', 'kernel_size': '(2, 2)', 'stride': '(2, 2)', 'bias': '(False)'}), '(in_channels=256, out_channels=128, kernel_size=(2, 2),\n stride=(2, 2), bias=False)\n', (2793, 2879), True, 'import torch.nn as nn\n'), ((2887, 2990), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(128)', 'out_channels': '(64)', 'kernel_size': '(2, 2)', 'stride': '(2, 2)', 'bias': '(False)'}), '(in_channels=128, out_channels=64, kernel_size=(2, 2),\n stride=(2, 2), bias=False)\n', (2905, 2990), True, 'import torch.nn as nn\n'), ((2998, 3099), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(6)', 'kernel_size': '(2, 2)', 'stride': '(2, 2)', 'bias': '(False)'}), '(in_channels=64, out_channels=6, kernel_size=(2, 2),\n stride=(2, 2), bias=False)\n', (3016, 3099), True, 'import torch.nn as nn\n'), ((3108, 3139), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {'affine': '(True)'}), '(64, affine=True)\n', (3122, 3139), True, 'import torch.nn as nn\n'), ((3154, 3186), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {'affine': '(True)'}), '(128, affine=True)\n', (3168, 3186), True, 'import torch.nn as nn\n'), ((3201, 3233), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {'affine': '(True)'}), '(256, affine=True)\n', (3215, 3233), True, 'import torch.nn as nn\n'), ((3248, 3280), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {'affine': '(True)'}), '(512, affine=True)\n', (3262, 3280), True, 'import torch.nn as nn\n'), ((3295, 3327), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {'affine': '(True)'}), '(512, affine=True)\n', (3309, 3327), True, 'import torch.nn as nn\n'), ((3341, 3373), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {'affine': '(True)'}), '(256, affine=True)\n', (3355, 3373), True, 'import torch.nn as nn\n'), ((3387, 3419), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {'affine': '(True)'}), '(128, affine=True)\n', (3401, 3419), True, 'import torch.nn as nn\n'), ((3433, 3464), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {'affine': '(True)'}), '(64, affine=True)\n', (3447, 3464), True, 'import torch.nn as nn\n'), ((365, 400), 'torch.randn', 'torch.randn', (['(self.channel, self.n)'], {}), '((self.channel, self.n))\n', (376, 400), False, 'import torch\n'), ((429, 464), 'torch.randn', 'torch.randn', (['(self.channel, self.n)'], {}), '((self.channel, self.n))\n', (440, 464), False, 'import torch\n'), ((1124, 1138), 'torch.exp', 'torch.exp', (['tmp'], {}), '(tmp)\n', (1133, 1138), False, 'import torch\n'), ((3702, 3720), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (3711, 3720), False, 'import math\n'), ((3944, 3962), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (3953, 3962), False, 'import math\n')]
import argparse import json from gensim.models import Word2Vec from tensorflow_core.python.keras.models import load_model import convert import extract import predict import vectorize from annotation import annotate def main(input_file: str, output_file: str): extracted_jsdoc = extract.extract_from_file(input_file) df = convert.convert_func_to_df(extracted_jsdoc) word2vec_code = Word2Vec.load('data/word_vecs/word2vec_model_code.bin') word2vec_lang = Word2Vec.load('data/word_vecs/word2vec_model_language.bin') vectors = vectorize.df_to_vec(df, word2vec_lang, word2vec_code) model = load_model('data/model.h5') with open("data/types.json") as f: types_map = json.load(f) predictions = predict.predict(model, vectors, types_map) annotate.annotate(df, predictions, input_file, output_file) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("input_file_path", type=str, help="Path of the input file") parser.add_argument("output_file_path", type=str, help="Path of the output file") args = parser.parse_args() main(args.input_file_path, args.output_file_path)
[ "json.load", "argparse.ArgumentParser", "vectorize.df_to_vec", "extract.extract_from_file", "tensorflow_core.python.keras.models.load_model", "predict.predict", "convert.convert_func_to_df", "gensim.models.Word2Vec.load", "annotation.annotate.annotate" ]
[((287, 324), 'extract.extract_from_file', 'extract.extract_from_file', (['input_file'], {}), '(input_file)\n', (312, 324), False, 'import extract\n'), ((334, 377), 'convert.convert_func_to_df', 'convert.convert_func_to_df', (['extracted_jsdoc'], {}), '(extracted_jsdoc)\n', (360, 377), False, 'import convert\n'), ((398, 453), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""data/word_vecs/word2vec_model_code.bin"""'], {}), "('data/word_vecs/word2vec_model_code.bin')\n", (411, 453), False, 'from gensim.models import Word2Vec\n'), ((474, 533), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""data/word_vecs/word2vec_model_language.bin"""'], {}), "('data/word_vecs/word2vec_model_language.bin')\n", (487, 533), False, 'from gensim.models import Word2Vec\n'), ((548, 601), 'vectorize.df_to_vec', 'vectorize.df_to_vec', (['df', 'word2vec_lang', 'word2vec_code'], {}), '(df, word2vec_lang, word2vec_code)\n', (567, 601), False, 'import vectorize\n'), ((614, 641), 'tensorflow_core.python.keras.models.load_model', 'load_model', (['"""data/model.h5"""'], {}), "('data/model.h5')\n", (624, 641), False, 'from tensorflow_core.python.keras.models import load_model\n'), ((732, 774), 'predict.predict', 'predict.predict', (['model', 'vectors', 'types_map'], {}), '(model, vectors, types_map)\n', (747, 774), False, 'import predict\n'), ((779, 838), 'annotation.annotate.annotate', 'annotate.annotate', (['df', 'predictions', 'input_file', 'output_file'], {}), '(df, predictions, input_file, output_file)\n', (796, 838), False, 'from annotation import annotate\n'), ((881, 906), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (904, 906), False, 'import argparse\n'), ((701, 713), 'json.load', 'json.load', (['f'], {}), '(f)\n', (710, 713), False, 'import json\n')]
# coding=utf8 """ 方便调试使用 """ from lofka import LofkaHandler,LofkaAsyncHandler import logging import traceback handler = LofkaAsyncHandler() logger = logging.getLogger('test') logger.addHandler(handler) def __debug_method(): try: raise Exception("TestException") except Exception as ex: traceback.format_exc() logger.exception("ErrorTitle") if __name__ == "__main__": __debug_method()
[ "lofka.LofkaAsyncHandler", "traceback.format_exc", "logging.getLogger" ]
[((122, 141), 'lofka.LofkaAsyncHandler', 'LofkaAsyncHandler', ([], {}), '()\n', (139, 141), False, 'from lofka import LofkaHandler, LofkaAsyncHandler\n'), ((151, 176), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (168, 176), False, 'import logging\n'), ((314, 336), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (334, 336), False, 'import traceback\n')]
from bs4 import BeautifulSoup import requests #from webdriver import keep_alive import discord import time from discord.ext import commands bot = commands.Bot(command_prefix='!') bot.remove_command("help") @bot.event async def on_ready(): await bot.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.watching, name="")) print(f'Logged in as {bot.user.name}') @commands.command(name="ebay") async def ebay(ctx): message = ctx.message.content.split('!ebay ') url = message[1] pname = str(url) url = url.replace(' ', '+') url ='https://www.ebay.com/sch/i.html?_from=R40&_nkw={}&_sacat=0&_ipg=200'.format(url) print(url) pname=str(pname) embeder = discord.Embed(title='Processing...', description='Looking for `{}`.\n\nThis may take some time...'.format(pname), color=0x00bfff) sendd1 = await ctx.send(embed=embeder) x = requests.get(url) time.sleep(2) embeder = discord.Embed(title='Processing...', description='Looking for `{}`.\n\nThis may take some time...\n\n`Scraping Prices...`'.format(pname), color=0x00bfff) embeder.set_thumbnail(url='https://cdn0.iconfinder.com/data/icons/big-file-flat/32/02_Cloud_Computing_computer_internet_file_data-512.png') sendd = await sendd1.edit(embed=embeder) soup = BeautifulSoup(x.content, 'html.parser') together = [] allitems = soup.find_all('span', class_='s-item__price') for price in allitems: price = price.text price = price.replace('$', '') price = price.replace(',', '') if 'to' in price: pass else: price = int(float(price)) together.append(price) embeder = discord.Embed(title='Processing...', description='Looking for `{}`.\n\nThis may take some time...\n\n`Doing math...`'.format(pname), color=0x00bfff) embeder.set_thumbnail(url='https://www.pngmart.com/files/7/Calculator-PNG-Picture.png') new = await sendd1.edit(embed=embeder) time.sleep(4) items = len(together) total = sum(together) average = total/items average = round(average, 2) average = str(average) items = str(items) embeder = discord.Embed(title='Done', description='Here is what I found.\n\nItem : `{}`\n\nAverage Price : `${}`\n\nTotal number of items counted : `{}`'.format(pname,average,items), color=0x00bfff) embeder.set_thumbnail(url='https://assets.stickpng.com/thumbs/5aa78e207603fc558cffbf19.png') sendd = await sendd1.edit(embed=embeder) bot.add_command(ebay) #keep_alive() bot.run('bot-token')
[ "discord.Activity", "discord.ext.commands.command", "time.sleep", "requests.get", "discord.ext.commands.Bot", "bs4.BeautifulSoup" ]
[((147, 179), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""!"""'}), "(command_prefix='!')\n", (159, 179), False, 'from discord.ext import commands\n'), ((412, 441), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""ebay"""'}), "(name='ebay')\n", (428, 441), False, 'from discord.ext import commands\n'), ((891, 908), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (903, 908), False, 'import requests\n'), ((911, 924), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (921, 924), False, 'import time\n'), ((1287, 1326), 'bs4.BeautifulSoup', 'BeautifulSoup', (['x.content', '"""html.parser"""'], {}), "(x.content, 'html.parser')\n", (1300, 1326), False, 'from bs4 import BeautifulSoup\n'), ((1920, 1933), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (1930, 1933), False, 'import time\n'), ((307, 368), 'discord.Activity', 'discord.Activity', ([], {'type': 'discord.ActivityType.watching', 'name': '""""""'}), "(type=discord.ActivityType.watching, name='')\n", (323, 368), False, 'import discord\n')]
import time # print( time.time()) def timmer(func): def wrapper(): start_time = time.time() func() stop_time = time.time() print("运行时间是 %s 秒 " % (stop_time - start_time)) return wrapper @timmer def i_can_sleep(): time.sleep(3) # start_time = time.time() i_can_sleep() # stop_time = time.time() # print('函数运行了 %s 秒' %(stop_time-start_time))
[ "time.time", "time.sleep" ]
[((262, 275), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (272, 275), False, 'import time\n'), ((94, 105), 'time.time', 'time.time', ([], {}), '()\n', (103, 105), False, 'import time\n'), ((141, 152), 'time.time', 'time.time', ([], {}), '()\n', (150, 152), False, 'import time\n')]
#!/usr/bin/python3 import cv2 import cv2IP if __name__ == '__main__': IP = cv2IP.BaseIP() img = IP.ImRead("img/test.jpg") IP.ImWindow("foreGround") IP.ImShow("foreGround", img) cv2.waitKey(0) del IP
[ "cv2.waitKey", "cv2IP.BaseIP" ]
[((81, 95), 'cv2IP.BaseIP', 'cv2IP.BaseIP', ([], {}), '()\n', (93, 95), False, 'import cv2IP\n'), ((199, 213), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (210, 213), False, 'import cv2\n')]
from __future__ import print_function, division import torch import os import pandas as pd from skimage import io, transform import numpy as np from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import sklearn import sklearn.metrics as sklm import csv import argparse import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler from torch.autograd import Variable import numpy as np import torchvision from torchvision import datasets, models, transforms import time import os import pickle import random from shutil import copyfile from shutil import rmtree import torchvision use_gpu = torch.cuda.is_available() print("We see GPU:") print(use_gpu) print("Let's use", torch.cuda.device_count(), "GPUs!") from PIL import Image import CXRDataset as CXR import Eval as E from importlib import reload reload(CXR) reload(E) def checkpoint(model_ft, best_acc, best_loss, epoch,PRED_LABEL,LR,RESULT_PATH): """ save checkpoint args: model_ft: torchvision model best_acc: best accuracy achieved so far in training best_loss: best loss achieved so far in training epoch: last epoch of training PRED_LABEL: what we're predicting; expect format ["Pneumonia"] or ["Pneumonia","Opacity"]... etc LR: learning rate RESULT_PATH: path to save this to returns: nothing (saves file) """ # Save checkpoint. print('Saving..') state = { 'model_ft': model_ft, 'best_acc': best_acc, 'best_loss': best_loss, 'epoch': epoch, 'rng_state': torch.get_rng_state(), 'LR':LR } torch.save(state, RESULT_PATH+'checkpoint_'+PRED_LABEL) def train_model(model, criterion, optimizer, LR, num_epochs=5,dataloaders="x",dataset_sizes="x", PRED_LABEL="x", start_epoch=1,MULTILABEL=True,FOLD_OVERRIDE="",TRAIN_FILTER="",RESULT_PATH="results/",MULTICLASS=False): """ performs torchvision model training args: model: model to fine tune criterion: pytorch optimization criteria optimizer: pytorch optimizer LR: learning rate num_epochs: stop after this many epochs dataloaders: torchvision dataloader dataset_sizes: length of train/val datasets PRED_LABEL: targets we're predicting in list format ["PNA","Opacity"] etc start_epoch: in case of loading saved model; not currently used MULTILABEL: should be removed - always True - everything is trained using multilabel list format now even single labels ["Pneumonia"] FOLD_OVERRIDE: columns of scalars with train/val/test split TRAIN_FILTER: list of data we're training on, used for labeling results RESULT_PATH= path at which resutls are saved, recommend leaving default to use with other scripts MULTICLASS: if training on single multiclass n>2 target; currently only implemented for single multiclass target. returns: model: trained torchvision model best_epoch: epoch on which best model was achieved """ since = time.time() best_acc = 0.0 best_loss=999999 best_epoch=-1 last_train_acc=-1 last_train_loss=-1 for epoch in range(start_epoch,num_epochs+1): print('Epoch {}/{}'.format(epoch, num_epochs)) print('-' * 10) #small_data flag used to decide on how to decay small_data=False if dataset_sizes['train']<=10000: small_data=True iter_at_lr=0 # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train(True) # Set model to training mode else: model.train(False) # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. i=0 total_done=0 for data in dataloaders[phase]: i+=1 # get the inputs inputs, labels = data batch_size= inputs.shape[0] if use_gpu: inputs = Variable(inputs.cuda()) labels = Variable(labels.cuda()) else: inputs, labels = Variable(inputs), Variable(labels) # zero the parameter gradients optimizer.zero_grad() #needed for multilabel training which uses different loss and expects floats if not MULTICLASS: labels = labels.float() outputs = model(inputs) _, preds = torch.max(outputs.data, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics if MULTICLASS: # need to fix this for multilabel running_corrects += torch.sum(preds == labels.long().data) running_loss += loss.data[0]*batch_size epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects / dataset_sizes[phase] if phase=='train': last_train_acc=epoch_acc last_train_loss=epoch_loss print(phase+' epoch {}:loss {:.4f} acc: {:.4f} with data size {}'.format( epoch, epoch_loss, epoch_acc, dataset_sizes[phase])) #decay if not best if phase == 'val' and epoch_loss > best_loss: #normally we just decay if no improvement in val loss in epoch, but not ideal with small datasets #so 'small_data' condition that insists on 5 passes at lr if dataset size <=10k if small_data==False or iter_at_lr>=4: print("decay loss from "+str(LR)+" to "+str(LR/10)+" as not seeing improvement in val loss") LR = LR / 10 #making a new optimizer zeros out momentum optimizer = optim.SGD(filter(lambda p:p.requires_grad, model.parameters()), lr = LR, momentum=0.9, weight_decay=1e-4) iter_at_lr=0 else: iter_at_lr+=1 #below is used for labeling results trainstring = str(TRAIN_FILTER).replace("_","").replace("[","").replace(",","_").replace("]","").replace(" ","").replace("'","") # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = model.state_dict() if phase == 'val' and epoch_loss < best_loss: best_loss = epoch_loss best_epoch = epoch #save stuff if we have a best model write_label = str(PRED_LABEL) write_label = "Multilabel" checkpoint(model, best_acc, best_loss, epoch, RESULT_PATH+write_label+"_train_"+trainstring+"_"+FOLD_OVERRIDE,LR,RESULT_PATH=RESULT_PATH) write_label = "multilabel_" + trainstring + "_" + FOLD_OVERRIDE if phase== 'val': with open(RESULT_PATH+"log_train_"+write_label, 'a') as logfile: logwriter = csv.writer(logfile, delimiter=',') logwriter.writerow([write_label, epoch, last_train_loss, last_train_acc, epoch_loss, epoch_acc]) total_done+=batch_size if(total_done % (100*batch_size) == 0): print("completed "+str(total_done)+" so far in epoch") #quit if 3 epochs no improvement if ((epoch-best_epoch)>=3 and small_data==False) or ((epoch-best_epoch)>=15 and small_data==True): print("no improvement in 3 epochs, break") break time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model weights and return them checkpoint_best = torch.load(RESULT_PATH+"checkpoint_results/Multilabel_train_"+trainstring+"_"+FOLD_OVERRIDE) model = checkpoint_best['model_ft'] return model, best_epoch def give_mean_var(LABEL_PATH, PRED_LABEL,BALANCE_MODE, TRAIN_FILTER,MULTILABEL, FOLD_OVERRIDE, BATCH_SIZE): """ args: LABEL_PATH: path to the scalars file PRED_LABEL: list of targets we're predicting BALANCE_MODE: deprecated TRAIN_FILTER: list of dataset we're training on, needed for dataloader MULTILABEL: deprecated, always true FOLD_OVERRIDE: train/val/test split column name in scalars BATCH_SIZE: passes batch for dataloader returns: mean: rgb channel means np array 3x1 std:: rgb channel std np array 3x1 """ #create set of val transforms data_transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Scale(224), transforms.CenterCrop(224), #needed to get 224x224 transforms.ToTensor() ]) #make dataloader transformed_dataset =CXR.CXRDataset(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL, transform=data_transform, balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=0,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH="ignore",MULTICLASS=MULTICLASS) dataloader = torch.utils.data.DataLoader(transformed_dataset, batch_size=BATCH_SIZE, shuffle=True,num_workers=8)#, sampler=sampler) #calculate some means and st devs x = len(dataloader)*BATCH_SIZE print("len dataloader for give_mean_var:"+str(x)) means = np.empty((x,3)) stds = np.empty((x,3)) means[:,:]=np.nan stds[:,:]=np.nan for data in dataloader: inputs, _ = data inputs=inputs.numpy() for i in range(0,inputs.shape[0]): for j in range(0,3): means[i,j]=np.mean(inputs[i,j,:,:]) stds[i,j]=np.std(inputs[i,j,:,:]) mean = np.zeros(3) std = np.zeros(3) for j in range (0,3): x=np.nanmean(means[:,j]) mean[j]=x x=np.nanmean(stds[:,j]) std[j]=x return mean, std def train_one(PRED_LABEL,LR,BATCH_SIZE,LABEL_PATH,RESULT_PATH,BALANCE_MODE,FREEZE_LAYERS, NUM_EPOCHS,TRAIN_FILTER,PRED_FILTER,MULTILABEL,FOLD_OVERRIDE,TRAIN_SAMPLE,PRED_SAMPLE,CUSTOM_NORMALIZE, NET_TYPE,MULTICLASS,OUTPUT1024): """ make dataloader, instantiates torchvision model, calls training function, returns results args: PRED_LABEL: list of labels to predict ["pna","opacity"] etc LR: learning rate BATCH_SIZE: batch size for dataloader; too big and won't fit on gpu LABEL_PATH: path to scalars RESULT_PATH: path to write results BALANCE_MODE: deprecated FREEZE_LAYERS: deprecated NUM_EPOCHS: max number of epochs to train for; may quit sooner if not improving TRAIN_FILTER: list of sites we're training on PRED_FILTER: list of sites we're predicting MULTILABEL: deprecated FOLD_OVERRIDE: train/val/test split column in scalars TRAIN_SAMPLE: sample training data to get limited sample (for testing) PRED_SAMPLE: sample test data to get limited sample (for testing) CUSTOM_NORMALIZE: use normalization mean, std based on data not imagenet NET_TYPE: deprecated MULTICLASS: train to single multiclass n>2 target (not implemented for multilabel multiclass) returns: x: df with predictions """ #if we were using custom normalization and not imagenet, do this; it didn't help vs imagenet nornmalization if CUSTOM_NORMALIZE: mean, std = give_mean_var(LABEL_PATH, PRED_LABEL,BALANCE_MODE, TRAIN_FILTER,MULTILABEL, FOLD_OVERRIDE, BATCH_SIZE) print(mean) print(std) elif not CUSTOM_NORMALIZE: mean= [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] #torchvision transforms df = pd.read_csv(LABEL_PATH,index_col=0) data_transforms = { 'train': transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Scale(224), #244 transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std) ]), 'val': transforms.Compose([ transforms.Scale(224), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std) ]), } #make dataloader transformed_datasets={} transformed_datasets['train'] =CXR.CXRDataset(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL, transform=data_transforms['train'], balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=TRAIN_SAMPLE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS) transformed_datasets['val'] =CXR.CXRDataset(csv_file=LABEL_PATH, fold='val', PRED_LABEL=PRED_LABEL, transform=data_transforms['val'], balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=TRAIN_SAMPLE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS) dataloaders={} dataloaders['train'] = torch.utils.data.DataLoader(transformed_datasets['train'], batch_size=BATCH_SIZE, shuffle=True,num_workers=8)#, sampler=sampler) dataloaders['val'] = torch.utils.data.DataLoader(transformed_datasets['val'], batch_size=BATCH_SIZE, shuffle=True, num_workers=8) #instantiate model if not use_gpu: raise ValueError("Error, requires GPU") print('==> Building model..') if(NET_TYPE=="densenet121"): print("using densenet121") model_ft = models.densenet121(pretrained=True) num_ftrs = model_ft.classifier.in_features if(OUTPUT1024==False): print("adding bottleneck=15 features") #if multiclass, needs different output structure then regular training to list of binary taragets if not MULTICLASS: model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, 15), nn.Linear(15, len(PRED_LABEL)),nn.Sigmoid()) elif MULTICLASS: model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, 15), nn.Linear(15, transformed_datasets['train'].n_class)) print("n_class "+str(transformed_datasets['train'].n_class)) elif(OUTPUT1024==True): print("NOT adding bottleneck=15 features") #if multiclass, needs different output structure then regular training to list of binary taragets if not MULTICLASS: model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, len(PRED_LABEL)),nn.Sigmoid()) elif MULTICLASS: model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, transformed_datasets['train'].n_class)) print("n_class "+str(transformed_datasets['train'].n_class)) start_epoch = 1 print("loading model_ft onto gpu") model_ft = model_ft.cuda() if NET_TYPE=="densenet121": if(MULTICLASS==False): criterion = nn.BCELoss() else: criterion = nn.CrossEntropyLoss() # only using this for predicting site, department optimizer_ft = optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), lr=LR, momentum=0.9, weight_decay=1e-4) dataset_sizes = {x: len(transformed_datasets[x]) for x in ['train', 'val']} #train model_ft , best_epoch = train_model(model_ft, criterion, optimizer_ft, LR, num_epochs=NUM_EPOCHS,dataloaders=dataloaders,dataset_sizes=dataset_sizes, PRED_LABEL=PRED_LABEL, start_epoch=start_epoch,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS) #make preds on test x = E.make_pred_multilabel(data_transforms,model_ft,"pred_"+str(PRED_LABEL), LABEL_PATH,RESULT_PATH,PRED_LABEL,TRAIN_FILTER,PRED_FILTER,FOLD_OVERRIDE,PRED_SAMPLE,MULTICLASS,OUTPUT1024) return x def train_cnn(LABEL_PATH, PRED_LABEL,TRAIN_FILTER,PRED_FILTER,BALANCE_MODE,FOLD_OVERRIDE,MULTICLASS=False,OUTPUT1024=False): """ main function that gets called externally to train LABEL_PATH: path to scalars PRED_LABEL: targets to predict; list ["pna","opacity"] etc as in scalars file TRAIN_FILTER: list of sites we're training to ["nih","msh"] PRED_FILTER: list of sites we're predicting ["nih","iu"] BALANCE_MODE: deprecated FOLD_OVERRIDE: the column of scalars we use for train val test split MULTICLASS: train to single multiclass n>2 target returns: y: results """ NUM_EPOCHS=50 BATCH_SIZE=16 LR = 0.01 RESULT_PATH="results/" FREEZE_LAYERS="no" MULTILABEL = not isinstance(PRED_LABEL, str) TRAIN_SAMPLE=0 PRED_SAMPLE =0 CUSTOM_NORMALIZE=False NET_TYPE="densenet121" if not os.path.exists(RESULT_PATH): os.makedirs(RESULT_PATH) if not os.path.exists(RESULT_PATH+"checkpoint_results/"): os.makedirs(RESULT_PATH+"checkpoint_results/") x = train_one(PRED_LABEL,LR,BATCH_SIZE,LABEL_PATH,RESULT_PATH,BALANCE_MODE,"layer4",NUM_EPOCHS,TRAIN_FILTER,PRED_FILTER,MULTILABEL,FOLD_OVERRIDE,TRAIN_SAMPLE,PRED_SAMPLE,CUSTOM_NORMALIZE, NET_TYPE, MULTICLASS,OUTPUT1024) y = pd.read_csv(LABEL_PATH) y=y[['img_id']] y = y.merge(x,on="img_id",how="inner") trainlist=str(TRAIN_FILTER).replace("_","").replace("[","").replace(",","_").replace("]","").replace(" ","").replace("'","") y.to_csv(RESULT_PATH+"preds_train_"+trainlist+"_"+FOLD_OVERRIDE+".csv",index=False) return y
[ "pandas.read_csv", "numpy.empty", "torch.get_rng_state", "torch.cuda.device_count", "numpy.mean", "torchvision.transforms.Normalize", "numpy.nanmean", "torch.nn.BCELoss", "torch.utils.data.DataLoader", "torchvision.transforms.Scale", "numpy.std", "CXRDataset.CXRDataset", "torch.load", "os.path.exists", "torch.nn.Linear", "torchvision.transforms.CenterCrop", "csv.writer", "torchvision.transforms.RandomHorizontalFlip", "torch.autograd.Variable", "torch.cuda.is_available", "torch.max", "torch.nn.Sigmoid", "os.makedirs", "torchvision.models.densenet121", "numpy.zeros", "torch.nn.CrossEntropyLoss", "time.time", "torch.save", "importlib.reload", "torchvision.transforms.ToTensor" ]
[((673, 698), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (696, 698), False, 'import torch\n'), ((885, 896), 'importlib.reload', 'reload', (['CXR'], {}), '(CXR)\n', (891, 896), False, 'from importlib import reload\n'), ((897, 906), 'importlib.reload', 'reload', (['E'], {}), '(E)\n', (903, 906), False, 'from importlib import reload\n'), ((754, 779), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (777, 779), False, 'import torch\n'), ((1701, 1760), 'torch.save', 'torch.save', (['state', "(RESULT_PATH + 'checkpoint_' + PRED_LABEL)"], {}), "(state, RESULT_PATH + 'checkpoint_' + PRED_LABEL)\n", (1711, 1760), False, 'import torch\n'), ((3155, 3166), 'time.time', 'time.time', ([], {}), '()\n', (3164, 3166), False, 'import time\n'), ((8337, 8441), 'torch.load', 'torch.load', (["(RESULT_PATH + 'checkpoint_results/Multilabel_train_' + trainstring + '_' +\n FOLD_OVERRIDE)"], {}), "(RESULT_PATH + 'checkpoint_results/Multilabel_train_' +\n trainstring + '_' + FOLD_OVERRIDE)\n", (8347, 8441), False, 'import torch\n'), ((9386, 9687), 'CXRDataset.CXRDataset', 'CXR.CXRDataset', ([], {'csv_file': 'LABEL_PATH', 'fold': '"""train"""', 'PRED_LABEL': 'PRED_LABEL', 'transform': 'data_transform', 'balance_classes': 'BALANCE_MODE', 'FILTER': 'TRAIN_FILTER', 'MULTILABEL': 'MULTILABEL', 'FOLD_OVERRIDE': 'FOLD_OVERRIDE', 'SAMPLE': '(0)', 'TRAIN_FILTER': 'TRAIN_FILTER', 'RESULT_PATH': '"""ignore"""', 'MULTICLASS': 'MULTICLASS'}), "(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL,\n transform=data_transform, balance_classes=BALANCE_MODE, FILTER=\n TRAIN_FILTER, MULTILABEL=MULTILABEL, FOLD_OVERRIDE=FOLD_OVERRIDE,\n SAMPLE=0, TRAIN_FILTER=TRAIN_FILTER, RESULT_PATH='ignore', MULTICLASS=\n MULTICLASS)\n", (9400, 9687), True, 'import CXRDataset as CXR\n'), ((9686, 9790), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['transformed_dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(8)'}), '(transformed_dataset, batch_size=BATCH_SIZE,\n shuffle=True, num_workers=8)\n', (9713, 9790), False, 'import torch\n'), ((9968, 9984), 'numpy.empty', 'np.empty', (['(x, 3)'], {}), '((x, 3))\n', (9976, 9984), True, 'import numpy as np\n'), ((9996, 10012), 'numpy.empty', 'np.empty', (['(x, 3)'], {}), '((x, 3))\n', (10004, 10012), True, 'import numpy as np\n'), ((10350, 10361), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10358, 10361), True, 'import numpy as np\n'), ((10372, 10383), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10380, 10383), True, 'import numpy as np\n'), ((12400, 12436), 'pandas.read_csv', 'pd.read_csv', (['LABEL_PATH'], {'index_col': '(0)'}), '(LABEL_PATH, index_col=0)\n', (12411, 12436), True, 'import pandas as pd\n'), ((13037, 13360), 'CXRDataset.CXRDataset', 'CXR.CXRDataset', ([], {'csv_file': 'LABEL_PATH', 'fold': '"""train"""', 'PRED_LABEL': 'PRED_LABEL', 'transform': "data_transforms['train']", 'balance_classes': 'BALANCE_MODE', 'FILTER': 'TRAIN_FILTER', 'MULTILABEL': 'MULTILABEL', 'FOLD_OVERRIDE': 'FOLD_OVERRIDE', 'SAMPLE': 'TRAIN_SAMPLE', 'TRAIN_FILTER': 'TRAIN_FILTER', 'RESULT_PATH': 'RESULT_PATH', 'MULTICLASS': 'MULTICLASS'}), "(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL,\n transform=data_transforms['train'], balance_classes=BALANCE_MODE,\n FILTER=TRAIN_FILTER, MULTILABEL=MULTILABEL, FOLD_OVERRIDE=FOLD_OVERRIDE,\n SAMPLE=TRAIN_SAMPLE, TRAIN_FILTER=TRAIN_FILTER, RESULT_PATH=RESULT_PATH,\n MULTICLASS=MULTICLASS)\n", (13051, 13360), True, 'import CXRDataset as CXR\n'), ((13372, 13692), 'CXRDataset.CXRDataset', 'CXR.CXRDataset', ([], {'csv_file': 'LABEL_PATH', 'fold': '"""val"""', 'PRED_LABEL': 'PRED_LABEL', 'transform': "data_transforms['val']", 'balance_classes': 'BALANCE_MODE', 'FILTER': 'TRAIN_FILTER', 'MULTILABEL': 'MULTILABEL', 'FOLD_OVERRIDE': 'FOLD_OVERRIDE', 'SAMPLE': 'TRAIN_SAMPLE', 'TRAIN_FILTER': 'TRAIN_FILTER', 'RESULT_PATH': 'RESULT_PATH', 'MULTICLASS': 'MULTICLASS'}), "(csv_file=LABEL_PATH, fold='val', PRED_LABEL=PRED_LABEL,\n transform=data_transforms['val'], balance_classes=BALANCE_MODE, FILTER=\n TRAIN_FILTER, MULTILABEL=MULTILABEL, FOLD_OVERRIDE=FOLD_OVERRIDE,\n SAMPLE=TRAIN_SAMPLE, TRAIN_FILTER=TRAIN_FILTER, RESULT_PATH=RESULT_PATH,\n MULTICLASS=MULTICLASS)\n", (13386, 13692), True, 'import CXRDataset as CXR\n'), ((13716, 13831), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["transformed_datasets['train']"], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(8)'}), "(transformed_datasets['train'], batch_size=\n BATCH_SIZE, shuffle=True, num_workers=8)\n", (13743, 13831), False, 'import torch\n'), ((13870, 13983), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["transformed_datasets['val']"], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(8)'}), "(transformed_datasets['val'], batch_size=\n BATCH_SIZE, shuffle=True, num_workers=8)\n", (13897, 13983), False, 'import torch\n'), ((17905, 17928), 'pandas.read_csv', 'pd.read_csv', (['LABEL_PATH'], {}), '(LABEL_PATH)\n', (17916, 17928), True, 'import pandas as pd\n'), ((1642, 1663), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (1661, 1663), False, 'import torch\n'), ((8093, 8104), 'time.time', 'time.time', ([], {}), '()\n', (8102, 8104), False, 'import time\n'), ((10425, 10448), 'numpy.nanmean', 'np.nanmean', (['means[:, j]'], {}), '(means[:, j])\n', (10435, 10448), True, 'import numpy as np\n'), ((10476, 10498), 'numpy.nanmean', 'np.nanmean', (['stds[:, j]'], {}), '(stds[:, j])\n', (10486, 10498), True, 'import numpy as np\n'), ((14222, 14257), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (14240, 14257), False, 'from torchvision import datasets, models, transforms\n'), ((17483, 17510), 'os.path.exists', 'os.path.exists', (['RESULT_PATH'], {}), '(RESULT_PATH)\n', (17497, 17510), False, 'import os\n'), ((17520, 17544), 'os.makedirs', 'os.makedirs', (['RESULT_PATH'], {}), '(RESULT_PATH)\n', (17531, 17544), False, 'import os\n'), ((17556, 17607), 'os.path.exists', 'os.path.exists', (["(RESULT_PATH + 'checkpoint_results/')"], {}), "(RESULT_PATH + 'checkpoint_results/')\n", (17570, 17607), False, 'import os\n'), ((17615, 17663), 'os.makedirs', 'os.makedirs', (["(RESULT_PATH + 'checkpoint_results/')"], {}), "(RESULT_PATH + 'checkpoint_results/')\n", (17626, 17663), False, 'import os\n'), ((9172, 9205), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (9203, 9205), False, 'from torchvision import datasets, models, transforms\n'), ((9220, 9241), 'torchvision.transforms.Scale', 'transforms.Scale', (['(224)'], {}), '(224)\n', (9236, 9241), False, 'from torchvision import datasets, models, transforms\n'), ((9251, 9277), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (9272, 9277), False, 'from torchvision import datasets, models, transforms\n'), ((9310, 9331), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9329, 9331), False, 'from torchvision import datasets, models, transforms\n'), ((15637, 15649), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (15647, 15649), True, 'import torch.nn as nn\n'), ((15688, 15709), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (15707, 15709), True, 'import torch.nn as nn\n'), ((4764, 4790), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4773, 4790), False, 'import torch\n'), ((10246, 10273), 'numpy.mean', 'np.mean', (['inputs[i, j, :, :]'], {}), '(inputs[i, j, :, :])\n', (10253, 10273), True, 'import numpy as np\n'), ((10297, 10323), 'numpy.std', 'np.std', (['inputs[i, j, :, :]'], {}), '(inputs[i, j, :, :])\n', (10303, 10323), True, 'import numpy as np\n'), ((12510, 12543), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (12541, 12543), False, 'from torchvision import datasets, models, transforms\n'), ((12566, 12587), 'torchvision.transforms.Scale', 'transforms.Scale', (['(224)'], {}), '(224)\n', (12582, 12587), False, 'from torchvision import datasets, models, transforms\n'), ((12606, 12632), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (12627, 12632), False, 'from torchvision import datasets, models, transforms\n'), ((12646, 12667), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12665, 12667), False, 'from torchvision import datasets, models, transforms\n'), ((12681, 12712), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (12701, 12712), False, 'from torchvision import datasets, models, transforms\n'), ((12773, 12794), 'torchvision.transforms.Scale', 'transforms.Scale', (['(224)'], {}), '(224)\n', (12789, 12794), False, 'from torchvision import datasets, models, transforms\n'), ((12808, 12834), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (12829, 12834), False, 'from torchvision import datasets, models, transforms\n'), ((12848, 12869), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12867, 12869), False, 'from torchvision import datasets, models, transforms\n'), ((12883, 12914), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (12903, 12914), False, 'from torchvision import datasets, models, transforms\n'), ((7540, 7574), 'csv.writer', 'csv.writer', (['logfile'], {'delimiter': '""","""'}), "(logfile, delimiter=',')\n", (7550, 7574), False, 'import csv\n'), ((14584, 14607), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', '(15)'], {}), '(num_ftrs, 15)\n', (14593, 14607), True, 'import torch.nn as nn\n'), ((14640, 14652), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (14650, 14652), True, 'import torch.nn as nn\n'), ((4366, 4382), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (4374, 4382), False, 'from torch.autograd import Variable\n'), ((4384, 4400), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (4392, 4400), False, 'from torch.autograd import Variable\n'), ((14735, 14758), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', '(15)'], {}), '(num_ftrs, 15)\n', (14744, 14758), True, 'import torch.nn as nn\n'), ((14760, 14812), 'torch.nn.Linear', 'nn.Linear', (['(15)', "transformed_datasets['train'].n_class"], {}), "(15, transformed_datasets['train'].n_class)\n", (14769, 14812), True, 'import torch.nn as nn\n'), ((15208, 15220), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (15218, 15220), True, 'import torch.nn as nn\n'), ((15303, 15361), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', "transformed_datasets['train'].n_class"], {}), "(num_ftrs, transformed_datasets['train'].n_class)\n", (15312, 15361), True, 'import torch.nn as nn\n')]
# coding: utf-8 # # This code is part of lattpy. # # Copyright (c) 2021, <NAME> # # This code is licensed under the MIT License. The copyright notice in the # LICENSE file in the root directory and this permission notice shall # be included in all copies or substantial portions of the Software. """Contains miscellaneous utility methods.""" import logging from typing import Iterable, List, Sequence, Optional, Union, Tuple import time import numpy as np __all__ = [ "ArrayLike", "logger", "LatticeError", "ConfigurationError", "SiteOccupiedError", "NoAtomsError", "NoBaseNeighborsError", "NotBuiltError", "Timer", "min_dtype", "chain", "create_lookup_table", "frmt_num", "frmt_bytes", "frmt_time", ] # define type for numpy `array_like` types ArrayLike = Union[int, float, Iterable, np.ndarray] # Configure package logger logger = logging.getLogger("lattpy") _CH = logging.StreamHandler() _CH.setLevel(logging.DEBUG) _FRMT_STR = "[%(asctime)s] %(levelname)-8s - %(name)-15s - %(funcName)-25s - %(message)s" _FRMT = logging.Formatter(_FRMT_STR, datefmt='%H:%M:%S') _CH.setFormatter(_FRMT) # Add formatter to stream handler logger.addHandler(_CH) # Add stream handler to package logger logger.setLevel(logging.WARNING) # Set initial logging level class LatticeError(Exception): pass class ConfigurationError(LatticeError): @property def msg(self): return self.args[0] @property def hint(self): return self.args[1] def __str__(self): msg, hint = self.args if hint: msg += f" ({hint})" return msg class SiteOccupiedError(ConfigurationError): def __init__(self, atom, pos): super().__init__(f"Can't add {atom} to lattice, position {pos} already occupied!") class NoAtomsError(ConfigurationError): def __init__(self): super().__init__("lattice doesn't contain any atoms", "use 'add_atom' to add an 'Atom'-object") class NoBaseNeighborsError(ConfigurationError): def __init__(self): msg = "base neighbors not configured" hint = "call 'set_num_neighbors' after adding atoms or " \ "use the 'neighbors' keyword of 'add_atom'" super().__init__(msg, hint) class NotBuiltError(ConfigurationError): def __init__(self): msg = "lattice has not been built" hint = "use the 'build' method to construct a finite size lattice model" super().__init__(msg, hint) def create_lookup_table(array: ArrayLike, dtype: Optional[Union[str, np.dtype]] = np.uint8) \ -> Tuple[np.ndarray, np.ndarray]: """Converts the given array to an array of indices linked to the unique values. Parameters ---------- array : array_like dtype : int or np.dtype, optional Optional data-type for storing the indices of the unique values. By default `np.uint8` is used, since it is assumed that the input-array has only a few unique values. Returns ------- values : np.ndarray The unique values occuring in the input-array. indices : np.ndarray The corresponding indices in the same shape as the input-array. """ values = np.sort(np.unique(array)) indices = np.zeros_like(array, dtype=dtype) for i, x in enumerate(values): mask = array == x indices[mask] = i return values, indices def min_dtype(a: Union[int, float, np.ndarray, Iterable], signed: Optional[bool] = True) -> np.dtype: """Returns the minimum required dtype to store the given values. Parameters ---------- a : array_like One or more values for determining the dtype. Should contain the maximal expected values. signed : bool, optional If `True` the dtype is forced to be signed. The default is `True`. Returns ------- dtype : dtype The required dtype. """ if signed: a = -np.max(np.abs(a))-1 else: amin, amax = np.min(a), np.max(a) if amin < 0: a = - amax - 1 if abs(amin) <= amax else amin else: a = amax return np.dtype(np.min_scalar_type(a)) def chain(items: Sequence, cycle: bool = False) -> List: """Creates a chain between items Parameters ---------- items : Sequence items to join to chain cycle : bool, optional cycle to the start of the chain if True, default: False Returns ------- chain: list chain of items Example ------- >>> print(chain(["x", "y", "z"])) [['x', 'y'], ['y', 'z']] >>> print(chain(["x", "y", "z"], True)) [['x', 'y'], ['y', 'z'], ['z', 'x']] """ result = list() for i in range(len(items)-1): result.append([items[i], items[i+1]]) if cycle: result.append([items[-1], items[0]]) return result def frmt_num(num: float, dec: Optional[int] = 1, unit: Optional[str] = '', div: Optional[float] = 1000.) -> str: """Returns a formatted string of a number. Parameters ---------- num : float The number to format. dec : int, optional Number of decimals. The default is 1. unit : str, optional Optional unit suffix. By default no unit-strinmg is used. div : float, optional The divider used for units. The default is 1000. Returns ------- num_str: str """ for prefix in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < div: return f"{num:.{dec}f}{prefix}{unit}" num /= div return f"{num:.{dec}f}Y{unit}" def frmt_bytes(num: float, dec: Optional[int] = 1) -> str: """Returns a formatted string of the number of bytes.""" return frmt_num(num, dec, unit="iB", div=1024) def frmt_time(seconds: float, short: bool = False, width: int = 0) -> str: """Returns a formated string for a given time in seconds. Parameters ---------- seconds : float Time value to format short : bool, optional Flag if short representation should be used. width : int, optional Optional minimum length of the returned string. Returns ------- time_str: str """ string = "00:00" # short time string if short: if seconds > 0: mins, secs = divmod(seconds, 60) if mins > 60: hours, mins = divmod(mins, 60) string = f"{hours:02.0f}:{mins:02.0f}h" else: string = f"{mins:02.0f}:{secs:02.0f}" # Full time strings else: if seconds < 1e-3: nanos = 1e6 * seconds string = f"{nanos:.0f}\u03BCs" elif seconds < 1: millis = 1000 * seconds string = f"{millis:.1f}ms" elif seconds < 60: string = f"{seconds:.1f}s" else: mins, seconds = divmod(seconds, 60) if mins < 60: string = f"{mins:.0f}:{seconds:04.1f}min" else: hours, mins = divmod(mins, 60) string = f"{hours:.0f}:{mins:02.0f}:{seconds:02.0f}h" if width > 0: string = f"{string:>{width}}" return string class Timer: """Timer object for easy time measuring.""" __slots__ = ["_time", "_t0"] def __init__(self, method=None): self._time = method or time.perf_counter self._t0 = 0 self.start() @property def seconds(self) -> float: """Returns the time since the timer has been started in seconds.""" return self.time() - self._t0 @property def millis(self) -> float: """Returns the time since the timer has been started in milliseconds.""" return 1000 * (self.time() - self._t0) def time(self) -> float: """Returns the current time as a timestamp.""" return self._time() def start(self) -> None: """Start the timer.""" self._t0 = self._time() def eta(self, progress: float) -> float: """Approximates the time left for a task. Parameters ---------- progress: float Progress fraction of task. Returns ------- eta: float Approximation of time left. """ if not progress: return 0.0 return (1 / progress - 1) * self.time() def strfrmt(self, short: bool = False, width: int = 0) -> str: """Formats the time since the timer has been started.""" return frmt_time(self.seconds, short, width) def __repr__(self) -> str: return f'{self.__class__.__name__}({self.strfrmt(short=True)})' def __str__(self) -> str: return self.strfrmt(short=True)
[ "numpy.zeros_like", "numpy.abs", "logging.StreamHandler", "logging.getLogger", "logging.Formatter", "numpy.min_scalar_type", "numpy.min", "numpy.max", "numpy.unique" ]
[((851, 878), 'logging.getLogger', 'logging.getLogger', (['"""lattpy"""'], {}), "('lattpy')\n", (868, 878), False, 'import logging\n'), ((886, 909), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (907, 909), False, 'import logging\n'), ((1038, 1086), 'logging.Formatter', 'logging.Formatter', (['_FRMT_STR'], {'datefmt': '"""%H:%M:%S"""'}), "(_FRMT_STR, datefmt='%H:%M:%S')\n", (1055, 1086), False, 'import logging\n'), ((3290, 3323), 'numpy.zeros_like', 'np.zeros_like', (['array'], {'dtype': 'dtype'}), '(array, dtype=dtype)\n', (3303, 3323), True, 'import numpy as np\n'), ((3258, 3274), 'numpy.unique', 'np.unique', (['array'], {}), '(array)\n', (3267, 3274), True, 'import numpy as np\n'), ((4197, 4218), 'numpy.min_scalar_type', 'np.min_scalar_type', (['a'], {}), '(a)\n', (4215, 4218), True, 'import numpy as np\n'), ((4042, 4051), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (4048, 4051), True, 'import numpy as np\n'), ((4053, 4062), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (4059, 4062), True, 'import numpy as np\n'), ((3998, 4007), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4004, 4007), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- from django.db import models from django.urls import reverse from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import get_user_model from model_utils.models import TimeStampedModel from annoying.fields import JSONField from geolite2 import geolite2 from .utils import string_generator class URL(TimeStampedModel): """ Description: Model Description """ user = models.ForeignKey(get_user_model(), null=True, on_delete=models.CASCADE) short_url = models.SlugField(max_length=6, primary_key=True, unique=True) long_url = models.URLField(max_length=100, unique=True) created = models.DateTimeField(auto_now=True) count = models.IntegerField(default=0) def __str__(self): return self.long_url def get_absolute_url(self): return reverse( 'url_shorter:detail', kwargs={'short_url': self.short_url} ) def save(self, *args, **kwargs): if not self.short_url: self.short_url = string_generator() return super(URL, self).save(*args, **kwargs) class Hit(TimeStampedModel): """ Description: Model Description """ url = models.ForeignKey(URL, on_delete=models.CASCADE) ip = models.CharField(default=None, null=True, max_length=50) data = JSONField(default=None, null=True) class Meta: pass def save(self, *args, **kwargs): if self.ip: reader = geolite2.reader() data = reader.get(self.ip) self.data = data geolite2.close() return super(Hit, self).save(*args, **kwargs)
[ "django.db.models.URLField", "django.db.models.ForeignKey", "django.db.models.CharField", "geolite2.geolite2.close", "django.contrib.auth.get_user_model", "geolite2.geolite2.reader", "django.db.models.SlugField", "annoying.fields.JSONField", "django.urls.reverse", "django.db.models.IntegerField", "django.db.models.DateTimeField" ]
[((529, 590), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(6)', 'primary_key': '(True)', 'unique': '(True)'}), '(max_length=6, primary_key=True, unique=True)\n', (545, 590), False, 'from django.db import models\n'), ((606, 650), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (621, 650), False, 'from django.db import models\n'), ((665, 700), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (685, 700), False, 'from django.db import models\n'), ((713, 743), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (732, 743), False, 'from django.db import models\n'), ((1212, 1260), 'django.db.models.ForeignKey', 'models.ForeignKey', (['URL'], {'on_delete': 'models.CASCADE'}), '(URL, on_delete=models.CASCADE)\n', (1229, 1260), False, 'from django.db import models\n'), ((1270, 1326), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'None', 'null': '(True)', 'max_length': '(50)'}), '(default=None, null=True, max_length=50)\n', (1286, 1326), False, 'from django.db import models\n'), ((1338, 1372), 'annoying.fields.JSONField', 'JSONField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (1347, 1372), False, 'from annoying.fields import JSONField\n'), ((457, 473), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (471, 473), False, 'from django.contrib.auth import get_user_model\n'), ((845, 912), 'django.urls.reverse', 'reverse', (['"""url_shorter:detail"""'], {'kwargs': "{'short_url': self.short_url}"}), "('url_shorter:detail', kwargs={'short_url': self.short_url})\n", (852, 912), False, 'from django.urls import reverse\n'), ((1482, 1499), 'geolite2.geolite2.reader', 'geolite2.reader', ([], {}), '()\n', (1497, 1499), False, 'from geolite2 import geolite2\n'), ((1580, 1596), 'geolite2.geolite2.close', 'geolite2.close', ([], {}), '()\n', (1594, 1596), False, 'from geolite2 import geolite2\n')]
from __future__ import annotations import decimal import logging from typing import Any, Callable, Dict, Mapping, Optional, Set import workflows MessageCallback = Callable[[Mapping[str, Any], Any], None] class CommonTransport: """A common transport class, containing e.g. the logic to manage subscriptions and transactions.""" __callback_interceptor = None __subscriptions: Dict[int, Dict[str, Any]] = {} __subscription_id: int = 0 __transactions: Set[int] = set() __transaction_id: int = 0 log = logging.getLogger("workflows.transport") # # -- High level communication calls ---------------------------------------- # @classmethod def add_command_line_options(cls, parser): """Function to inject command line parameters.""" pass def connect(self) -> bool: """Connect the transport class. This function must be overridden. :return: True-like value when connection successful, False-like value otherwise.""" return False def is_connected(self) -> bool: """Returns the current connection status. This function must be overridden. :return: True-like value when connection is available, False-like value otherwise.""" return False def disconnect(self): """Gracefully disconnect the transport class. This function should be overridden.""" def subscribe(self, channel, callback, **kwargs) -> int: """Listen to a queue, notify via callback function. :param channel: Queue name to subscribe to :param callback: Function to be called when messages are received. The callback will pass two arguments, the header as a dictionary structure, and the message. :param **kwargs: Further parameters for the transport layer. For example disable_mangling: Receive messages as unprocessed strings. exclusive: Attempt to become exclusive subscriber to the queue. acknowledgement: If true receipt of each message needs to be acknowledged. :return: A unique subscription ID """ self.__subscription_id += 1 def mangled_callback(header, message): return callback(header, self._mangle_for_receiving(message)) if "disable_mangling" in kwargs: if kwargs["disable_mangling"]: mangled_callback = callback # noqa:F811 del kwargs["disable_mangling"] self.__subscriptions[self.__subscription_id] = { "channel": channel, "callback": mangled_callback, "ack": kwargs.get("acknowledgement"), "unsubscribed": False, } self.log.debug("Subscribing to %s with ID %d", channel, self.__subscription_id) self._subscribe(self.__subscription_id, channel, mangled_callback, **kwargs) return self.__subscription_id def unsubscribe(self, subscription: int, drop_callback_reference=False, **kwargs): """Stop listening to a queue or a broadcast :param subscription: Subscription ID to cancel :param drop_callback_reference: Drop the reference to the registered callback function immediately. This means any buffered messages still in flight will not arrive at the intended destination and cause exceptions to be raised instead. :param **kwargs: Further parameters for the transport layer. """ if subscription not in self.__subscriptions: raise workflows.Error("Attempting to unsubscribe unknown subscription") if self.__subscriptions[subscription]["unsubscribed"]: raise workflows.Error( "Attempting to unsubscribe already unsubscribed subscription" ) self._unsubscribe(subscription, **kwargs) self.__subscriptions[subscription]["unsubscribed"] = True if drop_callback_reference: self.drop_callback_reference(subscription) def drop_callback_reference(self, subscription: int): """Drop reference to the callback function after unsubscribing. Any future messages arriving for that subscription will result in exceptions being raised. :param subscription: Subscription ID to delete callback reference for. """ if subscription not in self.__subscriptions: raise workflows.Error( "Attempting to drop callback reference for unknown subscription" ) if not self.__subscriptions[subscription]["unsubscribed"]: raise workflows.Error( "Attempting to drop callback reference for live subscription" ) del self.__subscriptions[subscription] def subscribe_broadcast(self, channel, callback, **kwargs) -> int: """Listen to a broadcast topic, notify via callback function. :param channel: Topic name to subscribe to :param callback: Function to be called when messages are received. The callback will pass two arguments, the header as a dictionary structure, and the message. :param **kwargs: Further parameters for the transport layer. For example disable_mangling: Receive messages as unprocessed strings. retroactive: Ask broker to send old messages if possible :return: A unique subscription ID """ self.__subscription_id += 1 def mangled_callback(header, message): return callback(header, self._mangle_for_receiving(message)) if "disable_mangling" in kwargs: if kwargs["disable_mangling"]: mangled_callback = callback # noqa:F811 del kwargs["disable_mangling"] self.__subscriptions[self.__subscription_id] = { "channel": channel, "callback": mangled_callback, "ack": False, "unsubscribed": False, } self.log.debug( "Subscribing to broadcasts on %s with ID %d", channel, self.__subscription_id, ) self._subscribe_broadcast( self.__subscription_id, channel, mangled_callback, **kwargs ) return self.__subscription_id def subscription_callback(self, subscription: int) -> MessageCallback: """Retrieve the callback function for a subscription. Raise a workflows.Error if the subscription does not exist. All transport callbacks can be intercepted by setting an interceptor function with subscription_callback_intercept(). :param subscription: Subscription ID to look up :return: Callback function """ subscription_record = self.__subscriptions.get(subscription) if not subscription_record: raise workflows.Error("Attempting to callback on unknown subscription") callback = subscription_record["callback"] if self.__callback_interceptor: return self.__callback_interceptor(callback) return callback def subscription_callback_set_intercept(self, interceptor): """Set a function to intercept all callbacks. This is useful to, for example, keep a thread barrier between the transport related functions and processing functions. :param interceptor: A function that takes the original callback function and returns a modified callback function. Or None to disable interception. """ self.__callback_interceptor = interceptor def send(self, destination, message, **kwargs): """Send a message to a queue. :param destination: Queue name to send to :param message: Either a string or a serializable object to be sent :param **kwargs: Further parameters for the transport layer. For example delay: Delay transport of message by this many seconds headers: Optional dictionary of header entries expiration: Optional expiration time, relative to sending time transaction: Transaction ID if message should be part of a transaction """ message = self._mangle_for_sending(message) self._send(destination, message, **kwargs) def raw_send(self, destination, message, **kwargs): """Send a raw (unmangled) message to a queue. This may cause errors if the receiver expects a mangled message. :param destination: Queue name to send to :param message: Either a string or a serializable object to be sent :param **kwargs: Further parameters for the transport layer. For example delay: Delay transport of message by this many seconds headers: Optional dictionary of header entries expiration: Optional expiration time, relative to sending time transaction: Transaction ID if message should be part of a transaction """ self._send(destination, message, **kwargs) def broadcast(self, destination, message, **kwargs): """Broadcast a message. :param destination: Topic name to send to :param message: Either a string or a serializable object to be sent :param **kwargs: Further parameters for the transport layer. For example delay: Delay transport of message by this many seconds headers: Optional dictionary of header entries expiration: Optional expiration time, relative to sending time transaction: Transaction ID if message should be part of a transaction """ message = self._mangle_for_sending(message) self._broadcast(destination, message, **kwargs) def raw_broadcast(self, destination, message, **kwargs): """Broadcast a raw (unmangled) message. This may cause errors if the receiver expects a mangled message. :param destination: Topic name to send to :param message: Either a string or a serializable object to be sent :param **kwargs: Further parameters for the transport layer. For example delay: Delay transport of message by this many seconds headers: Optional dictionary of header entries expiration: Optional expiration time, relative to sending time transaction: Transaction ID if message should be part of a transaction """ self._broadcast(destination, message, **kwargs) def ack(self, message, subscription_id: Optional[int] = None, **kwargs): """Acknowledge receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message: ID of the message to be acknowledged, OR a dictionary containing a field 'message-id'. :param subscription_id: ID of the associated subscription. Optional when a dictionary is passed as first parameter and that dictionary contains field 'subscription'. :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if acknowledgement should be part of a transaction """ if isinstance(message, dict): message_id = message.get("message-id") if not subscription_id: subscription_id = message.get("subscription") else: message_id = message if not message_id: raise workflows.Error("Cannot acknowledge message without message ID") if not subscription_id: raise workflows.Error("Cannot acknowledge message without subscription ID") self.log.debug( "Acknowledging message %s on subscription %s", message_id, subscription_id ) self._ack(message_id, subscription_id=subscription_id, **kwargs) def nack(self, message, subscription_id: Optional[int] = None, **kwargs): """Reject receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message: ID of the message to be rejected, OR a dictionary containing a field 'message-id'. :param subscription_id: ID of the associated subscription. Optional when a dictionary is passed as first parameter and that dictionary contains field 'subscription'. :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if rejection should be part of a transaction """ if isinstance(message, dict): message_id = message.get("message-id") if not subscription_id: subscription_id = message.get("subscription") else: message_id = message if not message_id: raise workflows.Error("Cannot reject message without message ID") if not subscription_id: raise workflows.Error("Cannot reject message without subscription ID") self.log.debug( "Rejecting message %s on subscription %d", message_id, subscription_id ) self._nack(message_id, subscription_id=subscription_id, **kwargs) def transaction_begin(self, subscription_id: Optional[int] = None, **kwargs) -> int: """Start a new transaction. :param **kwargs: Further parameters for the transport layer. :return: A transaction ID that can be passed to other functions. """ self.__transaction_id += 1 self.__transactions.add(self.__transaction_id) if subscription_id: self.log.debug( "Starting transaction with ID %d on subscription %d", self.__transaction_id, subscription_id, ) else: self.log.debug("Starting transaction with ID %d", self.__transaction_id) self._transaction_begin( self.__transaction_id, subscription_id=subscription_id, **kwargs ) return self.__transaction_id def transaction_abort(self, transaction_id: int, **kwargs): """Abort a transaction and roll back all operations. :param transaction_id: ID of transaction to be aborted. :param **kwargs: Further parameters for the transport layer. """ if transaction_id not in self.__transactions: raise workflows.Error("Attempting to abort unknown transaction") self.log.debug("Aborting transaction %s", transaction_id) self.__transactions.remove(transaction_id) self._transaction_abort(transaction_id, **kwargs) def transaction_commit(self, transaction_id: int, **kwargs): """Commit a transaction. :param transaction_id: ID of transaction to be committed. :param **kwargs: Further parameters for the transport layer. """ if transaction_id not in self.__transactions: raise workflows.Error("Attempting to commit unknown transaction") self.log.debug("Committing transaction %s", transaction_id) self.__transactions.remove(transaction_id) self._transaction_commit(transaction_id, **kwargs) @property def is_reconnectable(self): """Check if the transport object is in a status where reconnecting is supported. There must not be any active subscriptions or transactions.""" return not self.__subscriptions and not self.__transactions # # -- Low level communication calls to be implemented by subclass ----------- # def _subscribe(self, sub_id: int, channel, callback, **kwargs): """Listen to a queue, notify via callback function. :param sub_id: ID for this subscription in the transport layer :param channel: Queue name to subscribe to :param callback: Function to be called when messages are received :param **kwargs: Further parameters for the transport layer. For example exclusive: Attempt to become exclusive subscriber to the queue. acknowledgement: If true receipt of each message needs to be acknowledged. """ raise NotImplementedError("Transport interface not implemented") def _subscribe_broadcast(self, sub_id: int, channel, callback, **kwargs): """Listen to a broadcast topic, notify via callback function. :param sub_id: ID for this subscription in the transport layer :param channel: Topic name to subscribe to :param callback: Function to be called when messages are received :param **kwargs: Further parameters for the transport layer. For example retroactive: Ask broker to send old messages if possible """ raise NotImplementedError("Transport interface not implemented") def _unsubscribe(self, sub_id: int, **kwargs): """Stop listening to a queue or a broadcast :param sub_id: ID for this subscription in the transport layer """ raise NotImplementedError("Transport interface not implemented") def _send(self, destination, message, **kwargs): """Send a message to a queue. :param destination: Queue name to send to :param message: A string to be sent :param **kwargs: Further parameters for the transport layer. For example headers: Optional dictionary of header entries expiration: Optional expiration time, relative to sending time transaction: Transaction ID if message should be part of a transaction """ raise NotImplementedError("Transport interface not implemented") def _broadcast(self, destination, message, **kwargs): """Broadcast a message. :param destination: Topic name to send to :param message: A string to be broadcast :param **kwargs: Further parameters for the transport layer. For example headers: Optional dictionary of header entries expiration: Optional expiration time, relative to sending time transaction: Transaction ID if message should be part of a transaction """ raise NotImplementedError("Transport interface not implemented") def _ack(self, message_id, subscription_id, **kwargs): """Acknowledge receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message_id: ID of the message to be acknowledged. :param subscription_id: ID of the associated subscription. :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if acknowledgement should be part of a transaction """ raise NotImplementedError("Transport interface not implemented") def _nack(self, message_id, subscription_id, **kwargs): """Reject receipt of a message. This only makes sense when the 'acknowledgement' flag was set for the relevant subscription. :param message_id: ID of the message to be rejected. :param subscription_id: ID of the associated subscription. :param **kwargs: Further parameters for the transport layer. For example transaction: Transaction ID if rejection should be part of a transaction """ raise NotImplementedError("Transport interface not implemented") def _transaction_begin( self, transaction_id: int, *, subscription_id: Optional[int] = None, **kwargs ) -> None: """Start a new transaction. :param transaction_id: ID for this transaction in the transport layer. :param **kwargs: Further parameters for the transport layer. """ raise NotImplementedError("Transport interface not implemented") def _transaction_abort(self, transaction_id: int, **kwargs) -> None: """Abort a transaction and roll back all operations. :param transaction_id: ID of transaction to be aborted. :param **kwargs: Further parameters for the transport layer. """ raise NotImplementedError("Transport interface not implemented") def _transaction_commit(self, transaction_id: int, **kwargs) -> None: """Commit a transaction. :param transaction_id: ID of transaction to be committed. :param **kwargs: Further parameters for the transport layer. """ raise NotImplementedError("Transport interface not implemented") # # -- Internal message mangling functions ----------------------------------- # # Some transport mechanisms will not be able to work with arbitrary objects, # so these functions are used to prepare a message for sending/receiving. # The canonical example is serialization/deserialization, see stomp_transport @staticmethod def _mangle_for_sending(message): """Function that any message will pass through before it being forwarded to the actual _send* functions.""" return message @staticmethod def _mangle_for_receiving(message): """Function that any message will pass through before it being forwarded to the receiving subscribed callback functions.""" return message def json_serializer(obj): """A helper function for JSON serialization, where it can be used as the default= argument. This function helps the serializer to translate objects that otherwise would not be understood. Note that this is one-way only - these objects are not restored on the receiving end.""" if isinstance(obj, decimal.Decimal): # turn all Decimals into floats return float(obj) raise TypeError(repr(obj) + " is not JSON serializable")
[ "workflows.Error", "logging.getLogger" ]
[((536, 576), 'logging.getLogger', 'logging.getLogger', (['"""workflows.transport"""'], {}), "('workflows.transport')\n", (553, 576), False, 'import logging\n'), ((3795, 3860), 'workflows.Error', 'workflows.Error', (['"""Attempting to unsubscribe unknown subscription"""'], {}), "('Attempting to unsubscribe unknown subscription')\n", (3810, 3860), False, 'import workflows\n'), ((3942, 4020), 'workflows.Error', 'workflows.Error', (['"""Attempting to unsubscribe already unsubscribed subscription"""'], {}), "('Attempting to unsubscribe already unsubscribed subscription')\n", (3957, 4020), False, 'import workflows\n'), ((4658, 4744), 'workflows.Error', 'workflows.Error', (['"""Attempting to drop callback reference for unknown subscription"""'], {}), "(\n 'Attempting to drop callback reference for unknown subscription')\n", (4673, 4744), False, 'import workflows\n'), ((4855, 4933), 'workflows.Error', 'workflows.Error', (['"""Attempting to drop callback reference for live subscription"""'], {}), "('Attempting to drop callback reference for live subscription')\n", (4870, 4933), False, 'import workflows\n'), ((7117, 7182), 'workflows.Error', 'workflows.Error', (['"""Attempting to callback on unknown subscription"""'], {}), "('Attempting to callback on unknown subscription')\n", (7132, 7182), False, 'import workflows\n'), ((12013, 12077), 'workflows.Error', 'workflows.Error', (['"""Cannot acknowledge message without message ID"""'], {}), "('Cannot acknowledge message without message ID')\n", (12028, 12077), False, 'import workflows\n'), ((12128, 12197), 'workflows.Error', 'workflows.Error', (['"""Cannot acknowledge message without subscription ID"""'], {}), "('Cannot acknowledge message without subscription ID')\n", (12143, 12197), False, 'import workflows\n'), ((13469, 13528), 'workflows.Error', 'workflows.Error', (['"""Cannot reject message without message ID"""'], {}), "('Cannot reject message without message ID')\n", (13484, 13528), False, 'import workflows\n'), ((13579, 13643), 'workflows.Error', 'workflows.Error', (['"""Cannot reject message without subscription ID"""'], {}), "('Cannot reject message without subscription ID')\n", (13594, 13643), False, 'import workflows\n'), ((15016, 15074), 'workflows.Error', 'workflows.Error', (['"""Attempting to abort unknown transaction"""'], {}), "('Attempting to abort unknown transaction')\n", (15031, 15074), False, 'import workflows\n'), ((15568, 15627), 'workflows.Error', 'workflows.Error', (['"""Attempting to commit unknown transaction"""'], {}), "('Attempting to commit unknown transaction')\n", (15583, 15627), False, 'import workflows\n')]
import numpy as np import torch import torch.optim as optim import torch.nn as nn from torch.autograd import Variable import skimage.io as io import argparse import os import sys import time # Allow python3 to search for modules outside of this directory sys.path.append("../") from models.skip import skip3d from volumetocube import write_bin_from_array from volumetocube import write_obj_from_array import binvox_rw from tools.Ops import radon from tools.Ops import tvloss from tools.Ops import tvloss3d from tools.Ops import load_binvox from tools.Ops import volume_proj from tools.Ops import rotate_volume from tools.Ops import inv_rotate_volume from skimage.measure import compare_ssim as ssim parser = argparse.ArgumentParser(description='Reconstruciton using deep prior.') parser.add_argument("-m", "--method", type=str, help="Prior to be used in the reconstruction (deep | tv | carve)", default="deep") parser.add_argument("-b", "--binvox", type=str, help="Path to the binvox file.", default="../data/bunny.binvox") parser.add_argument("-p", "--projection", type=str, help="Type of projection to be used (depth | binary)", default="depth") parser.add_argument("-n", "--nproj", type=int, help="Number of projections.", default=8) parser.add_argument("-s", "--sigma", type=float, help="Amount of variance in the gaussian noise.", default=0.0) parser.add_argument("-k", "--kappa", type=float, help="Dispersion rate of Von Mises noise.", default=4.0) parser.add_argument("-v", "--viewWeight", type=float, help="Weight of the viewpoint regularization.", default=1.0) def add_gaussian_noise(img, sigma=1.0): randv = torch.randn(*(img.shape)).cuda() return img + sigma*randv if __name__ == '__main__': args = parser.parse_args() use_tv = args.method == 'tv' use_dp = args.method == 'deep' kappa = args.kappa view_weight = args.viewWeight binvoxname = args.binvox.split('/')[-1].split('.')[0] fullname = "prob_{}_{}_{}_{}_{}_vw{}_k{}".format(binvoxname, args.method, args.projection, args.nproj, args.sigma, view_weight, kappa) input_depth = 3 input_noise = torch.randn(1, input_depth, 128, 128, 128).cuda() net = skip3d( input_depth, 1, num_channels_down = [8, 16, 32, 64, 128], num_channels_up = [8, 16, 32, 64, 128], num_channels_skip = [0, 0, 0, 4, 4], upsample_mode='trilinear', need_sigmoid=True, need_bias=True, pad='zero', act_fun='LeakyReLU') net.cuda() net(input_noise) out_volume = torch.zeros(1, 1, 128, 128, 128).cuda() out_volume.requires_grad = True nviews = args.nproj method = args.projection views = torch.FloatTensor(np.random.rand(nviews, 3) * 2*np.pi) noisy_views = torch.FloatTensor(np.random.vonmises(views, kappa, size=(nviews,3))) pred_views = nn.Parameter(noisy_views.detach().clone()) if use_dp: optimizer = optim.Adam(list(net.parameters()) + [pred_views], lr=0.01) elif use_tv: optimizer = optim.Adam([out_volume] + [pred_views], lr=0.01) padder = nn.ConstantPad3d(10, 0.0) volume = padder(load_binvox(args.binvox).cuda()) gtprojs = volume_proj(volume, method=method, views=views).cuda() noisyprojs = gtprojs.detach().clone() noisyprojs.requires_grad = False results_dir = os.path.join("results", fullname) if not os.path.exists(results_dir): os.makedirs(results_dir) mse = nn.L1Loss() sigmoid = nn.Sigmoid() #Space carving if args.method == 'carve': gtprojs = volume_proj(volume, method=method, views=views).cuda() gtprojs.requires_grad = False noisyprojs = gtprojs.clone() noisyprojs.requires_grad = False carve = torch.ones(*(volume.size())).cuda() for i in range(nviews): carve = rotate_volume(carve, x=noisy_views[i,0], y=noisy_views[i,1], z=noisy_views[i,2]) p = gtprojs[:, :, i] < 1e-2 coords = np.argwhere(p) carve[coords[0, :], :, coords[1, :]] = 0.0 carve = inv_rotate_volume(carve, x=noisy_views[i,0], y=noisy_views[i,1], z=noisy_views[i,2]) projs = volume_proj(carve, method=method, views=views).cuda() for i in range(noisyprojs.size()[2]): io.imsave(results_dir+"/carve{}.png".format(i), torch.clamp(projs[:, :, i], -1, 1)) io.imsave(results_dir+"/carvegt{}.png".format(i), torch.clamp(gtprojs[:, :, i], -1, 1)) write_bin_from_array("results/{}/data.npy".format(fullname), carve.data.cpu().numpy()) exit(0) gt_curve = [] noisygt_curve = [] n_iter = 500 out_rec = None out_projs = None pred_views_log = [] noisy_views_log = [] gt_views_log = [] print('EXPERIMENT {}'.format(fullname)) for i in range(n_iter): optimizer.zero_grad() if use_dp: out_rec = net(input_noise)[0, 0, :, :, :] out_projs = volume_proj(out_rec, method=method, views=pred_views) loss = mse(out_projs, noisyprojs) loss -= view_weight * torch.cos(pred_views - noisy_views).mean().cuda() elif use_tv: out_rec = sigmoid(out_volume[0, 0, :, :, :]) out_projs = volume_proj(out_rec, method=method, views=views) loss = mse(out_projs, noisyprojs) + tvloss3d(out_rec, weight=1e-7)# else: raise ValueError("Unkown method") pred_views_log.append(pred_views.data.detach().cpu().numpy()) noisy_views_log.append(noisy_views.data.detach().cpu().numpy()) gt_views_log.append(views.data.detach().cpu().numpy()) predloss = mse(out_projs, noisyprojs) gtloss = torch.abs(out_projs - gtprojs).mean() noisyloss = torch.abs(noisyprojs - gtprojs).mean() print("\r({}/{}) Pred->Noisy: {} | Pred->GT: {} | Noisy->GT: {}".format( str(i).zfill(4), n_iter, predloss.item(), gtloss.item(), noisyloss.item()), gt_curve.append(gtloss.item())) noisygt_curve.append(noisyloss.item()) loss.backward() optimizer.step() results_dir = os.path.join("results", fullname) if not os.path.exists(results_dir): os.makedirs(results_dir) write_bin_from_array("results/{}/databin.npy".format(fullname), out_rec.data.cpu().detach().numpy()) np.save("results/{}/data.npy".format(fullname), out_rec.data.cpu().detach().numpy()) for i in range(out_projs.size()[2]): print("Saved {}".format("results/{}/proj{}".format(fullname, i))) io.imsave("results/{}/proj{}.png".format(fullname, i), out_projs.data.cpu().detach().numpy()[:, :, i]) io.imsave("results/{}/gt{}.png".format(fullname, i), torch.clamp(gtprojs[:, :, i], -1, 1).data.cpu().detach().numpy()) np.save("results/{}/gtviews.npy".format(fullname), np.array(gt_views_log)) np.save("results/{}/noisyviews.npy".format(fullname), np.array(noisy_views_log)) np.save("results/{}/predviews.npy".format(fullname), np.array(pred_views_log))
[ "argparse.ArgumentParser", "torch.randn", "torch.cos", "tools.Ops.volume_proj", "os.path.join", "tools.Ops.rotate_volume", "sys.path.append", "os.path.exists", "torch.zeros", "tools.Ops.load_binvox", "tools.Ops.tvloss3d", "torch.nn.ConstantPad3d", "torch.optim.Adam", "torch.clamp", "numpy.argwhere", "torch.nn.Sigmoid", "os.makedirs", "torch.nn.L1Loss", "tools.Ops.inv_rotate_volume", "numpy.random.vonmises", "numpy.array", "models.skip.skip3d", "numpy.random.rand", "torch.abs" ]
[((257, 279), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (272, 279), False, 'import sys\n'), ((714, 785), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Reconstruciton using deep prior."""'}), "(description='Reconstruciton using deep prior.')\n", (737, 785), False, 'import argparse\n'), ((2186, 2429), 'models.skip.skip3d', 'skip3d', (['input_depth', '(1)'], {'num_channels_down': '[8, 16, 32, 64, 128]', 'num_channels_up': '[8, 16, 32, 64, 128]', 'num_channels_skip': '[0, 0, 0, 4, 4]', 'upsample_mode': '"""trilinear"""', 'need_sigmoid': '(True)', 'need_bias': '(True)', 'pad': '"""zero"""', 'act_fun': '"""LeakyReLU"""'}), "(input_depth, 1, num_channels_down=[8, 16, 32, 64, 128],\n num_channels_up=[8, 16, 32, 64, 128], num_channels_skip=[0, 0, 0, 4, 4],\n upsample_mode='trilinear', need_sigmoid=True, need_bias=True, pad=\n 'zero', act_fun='LeakyReLU')\n", (2192, 2429), False, 'from models.skip import skip3d\n'), ((3075, 3100), 'torch.nn.ConstantPad3d', 'nn.ConstantPad3d', (['(10)', '(0.0)'], {}), '(10, 0.0)\n', (3091, 3100), True, 'import torch.nn as nn\n'), ((3326, 3359), 'os.path.join', 'os.path.join', (['"""results"""', 'fullname'], {}), "('results', fullname)\n", (3338, 3359), False, 'import os\n'), ((3444, 3455), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (3453, 3455), True, 'import torch.nn as nn\n'), ((3470, 3482), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3480, 3482), True, 'import torch.nn as nn\n'), ((6112, 6145), 'os.path.join', 'os.path.join', (['"""results"""', 'fullname'], {}), "('results', fullname)\n", (6124, 6145), False, 'import os\n'), ((2765, 2815), 'numpy.random.vonmises', 'np.random.vonmises', (['views', 'kappa'], {'size': '(nviews, 3)'}), '(views, kappa, size=(nviews, 3))\n', (2783, 2815), True, 'import numpy as np\n'), ((3371, 3398), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (3385, 3398), False, 'import os\n'), ((3408, 3432), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (3419, 3432), False, 'import os\n'), ((6157, 6184), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (6171, 6184), False, 'import os\n'), ((6194, 6218), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (6205, 6218), False, 'import os\n'), ((6889, 6911), 'numpy.array', 'np.array', (['gt_views_log'], {}), '(gt_views_log)\n', (6897, 6911), True, 'import numpy as np\n'), ((6971, 6996), 'numpy.array', 'np.array', (['noisy_views_log'], {}), '(noisy_views_log)\n', (6979, 6996), True, 'import numpy as np\n'), ((7055, 7079), 'numpy.array', 'np.array', (['pred_views_log'], {}), '(pred_views_log)\n', (7063, 7079), True, 'import numpy as np\n'), ((1630, 1653), 'torch.randn', 'torch.randn', (['*img.shape'], {}), '(*img.shape)\n', (1641, 1653), False, 'import torch\n'), ((2126, 2168), 'torch.randn', 'torch.randn', (['(1)', 'input_depth', '(128)', '(128)', '(128)'], {}), '(1, input_depth, 128, 128, 128)\n', (2137, 2168), False, 'import torch\n'), ((2531, 2563), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', '(128)', '(128)', '(128)'], {}), '(1, 1, 128, 128, 128)\n', (2542, 2563), False, 'import torch\n'), ((3008, 3056), 'torch.optim.Adam', 'optim.Adam', (['([out_volume] + [pred_views])'], {'lr': '(0.01)'}), '([out_volume] + [pred_views], lr=0.01)\n', (3018, 3056), True, 'import torch.optim as optim\n'), ((3173, 3220), 'tools.Ops.volume_proj', 'volume_proj', (['volume'], {'method': 'method', 'views': 'views'}), '(volume, method=method, views=views)\n', (3184, 3220), False, 'from tools.Ops import volume_proj\n'), ((3828, 3916), 'tools.Ops.rotate_volume', 'rotate_volume', (['carve'], {'x': 'noisy_views[i, 0]', 'y': 'noisy_views[i, 1]', 'z': 'noisy_views[i, 2]'}), '(carve, x=noisy_views[i, 0], y=noisy_views[i, 1], z=\n noisy_views[i, 2])\n', (3841, 3916), False, 'from tools.Ops import rotate_volume\n'), ((3970, 3984), 'numpy.argwhere', 'np.argwhere', (['p'], {}), '(p)\n', (3981, 3984), True, 'import numpy as np\n'), ((4060, 4152), 'tools.Ops.inv_rotate_volume', 'inv_rotate_volume', (['carve'], {'x': 'noisy_views[i, 0]', 'y': 'noisy_views[i, 1]', 'z': 'noisy_views[i, 2]'}), '(carve, x=noisy_views[i, 0], y=noisy_views[i, 1], z=\n noisy_views[i, 2])\n', (4077, 4152), False, 'from tools.Ops import inv_rotate_volume\n'), ((4941, 4994), 'tools.Ops.volume_proj', 'volume_proj', (['out_rec'], {'method': 'method', 'views': 'pred_views'}), '(out_rec, method=method, views=pred_views)\n', (4952, 4994), False, 'from tools.Ops import volume_proj\n'), ((2692, 2717), 'numpy.random.rand', 'np.random.rand', (['nviews', '(3)'], {}), '(nviews, 3)\n', (2706, 2717), True, 'import numpy as np\n'), ((3126, 3150), 'tools.Ops.load_binvox', 'load_binvox', (['args.binvox'], {}), '(args.binvox)\n', (3137, 3150), False, 'from tools.Ops import load_binvox\n'), ((3552, 3599), 'tools.Ops.volume_proj', 'volume_proj', (['volume'], {'method': 'method', 'views': 'views'}), '(volume, method=method, views=views)\n', (3563, 3599), False, 'from tools.Ops import volume_proj\n'), ((4162, 4208), 'tools.Ops.volume_proj', 'volume_proj', (['carve'], {'method': 'method', 'views': 'views'}), '(carve, method=method, views=views)\n', (4173, 4208), False, 'from tools.Ops import volume_proj\n'), ((4322, 4356), 'torch.clamp', 'torch.clamp', (['projs[:, :, i]', '(-1)', '(1)'], {}), '(projs[:, :, i], -1, 1)\n', (4333, 4356), False, 'import torch\n'), ((4420, 4456), 'torch.clamp', 'torch.clamp', (['gtprojs[:, :, i]', '(-1)', '(1)'], {}), '(gtprojs[:, :, i], -1, 1)\n', (4431, 4456), False, 'import torch\n'), ((5227, 5275), 'tools.Ops.volume_proj', 'volume_proj', (['out_rec'], {'method': 'method', 'views': 'views'}), '(out_rec, method=method, views=views)\n', (5238, 5275), False, 'from tools.Ops import volume_proj\n'), ((5686, 5716), 'torch.abs', 'torch.abs', (['(out_projs - gtprojs)'], {}), '(out_projs - gtprojs)\n', (5695, 5716), False, 'import torch\n'), ((5744, 5775), 'torch.abs', 'torch.abs', (['(noisyprojs - gtprojs)'], {}), '(noisyprojs - gtprojs)\n', (5753, 5775), False, 'import torch\n'), ((5324, 5355), 'tools.Ops.tvloss3d', 'tvloss3d', (['out_rec'], {'weight': '(1e-07)'}), '(out_rec, weight=1e-07)\n', (5332, 5355), False, 'from tools.Ops import tvloss3d\n'), ((5075, 5110), 'torch.cos', 'torch.cos', (['(pred_views - noisy_views)'], {}), '(pred_views - noisy_views)\n', (5084, 5110), False, 'import torch\n'), ((6767, 6803), 'torch.clamp', 'torch.clamp', (['gtprojs[:, :, i]', '(-1)', '(1)'], {}), '(gtprojs[:, :, i], -1, 1)\n', (6778, 6803), False, 'import torch\n')]
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. """Logging related module.""" import os import logging from logging import _checkLevel from fastseq.config import FASTSEQ_DEFAULT_LOG_LEVEL, FASTSEQ_LOG_LEVEL, FASTSEQ_LOG_FORMAT def set_default_log_level(): """Set the default log level from the environment variable""" try: fastseq_log_level = _checkLevel(FASTSEQ_LOG_LEVEL) except (ValueError, TypeError) as e: logging.error( "Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. " "'DEBUG', 'INFO'): {}".format(e)) raise logging.basicConfig(level=fastseq_log_level, format=FASTSEQ_LOG_FORMAT) def get_logger(name=None, level=logging.INFO): """ Return a logger with the specific name, creating it if necessary. If no name is specified, return the root logger. Args: name (str, optional): logger name. Defaults to None. Returns: Logger : the specified logger. """ level = _checkLevel(level) if FASTSEQ_LOG_LEVEL != FASTSEQ_DEFAULT_LOG_LEVEL: try: level = _checkLevel(FASTSEQ_LOG_LEVEL) except (ValueError, TypeError) as e: logging.error( "Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. " "'DEBUG', 'INFO'): {}".format(e)) raise logger = logging.getLogger(name) logger.setLevel(level) return logger def update_all_log_level(level=logging.INFO): """ Update all the loggers to use the specified level. Args: level (int/str, optional): the log level. Defaults to logging.INFO. """ loggers = [ logging.getLogger(name) for name in logging.root.manager.loggerDict] for logger in loggers: logger.setLevel(level)
[ "logging._checkLevel", "logging.getLogger", "logging.basicConfig" ]
[((619, 690), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'fastseq_log_level', 'format': 'FASTSEQ_LOG_FORMAT'}), '(level=fastseq_log_level, format=FASTSEQ_LOG_FORMAT)\n', (638, 690), False, 'import logging\n'), ((1017, 1035), 'logging._checkLevel', '_checkLevel', (['level'], {}), '(level)\n', (1028, 1035), False, 'from logging import _checkLevel\n'), ((1383, 1406), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1400, 1406), False, 'import logging\n'), ((390, 420), 'logging._checkLevel', '_checkLevel', (['FASTSEQ_LOG_LEVEL'], {}), '(FASTSEQ_LOG_LEVEL)\n', (401, 420), False, 'from logging import _checkLevel\n'), ((1681, 1704), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1698, 1704), False, 'import logging\n'), ((1124, 1154), 'logging._checkLevel', '_checkLevel', (['FASTSEQ_LOG_LEVEL'], {}), '(FASTSEQ_LOG_LEVEL)\n', (1135, 1154), False, 'from logging import _checkLevel\n')]
#!/usr/bin/env python import argparse from ast import parse import numpy as np import bitstring def to_fixed(x, args): F = args.fixed_point_bits[0] - args.fixed_point_bits[1] return np.round(x * 2**F) def to_float(x, args): F = args.fixed_point_bits[0] - args.fixed_point_bits[1] return x * 2**-F def vals_to_hex(vals, args): nb = args.fixed_point_bits[0] # bits of one value tnb = len(vals) * nb # bitwidth of N values assert args.link_bitwidth >= tnb, \ "Attempting to pack {} x {} bits ({} bits) into {}".format( len(vals), nb, tnb, args.link_bitwidth) pad = args.link_bitwidth - tnb fmt_string = 'uint:{},'.format(pad) + 'int:{},'.format(nb) * len(vals) return bitstring.pack(fmt_string, 0, *vals).hex def row_to_hex(row, args): # compute the packing factor pf = args.link_bitwidth // args.fixed_point_bits[0] if args.pack_links else 1 N = int(np.ceil(len(row) / pf)) y = np.array([vals_to_hex(np.flip(row[i*pf:(i+1)*pf]), args) for i in range(N)]) return y def main(): parser = argparse.ArgumentParser( description='Parse numpy file to FPGA testing for MP7 board') parser.add_argument('--board_name', type=str, help='A string representing the name of the board') parser.add_argument('--generate_float_from_fix', type=bool, help='Specify if you want to obtain the .npy file ' 'obtained via to_float(to_fix(input)). It is useful to ' 'feed to avoid casting mismatches using ' 'hls_model.predict()') parser.add_argument('--link_range', choices=range(0,96), type=int, nargs=2, metavar=('start','stop'), help='Start and stop values ' 'for the range related to links') parser.add_argument('--link_bitwidth', choices=[32,64], type=int, help='Word size in bits of each link') parser.add_argument('--invalid_rows', type=int, help='The number of invalid that will be generate at ' 'the beginning of the test') parser.add_argument('--fixed_point_bits', type=int, nargs=2, metavar=('word_bits', 'integer_bits'), help='The number of invalid that will be generate at ' 'the beginning of the test') parser.add_argument('--pack_links', type=bool, help='Whether to pack ' 'multiple values into one link where possible') parser.add_argument('--link_map', type=int, nargs='*', help='The link map') parser.add_argument('--input_data_path', type=str, help='The path of the numpy file containing data in ' 'floating point') parser.add_argument('--output_data_path', type=str, help='The path of the produced .txt file containing ' 'data in fixed point') args = parser.parse_args() fp32_data = np.load(args.input_data_path) # packing factor pf = args.link_bitwidth // args.fixed_point_bits[0] if args.pack_links else 1 link_width = args.link_range[1] - args.link_range[0] + 1 if fp32_data.shape[1] > link_width * pf: raise Exception( 'Trying to fit {} features into {} links with packing factor {}' .format(fp32_data.shape[1],link_width,pf)) if fp32_data.shape[0] > 1024: print('The system expect no more than 1024 rows; the original file will ' 'be truncated, keeping the first 1024 rows') fp32_data = fp32_data[:1024] output_file = open(args.output_data_path, 'w') fixed_data = to_fixed(fp32_data, args) if args.generate_float_from_fix: float_back_data = to_float(fixed_data, args) np.save('float_from_fix.npy', float_back_data) fixed_data = np.array([row_to_hex(row, args) for row in fixed_data]) link_map = list(range(args.link_range[0], args.link_range[1] + 1)) \ if args.link_map is None else args.link_map assert len(link_map) == link_width, \ 'Link map length ({}) does not match link range ({})'.format( len(link_map), link_width) # board section board_string = 'Board {}\n'.format(args.board_name) # channel section quad_chan_string = ' Quad/Chan : ' for i in link_map: if args.link_bitwidth == 32: quad_chan_string += ' q{:02d}c{} '.format(i // 4, i % 4) else: quad_chan_string += ' q{:02d}c{} '.format(i // 4, i % 4) if i != link_map[-1]: quad_chan_string += ' ' else: quad_chan_string += '\n' # link section link_string = ' Link : ' for i in link_map: if args.link_bitwidth == 32: link_string += ' {:02d} '.format(i) else: link_string += ' {:02d} '.format(i) if i != link_map[-1]: link_string += ' ' else: link_string += '\n' # frame section frame_start = 'Frame {:04d} : ' frame = '' if args.invalid_rows > 0: for i in range(0,args.invalid_rows): frame += frame_start.format(i) for j in range(0, args.link_range[1] - args.link_range[0] + 1): if args.link_bitwidth == 32: frame += '0v00000000' else: frame += '0v0000000000000000' if j != args.link_range[1] - args.link_range[0]: frame += ' ' else: frame += '\n' dummy_cols = args.link_range[1] - args.link_range[0] - fp32_data.shape[1] for i, v in enumerate(fixed_data): frame += frame_start.format(i + args.invalid_rows) for j, k in enumerate(v): frame += '1v' + k frame += ' ' if dummy_cols > 0: for s in range(0, dummy_cols + 1): if args.link_bitwidth == 32: frame += '0v00000000' else: frame += '0v0000000000000000' if s + j != args.link_range[1] - args.link_range[0] - 1: frame += ' ' frame += '\n' l = [board_string, quad_chan_string, link_string, frame] output_file.writelines(l) output_file.close() print('Done!')
[ "numpy.load", "numpy.save", "numpy.flip", "argparse.ArgumentParser", "bitstring.pack", "numpy.round" ]
[((191, 211), 'numpy.round', 'np.round', (['(x * 2 ** F)'], {}), '(x * 2 ** F)\n', (199, 211), True, 'import numpy as np\n'), ((1090, 1180), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse numpy file to FPGA testing for MP7 board"""'}), "(description=\n 'Parse numpy file to FPGA testing for MP7 board')\n", (1113, 1180), False, 'import argparse\n'), ((3071, 3100), 'numpy.load', 'np.load', (['args.input_data_path'], {}), '(args.input_data_path)\n', (3078, 3100), True, 'import numpy as np\n'), ((728, 764), 'bitstring.pack', 'bitstring.pack', (['fmt_string', '(0)', '*vals'], {}), '(fmt_string, 0, *vals)\n', (742, 764), False, 'import bitstring\n'), ((3873, 3919), 'numpy.save', 'np.save', (['"""float_from_fix.npy"""', 'float_back_data'], {}), "('float_from_fix.npy', float_back_data)\n", (3880, 3919), True, 'import numpy as np\n'), ((978, 1011), 'numpy.flip', 'np.flip', (['row[i * pf:(i + 1) * pf]'], {}), '(row[i * pf:(i + 1) * pf])\n', (985, 1011), True, 'import numpy as np\n')]
""" @brief This file holds classes that store information about the endoscopic images that are going to be segmented. @author <NAME> (<EMAIL>). @date 25 Aug 2015. """ import numpy as np import os import cv2 # import caffe import sys import random import matplotlib.pyplot as plt import scipy.misc import imutils import geometry import tempfile import PIL import skimage.morphology import skimage.util # My imports import common # # @brief Perlin noise generator. # def perlin(x, y, seed): # Permutation table np.random.seed(seed) p = np.arange(256, dtype = int) np.random.shuffle(p) p = np.stack([p, p]).flatten() # Coordinates of the top-left xi = x.astype(int) yi = y.astype(int) # Internal coordinates xf = x - xi yf = y - yi # Fade factors u = fade(xf) v = fade(yf) # Noise components n00 = gradient(p[p[xi] + yi], xf, yf) n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1) n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1) n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf) # Combine noises x1 = lerp(n00, n10, u) x2 = lerp(n01, n11, u) return lerp(x1, x2, v) # # @brief Linear interpolation. # def lerp(a, b, x): return a + x * (b - a) # # @brief 6t^5 - 15t^4 + 10t^3. # def fade(t): return 6 * t**5 - 15 * t**4 + 10 * t**3 # # @brief Grad converts h to the right gradient vector and return the dot product with (x, y). # def gradient(h, x, y): vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]]) g = vectors[h % 4] return g[:,:, 0] * x + g[:,:, 1] * y # # @brief Perlin noise image. # # @param[in] height Height of the output image. # @param[in] width Width of the output image. # @param[in] scale Higher means smaller blobs. # @param[in] minval The minimum noise value. # @param[in] maxval The maximum noise value. # # @returns a 2D numpy array. def perlin2d_smooth(height, width, scale, minval = 0.0, maxval = 1.0, seed = None): lin_y = np.linspace(0, scale, height, endpoint = False) lin_x = np.linspace(0, scale, width, endpoint = False) x, y = np.meshgrid(lin_x, lin_y) arr = perlin(x, y, seed) min_arr = np.min(arr) max_arr = np.max(arr) arr = (np.clip((arr - min_arr) / (max_arr - min_arr), 0.0, 1.0) * (maxval - minval)) + minval return arr # # @brief Given a set of 2D points it finds the center and radius of a circle. # # @param[in] x List or array of x coordinates. # @param[in] y List or array of y coordinates. # # @returns (xc, yc, radius). def fit_circle(x, y): # Coordinates of the barycenter x_m = np.mean(x) y_m = np.mean(y) # Calculation of the reduced coordinates u = x - x_m v = y - y_m # Linear system defining the center in reduced coordinates (uc, vc): # Suu * uc + Suv * vc = (Suuu + Suvv)/2 # Suv * uc + Svv * vc = (Suuv + Svvv)/2 Suv = np.sum(u*v) Suu = np.sum(u**2) Svv = np.sum(v**2) Suuv = np.sum(u**2 * v) Suvv = np.sum(u * v**2) Suuu = np.sum(u**3) Svvv = np.sum(v**3) # Solving the linear system A = np.array([ [ Suu, Suv ], [Suv, Svv]]) B = np.array([ Suuu + Suvv, Svvv + Suuv ])/2.0 uc, vc = np.linalg.solve(A, B) xc_1 = x_m + uc yc_1 = y_m + vc # Calculation of all distances from the center (xc_1, yc_1) Ri_1 = np.sqrt((x - xc_1) ** 2 + (y - yc_1) ** 2) R_1 = np.mean(Ri_1) residu_1 = np.sum((Ri_1-R_1) ** 2) residu2_1 = np.sum((Ri_1 ** 2 - R_1 ** 2) ** 2) return xc_1, yc_1, R_1 # # @brief Zero parameter Canny edge detector. # def auto_canny(image, sigma = 0.33): v = np.median(image) lower = int(max(0, (1.0 - sigma) * v)) upper = int(min(255, (1.0 + sigma) * v)) edges = cv2.Canny(image, lower, upper) return edges # # @brief Abstract image class. This is not meant to be instantiated and it refers to a general # multidimensional image or label. # class CaffeinatedAbstract(object): # # @brief Every image must have at least data and name. We ensure of that with this abstract # constructor that will be called by all the children. # # @param[in] raw_frame Multidimensional image, at least H x W. # @param[in] name String with the name of the image. It can also be the frame number # of a video, but it will be converted to string. # def __init__(self, raw_frame, name): # Assert that the frame has data if len(raw_frame.shape) <= 1 or raw_frame.shape[0] <= 0 or raw_frame.shape[1] <= 0: raise RuntimeError('[CaffeinatedAbstract.__init__], the image provided ' \ 'does not have data.') # Assert that the name is valid if not name: raise ValueError('[CaffeinatedAbstract.__init__] Error, every caffeinated ' \ 'abstract child must have a name.') # Store attributes in class self._raw_frame = raw_frame self._name = str(name) # # @brief Access to a copy of the internal BGR image. # # @returns a copy of the internal frame, whatever it is, image or label. def raw_copy(self): return self._raw_frame.copy() # # @brief Saves image to file. # # @param[in] path Destination path. # @param[in] flags Flags that will be passed to OpenCV. # def save(self, path, flags): # Assert that the destination path does not exist if common.path_exists(path): raise ValueError('[CaffeinatedImage.save] Error, destination path ' \ + str(path) + ' already exists.') if flags: return cv2.imwrite(path, self._raw_frame, flags) else: return cv2.imwrite(path, self._raw_frame) # # @brief Crops an image in a rectangular fashion, including both corner pixels in the image. # # @param[in] tlx Integer that represents the top left corner column. # @param[in] tly Integer that represents the top left corner row. # @param[in] brx Integer that represents the bottom right corner column. # @param[in] bry Integer that represents the bottom right corner row. # # @returns nothing. def crop(self, tlx, tly, brx, bry): assert(isinstance(tlx, type(0)) and isinstance(tly, type(1)) and isinstance(brx, type(1)) \ and isinstance(bry, type(1))) assert(tlx <= brx) assert(tly <= bry) self._raw_frame = self._raw_frame[tly:bry + 1, tlx:brx + 1] def resize_to_width(self, new_w, interp): self._raw_frame = CaffeinatedAbstract.resize_width(self._raw_frame, new_w, interp) # # @brief Convert binary mask into just the mask of its boundary. # # @param[in] mask Input mask. # @param[in] thickness Thickness of the border. # # @returns the boundary mask. @staticmethod def mask2border(mask, thickness): # Find the contour of the mask cnts = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1] # Create a new image with just the contour new_mask = np.zeros_like(mask) new_mask = cv2.drawContours(new_mask, cnts, -1, 255, thickness) return new_mask # # @brief Histogram equalisation (CLAHE). # # @param[in] im Input image. # @param[in] clip_limit Contrast limit. # # @returns the equalised image. @staticmethod def clahe(im, clip_limit = 2.0): lab = cv2.cvtColor(im, cv2.COLOR_BGR2Lab) clahe_engine = cv2.createCLAHE(clipLimit = clip_limit, tileGridSize = (8, 8)) lab[:,:, 0] = clahe_engine.apply(lab[:,:, 0]) return cv2.cvtColor(lab, cv2.COLOR_Lab2BGR) # # @brief Flip left-right. # # @returns the flipped image. @staticmethod def fliplr(im): return np.fliplr(im) # # @brief Flip up-down. # # @returns the flipped image. @staticmethod def flipud(im): return np.flipud(im) # # @brief Thresholds a grayscale image. # # @param[in] img Input grayscale image. # @param[in] level Greater than this level will be set to maxval. Default value is 127. # @param[in] maxval Th values greater than level will be set to maxval. # Default value is 255. # # @returns the thresholded image. @staticmethod def bin_thresh(im, level = 127, maxval = 255): assert(len(im.shape) == 2 or (len(im.shape) == 3 and im.shape[2] == 1)) _, thresh = cv2.threshold(np.squeeze(im), level, maxval, cv2.THRESH_BINARY) return thresh # # @brief Random crop, both dimensions should be equal or smaller than the original size. # @details If a list is given, all the images must be larger than the desired new height and # width. # # @param[in] img Ndarray with the image, shape (height, width) or # (height, width, channels). # @param[in] new_height Height of the cropped image. # @param[in] new_width Width of the cropped image. # # @returns a cropped patch. @staticmethod def random_crop(im, new_height, new_width): assert(isinstance(im, np.ndarray)) assert(new_height > 0 and new_height <= im.shape[0]) assert(new_width > 0 and new_width <= im.shape[1]) # Choose random coordinates for crop height_border = im.shape[0] - new_height width_border = im.shape[1] - new_width top_y = random.randint(0, height_border - 1) if height_border > 0 else 0 top_x = random.randint(0, width_border - 1) if width_border > 0 else 0 # Crop image new_im = im[top_y:top_y + new_height, top_x:top_x + new_width].copy() assert(new_im.shape[0] == new_height) assert(new_im.shape[1] == new_width) return new_im # # @brief Performs a random crop. New height and width is decided independently, this # function changes the form factor. # # @param[in] img Input image, numpy array. # @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be # minimum half of the original. # # @returns the new image. @staticmethod def random_crop_factor(im, delta): assert(isinstance(im, np.ndarray)) min_scale = 1.0 - delta max_scale = 1.0 new_scale = random.uniform(min_scale, max_scale) new_height = int(round(im.shape[0] * new_scale)) new_width = int(round(im.shape[1] * new_scale)) new_im = CaffeinatedAbstract.random_crop(im, new_height, new_width) return new_im # # @brief Performs a random crop. New height and width is decided independently, this # function changes the form factor. # # @param[in] img Input image, numpy array. # @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be # minimum half of the original. # # @returns the new image. @staticmethod def random_crop_no_factor(im, delta): assert(isinstance(im, np.ndarray)) min_scale = 1.0 - delta max_scale = 1.0 new_height = int(round(im.shape[0] * random.uniform(min_scale, max_scale))) new_width = int(round(im.shape[1] * random.uniform(min_scale, max_scale))) new_im = CaffeinatedAbstract.random_crop(im, new_height, new_width) return new_im # # @brief Random crop of a list of images. The crops will be performed in different locations # for the different images of the list, but all the output images will have the same # size. # # @param[in] im_list List of images to be cropped. # @param[in] new_height Height of the cropped image. # @param[in] new_width Width of the cropped image. # # @returns a list of cropped images to the desired size. @staticmethod def random_crop_list(im_list, new_height, new_width): assert(isinstance(im_list, list)) assert(len(im_list) > 0) new_im_list = [ CaffeinatedAbstract.random_crop(im, new_height, new_width) \ for im in im_list ] return new_im_list # # @brief Random crop all the images of the list in the same coordinates for all of them. # All the input images MUST have the same size. # # @param[in] im_list List of images to be cropped. # @param[in] new_height Height of the cropped image. # @param[in] new_width Width of the cropped image. # # @returns a list of cropped images to the desired size. @staticmethod def random_crop_same_coord_list(im_list, new_height, new_width): assert(isinstance(im_list, list)) assert(len(im_list) > 0) # Choose random coordinates for crop height_border = im_list[0].shape[0] - new_height width_border = im_list[0].shape[1] - new_width top_y = random.randint(0, height_border - 1) if height_border > 0 else 0 top_x = random.randint(0, width_border - 1) if width_border > 0 else 0 # Crop all the images in the list new_im_list = [ im[top_y:top_y + new_height, top_x:top_x + new_width].copy() \ for im in im_list ] return new_im_list # # @brief Random crop all the images of the list in the same coordinates for all of them. # All the images MUST have the same size. The output images will have the same form # factor. # # @param[in] im_list List of images to be cropped. # @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be # minimum half of the original. # # @returns a list of cropped images to the desired size. @staticmethod def random_crop_same_coord_list_factor(im_list, delta): assert(isinstance(im_list, list)) assert(len(im_list) > 0) # Get the dimensions of the new images min_scale = 1.0 - delta max_scale = 1.0 new_scale = random.uniform(min_scale, max_scale) new_height = int(round(im.shape[0] * new_scale)) new_width = int(round(im.shape[1] * new_scale)) return CaffeinatedAbstract.random_crop_same_coord_list(im_list, new_height, new_width) # # @brief Random crop all the images of the list in the same coordinates for all of them. # All the images MUST have the same size. The output images will not have the same # form factor. # # @param[in] im_list List of images to be cropped. # @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be # minimum half of the original. # # @returns a list of cropped images to the desired size. @staticmethod def random_crop_same_coord_list_no_factor(im_list, delta): assert(isinstance(im_list, list)) assert(len(im_list) > 0) # Get the dimensions of the new images min_scale = 1.0 - delta max_scale = 1.0 new_height = int(round(im_list[0].shape[0] * random.uniform(min_scale, max_scale))) new_width = int(round(im_list[0].shape[1] * random.uniform(min_scale, max_scale))) return CaffeinatedAbstract.random_crop_same_coord_list(im_list, new_height, new_width) # # @brief Scale an image keeping original size, that is, the output image will have the # size of the input. # # @details If the scale factor is smaller than 1.0, the output image will be padded. # Otherwise it will be cropped. # # @param[in] im Input image or list of images. # @param[in] scale_factor If 1.0, the image stays as it is. # @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos. # @param[in] boder_value Border value. Used when the image is downsized and padded. # @param[in] clip_sides List of sides to crop out. Used only in case the scaling factor # is lower than 1.0. # # @returns the scaled image. @staticmethod def scale_keeping_size(im, scale_factor, interp, border_value, clip_sides = None): if clip_sides is None: clip_sides = [] # Resize image to the desired new scale new_im = CaffeinatedAbstract.resize_factor(im, scale_factor, interp) # If the new image is larger, we crop it if new_im.shape[0] > im.shape[0]: new_im = CaffeinatedAbstract.crop_center(new_im, im.shape[1], im.shape[0]) # If the new image is smaller, we pad it elif new_im.shape[0] < im.shape[0]: padded = np.full_like(im, border_value) start_row = (padded.shape[0] // 2) - (new_im.shape[0] // 2) start_col = (padded.shape[1] // 2) - (new_im.shape[1] // 2) end_row = start_row + new_im.shape[0] end_col = start_col + new_im.shape[1] padded[start_row:end_row, start_col:end_col] = new_im new_im = padded # Move the image to the desired sides (used to downscale tools and still keep them # attached to the border of the image) if 'top' in clip_sides: M = np.float32([[1, 0, 0], [0, 1, -start_row]]) new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.BORDER_CONSTANT, border_value) if 'left' in clip_sides: M = np.float32([[1, 0, -start_col], [0, 1, 0]]) new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.BORDER_CONSTANT, border_value) if 'bottom' in clip_sides: M = np.float32([[1, 0, 0], [0, 1, start_row]]) new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.BORDER_CONSTANT, border_value) if 'right' in clip_sides: M = np.float32([[1, 0, start_col], [0, 1, 0]]) new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.BORDER_CONSTANT, border_value) return new_im # # @brief Could flip the image or not. # # @param[in] im Image or list of images. If list, all images are either flipped or not. # # @returns the image (maybe flipped) maybe just the original one. @staticmethod def random_fliplr(im, not_used = None): if common.randbin(): if isinstance(im, list): return [ CaffeinatedAbstract.fliplr(i) for i in im ] else: return CaffeinatedAbstract.fliplr(im) else: return im # # @brief Could flip the image or not. # # @param[in] im Image or list of images. If list, all images are either flipped or not. # # @returns the image (maybe flipped) maybe just the original one. @staticmethod def random_flipud(im, not_used = None): if common.randbin(): if isinstance(im, list): return [ CaffeinatedAbstract.flipud(i) for i in im ] else: return CaffeinatedAbstract.flipud(im) else: return im # # @brief Add motion blur in a specific direction. # # @param[in] im Input image. # @param[in] mask Pass a foreground mask if you wanna apply the motion just in the # foreground. # @param[in] apply_on Either 'bg', 'fg' or 'both'. # @param[in] ks Size of the convolution kernel to be applied. # @param[in] phi_deg Angle of rotation in degrees. Default is zero, so the motion will be # horizontal. # # @returns the blured images. @staticmethod def directional_motion_blur(im, phi_deg = 0, ks = 15): # Generating the kernel kernel = np.zeros((ks, ks)) kernel[int((ks - 1) / 2),:] = np.ones(ks) / ks # Rotate image if the user wants to simulate motion in a particular direction # rot_im = CaffeinatedAbstract.rotate_bound(im, phi_deg, cv2.INTER_CUBIC) # rot_im_blur = cv2.filter2D(rot_im, -1, kernel) # new_im = CaffeinatedAbstract.rotate_bound(rot_im_blur, -phi_deg, cv2.INTER_CUBIC) # tly = (new_im.shape[0] - im.shape[0]) // 2 # tlx = (new_im.shape[1] - im.shape[1]) // 2 # new_im = new_im[tly:tly + im.shape[0], tlx:tlx + im.shape[1]] # FIXME: We keep just horizontal motion to investigate drop in performance new_im = cv2.filter2D(im, -1, kernel) return new_im # # @brief Random motion blur. Both foreground and background images must have the same size. # # @param[in] im Input image. # @param[in] mask Mask of the foreground object that will appear blurred within the # image. # @param[in] rho Magnitude in pixels of the foreground motion vector. # @param[in] phi_deg Angle in degrees of the motion vector. # @param[in] interlaced Random interlacing will be added. Some lines of the foreground will # move and others will not. # @param[in] alpha Weight for the weighted sum. Default value is 0.5. # # @returns the blurred image. @staticmethod def weighted_sum_motion_blur(im, mask, rho, phi_deg, interlaced = False, alpha = 0.5): assert(im.shape[0] == mask.shape[0]) assert(im.shape[1] == mask.shape[1]) # Compute random motion vector phi = common.deg_to_rad(phi_deg) tx = rho * np.cos(phi) ty = rho * np.sin(phi) # Translation matrix trans_mat = np.eye(3) trans_mat[0, 2] = tx trans_mat[1, 2] = ty mat = trans_mat[:2, :3] # Warp current image and mask according to the motion vector im_warped = cv2.warpAffine(im, mat, (im.shape[1], im.shape[0]), flags = cv2.INTER_CUBIC) mask_warped = cv2.warpAffine(mask, mat, (im.shape[1], im.shape[0]), flags = cv2.INTER_NEAREST) # Interlacing if interlaced: mask_warped_orig = mask_warped.copy() lines_with_mask = np.unique(np.nonzero(mask_warped)[0]).tolist() if lines_with_mask: num_lines_to_remove = np.random.randint(len(lines_with_mask)) random.shuffle(lines_with_mask) lines_with_mask = lines_with_mask[:num_lines_to_remove] for i in lines_with_mask: mask_warped[i,:] = 0 # Combine both images new_im = im.copy() new_im[mask_warped > 0] = np.round( alpha * im[mask_warped > 0] + (1. - alpha) * im_warped[mask_warped > 0] ).astype(np.uint8) # Blur if interlaced if interlaced: ksize = 3 blurred = cv2.GaussianBlur(new_im, (ksize, ksize), 0) new_im[mask_warped_orig > 0] = blurred[mask_warped_orig > 0] return new_im # # @brief Adds or subtracts intensity in different parts of the image using Perlin noise. # # @param[in] im Input image. # # @returns the augmented image. @staticmethod def random_local_brightness_augmentation(im, intensity_start = 50., intensity_stop = 200., intensity_step = 50., shape_start = 1., shape_stop = 5., shape_step = 1.): # Generate random illumination change range intensity_options = np.arange(intensity_start, intensity_stop + intensity_step, intensity_step) change_choice = np.random.choice(intensity_options) # Generate Perlin blob size, larger numbers mean smaller blobs shape_options = np.arange(shape_start, shape_stop + shape_step, shape_step) shape_choice = np.random.choice(shape_options) # Generate Perlin additive noise mask pn = perlin2d_smooth(im.shape[0], im.shape[1], shape_choice) * change_choice \ - .5 * change_choice pn = np.dstack((pn, pn, pn)) # Modify the image: HSV option # hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV).astype(np.float64) # hsv[:, :, 2] = np.round(np.clip(hsv[:, :, 2] + pn, 0, 255)) # augmented = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR) # Additive value on BGR augmented = np.round(np.clip(im.astype(np.float64) + pn, 0., 255.)).astype(np.uint8) return augmented # # @brief Adds or subtracts intensity in different parts of the image using Perlin noise. # # @param[in] im Input image. # # @returns the augmented image. @staticmethod def random_local_contrast_augmentation(im, shape_start = 1., shape_stop = 5., shape_step = 1.): # Choose minimum and maximum contrast randomly contrast_min = random.choice([0.5, 0.6, 0.7, 0.8]) contrast_max = random.choice([1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]) # Generate Perlin blob size, larger numbers mean smaller blobs shape_options = np.arange(shape_start, shape_stop + shape_step, shape_step) shape_choice = np.random.choice(shape_options) # Generate Perlin additive noise mask pn = perlin2d_smooth(im.shape[0], im.shape[1], shape_choice, minval = contrast_min, maxval = contrast_max) pn = np.dstack((pn, pn, pn)) # Modify the image augmented = np.round(np.clip(np.multiply(im.astype(np.float64), pn), 0, 255)).astype(np.uint8) return augmented # # @brief Global (as in same additive value added to all pixels) brightness augmentation. # # @param[in] im Input image. # # @returns the augmented image. @staticmethod def random_global_brightness_augmentation(im, intensity_start = -50, intensity_stop = 50, intensity_step = 10): # Generate random illumination change intensity_options = np.arange(intensity_start, intensity_stop + intensity_step, intensity_step) change_choice = np.random.choice(intensity_options) # Additive change on Value of HSV # hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV).astype(np.float64) # hsv[:, :, 2] = np.round(np.clip(hsv[:, :, 2] + change_choice, 0., 255.)) # hsv = hsv.astype(np.uint8) # augmented = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) # Additive change on all channels of BGR augmented = np.clip(im.astype(np.float64) + change_choice, 0, 255).astype(np.uint8) return augmented # # @brief Global contrast (multiplicative) augmentation. # # @param[in] im Input image. # # @returns the augmented image. @staticmethod def random_global_contrast_augmentation(im): contrast_choice = random.choice([0.5, 0.6, 0.7, 0.8, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]) augmented = np.round(np.clip(np.multiply(im.astype(np.float64), contrast_choice), 0, 255)).astype(np.uint8) return augmented # # @brief Bernoulli motion blur. # # @param[in] im Image or list of images. # @param[in] mask Mask of moving object. # @param[in] max_mag Maximum amount of pixels of displacement. # @param[in] max_ang Maximum angle of the motion vector. Default is 360, i.e. can move in any # direction. # # @returns the images with motion blur with a probability p. @staticmethod def random_weighted_sum_motion_blur(im, mask, max_mag = 32, max_ang = 360): rho = np.random.randint(max_mag) phi_deg = np.random.randint(max_ang) interlaced = common.randbin() if isinstance(im, list): return [ CaffeinatedAbstract.weighted_sum_motion_blur(i, m, rho, phi_deg, interlaced) for i, m in zip(im, mask) ] else: return CaffeinatedAbstract.weighted_sum_motion_blur(im, mask, rho, phi_deg, interlaced) # # @brief Converts an image from BGR to BRG. # # @param[in] im BGR image. # # @returns an image converted to BRG. @staticmethod def bgr2brg(im): return im[..., [0, 2, 1]] # # @brief Bernoulli BGR to BRG swapping. # # @param[in] im Image or list of images. # # @returns the image with the green-red channels swapped with a probability of 0.5. @staticmethod def random_brg(im): if common.randbin(): if isinstance(im, list): return [ CaffeinatedAbstract.bgr2brg(i) for i in im ] else: return CaffeinatedAbstract.bgr2brg(im) else: return im # # @brief Rotates the image over itself a random number of degrees. # # @param[in] im Input image, numpy array. # @param[in] deg_delta The range of possible rotation is +- deg_delta. # @param[in] interp Interpolation method: lanczos, linear, cubic, nearest. # # @returns the rotated image. @staticmethod def random_rotation(im, deg_delta, interp): max_ang = deg_delta min_ang = -1. * max_ang ang = random.uniform(min_ang, max_ang) new_im = None if isinstance(im, list): new_im = [ CaffeinatedAbstract.rotate_and_crop(i, ang, interp) for i in im ] else: new_im = CaffeinatedAbstract.rotate_and_crop(im, ang, interp) return new_im # # @brief Resizes an imaged to the desired width while keeping proportions. # # @param[in] im Image to be resized. # @param[in] new_w New width. # @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos. # # @returns a resized image. @staticmethod def resize_width(im, new_w, interp = None): assert(im.dtype == np.uint8) # If no interpolation method is chosen we select the most convenient depending on whether # the user is upsampling or downsampling the image if interp is None: interp = cv2.INTER_AREA if new_w < im.shape[1] else cv2.INTER_LANCZOS4 ratio = float(im.shape[0]) / float(im.shape[1]) new_h = int(round(new_w * ratio)) new_im = cv2.resize(im, (new_w, new_h), interpolation=interp) return new_im # # @brief Resizes an imaged to the desired width while keeping proportions. # # @param[in] im Image to be resized. # @param[in] new_h New height. # @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos. # # @returns a resized image. @staticmethod def resize_height(im, new_h, interp): assert(im.dtype == np.uint8) ratio = float(im.shape[0]) / float(im.shape[1]) new_w = int(round(new_h / ratio)) # imethod = PIL_interp_method[interp] # new_im = np.array(PIL.Image.fromarray(im).resize((new_w, new_h), imethod)) new_im = cv2.resize(im, (new_w, new_h), interpolation=interp) return new_im # # @brief Scales an image to a desired factor of the original one. # # @param[in] im Image to be resized. # @param[in] scale_factor Factor to scale up or down the image. # @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos. # # @returns a resized image. @staticmethod def resize_factor(im, scale_factor, interp): new_w = int(round(im.shape[1] * scale_factor)) return CaffeinatedAbstract.resize_width(im, new_w, interp) # # @brief Scales an image to a desired factor of the original one. # # @param[in] im Image to be resized. # @param[in] new_w New width. # @param[in] new_h New width. # @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos. # # @returns a resized image. @staticmethod def resize(im, new_w, new_h, interp): # imethod = PIL_interp_method[interp] # new_im = scipy.misc.imresize(im, (new_h, new_w), interp = interp).astype(im.dtype) # return np.array(PIL.Image.fromarray(im).resize((new_w, new_h), imethod), # dtype = im.dtype) new_im = cv2.resize(im, (new_w, new_h), interpolation=interp) return new_im # # @returns a crop of shape (new_h, new_w). # @staticmethod def crop_center(im, new_w, new_h): start_x = im.shape[1] // 2 - (new_w // 2) start_y = im.shape[0] // 2 - (new_h // 2) return im[start_y:start_y + new_h, start_x:start_x + new_w].copy() # # @brief Rotatation of an image with black bounds around it, as it would be # expected. A positive rotation angle results in a clockwise rotation. # # @param[in] image Numpy ndarray. # @param[in] angle Angle in degrees. # # @returns the rotated image. @staticmethod def rotate_bound(image, angle, interp): # Grab the dimensions of the image and then determine the center (h, w) = image.shape[:2] (cX, cY) = (w / 2, h / 2) # Grab the rotation matrix (applying the negative of the # angle to rotate clockwise), then grab the sine and cosine # (i.e., the rotation components of the matrix) M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0) cos = np.abs(M[0, 0]) sin = np.abs(M[0, 1]) # Compute the new bounding dimensions of the image nW = int((h * sin) + (w * cos)) nH = int((h * cos) + (w * sin)) # Adjust the rotation matrix to take into account translation M[0, 2] += (nW / 2) - cX M[1, 2] += (nH / 2) - cY # Perform the actual rotation and return the image return cv2.warpAffine(image, M, (nW, nH), flags = interp) # # brief Rotates an image over a centre point given and leaves the whole # image inside. Clockwise rotation of the image. # # @param[in] im Numpy ndarray. # @param[in] centre (x, y) in image coordinates. # @param[in] angle Angle in degrees. # @param[in] interp OpenCV interpolation method. @staticmethod def rotate_bound_centre(im, centre, deg, interp): cm_x = centre[0] cm_y = centre[1] # Build the rotation matrix rot_mat = cv2.getRotationMatrix2D((cm_y, cm_x), -deg, 1.0) rot_mat_hom = np.zeros((3, 3)) rot_mat_hom[:2,:] = rot_mat rot_mat_hom[2, 2] = 1 # Find the coordinates of the corners in the rotated image h = im.shape[0] w = im.shape[1] tl = np.array([0, 0, 1]).reshape((3, 1)) tr = np.array([w - 1, 0, 1]).reshape((3, 1)) bl = np.array([0, h - 1, 1]).reshape((3, 1)) br = np.array([w - 1, h - 1, 1]).reshape((3, 1)) tl_rot = np.round(np.dot(rot_mat_hom, tl)).astype(np.int) tr_rot = np.round(np.dot(rot_mat_hom, tr)).astype(np.int) bl_rot = np.round(np.dot(rot_mat_hom, bl)).astype(np.int) br_rot = np.round(np.dot(rot_mat_hom, br)).astype(np.int) # Compute the size of the new image from the coordinates of the rotated one so that # we add black bounds around the rotated one min_x = min([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]]) max_x = max([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]]) min_y = min([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]]) max_y = max([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]]) new_w = max_x + 1 - min_x new_h = max_y + 1 - min_y # Correct the translation so that the rotated image lies inside the window rot_mat[0, 2] -= min_x rot_mat[1, 2] -= min_y return cv2.warpAffine(im, rot_mat, (new_w[0], new_h[0]), flags = interp) # # @brief Clockwise rotation plus crop (so that there is no extra added black background). # # @details The crop is done based on a rectangle of maximal area inside the rotated region. # # @param[in] im Numpy ndarray image. Shape (h, w, 3) or (h, w). # @param[in] ang Angle in degrees. # @param[in] interp Interpolation method: lanczos, linear, cubic, nearest. # # @returns the rotated image. @staticmethod def rotate_and_crop(im, ang, interp): # Rotate image rotated = CaffeinatedAbstract.rotate_bound(im, ang, interp) # Calculate cropping area wr, hr = geometry.rotated_rect_with_max_area(im.shape[1], im.shape[0], common.deg_to_rad(ang)) wr = int(np.floor(wr)) hr = int(np.floor(hr)) # Centre crop rotated = CaffeinatedAbstract.crop_center(rotated, wr, hr) return rotated # # @brief This method deinterlaces an image using ffmpeg. # # @param[in] im Numpy ndarray image. Shape (h, w, 3) or (h, w). # # @returns the deinterlaced image. @staticmethod def deinterlace(im, ext = '.png'): input_path = tempfile.gettempdir() + '/' + common.gen_rand_str() + ext output_path = tempfile.gettempdir() + '/' + common.gen_rand_str() + ext # Save image in a temporary folder cv2.imwrite(input_path, im) # Deinterlace using ffmpeg common.shell('ffmpeg -i ' + input_path + ' -vf yadif ' + output_path) # Read deinterlaced image dei = cv2.imread(output_path) # Remove image from temporary folder common.rm(input_path) common.rm(output_path) return dei @staticmethod def gaussian_noise(im, mean=0, std=20): noise = np.random.normal(mean, std, im.shape) return np.round(np.clip(im.astype(np.float64) + noise, 0, 255)).astype(np.uint8) # # @rteurns a gamma corrected image. # @staticmethod def adjust_gamma(im, gamma = 1.0): inv_gamma = 1.0 / gamma table = np.array([((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]).astype("uint8") return cv2.LUT(im, table) # # @brief Draws an horizontal gradient image. # # @returns the image of the gradient. @staticmethod def draw_grad_lr(height, width, left_colour, right_colour): return (np.ones((height, width)) * np.linspace(left_colour, right_colour, width)).astype(np.uint8) # # @brief Draws an horizontal gradient image. # # @returns the image of the gradient. @staticmethod def draw_grad_ud(height, width, left_colour, right_colour): return (np.ones((height, width)) * np.linspace(left_colour, right_colour, width)).astype(np.uint8).T # # @brief FIXME: does not work properly when image is dark @staticmethod def detect_endoscopic_circle_bbox(im): # Edge detection max_black_intensity = 10 gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) kernel = np.ones((3, 3), np.uint8) dilation = cv2.dilate(gray, kernel, iterations = 1) _, thresh = cv2.threshold(dilation, max_black_intensity, 255, cv2.THRESH_BINARY) # Detect contour of largest area _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnt = max(contours, key = cv2.contourArea) ((xc, yc), radius) = cv2.minEnclosingCircle(cnt) x = xc - radius y = yc - radius w = 2 * radius h = 2 * radius # x, y, w, h = cv2.boundingRect(cnt) return int(x), int(y), int(w), int(h) @staticmethod def crop_endoscopic_circle(im): # Detect endoscopic circle has_circle = True # TODO if not has_circle: return im x, y, w, h = CaffeinatedAbstract.detect_endoscopic_circle_bbox(im) cropped = im[y:y + h, x:x + w].copy() return cropped # # @brief Function to add specular reflections to an image. # # TODO # # @staticmethod def add_specular_noise(): pass # # @brief Skeletonisation of a binary image [0, 255]. # # @param[in] im Input binary image. Binary means either some values are zero and some # others different from zero. Different from 0 can be 1 and 255. # # @returns a binary image (0, 255) with the skeleton of the image. @staticmethod def skeleton(im): assert(len(im.shape) == 2) sk = skimage.morphology.skeletonize_3d(im.astype(bool)) return sk # # @brief Pads and image with extra pixels according to a newly specified size. # # @param[in] tlx Integer that represents the top left corner column. # @param[in] tly Integer that represents the top left corner row. # @param[in] brx Integer that represents the bottom right corner column. # @param[in] bry Integer that represents the bottom right corner row. # @param[in] width Width of the new image. # @param[in] height Height of the new image. # @param[in] intensity Integer of the padding pixels. # # @returns nothing. def pad(self, tlx, tly, brx, bry, width, height, intensity): assert(isinstance(tlx, type(0)) and isinstance(tly, type(1)) and isinstance(brx, type(1)) \ and isinstance(bry, type(1))) assert(tlx <= brx) assert(tly <= bry) assert(width >= self.width) assert(height >= self.height) assert(isinstance(intensity, type(1))) # Create image of the new size new_raw_frame = None new_pixel = None if len(self._raw_frame.shape) == 2: new_raw_frame = np.empty((height, width), dtype=self._raw_frame.dtype) new_pixel = intensity elif len(self._raw_frame.shape) == 3: new_raw_frame = np.empty((height, width, self._raw_frame.shape[2]), dtype=self._raw_frame.dtype) new_pixel = np.empty((self._raw_frame.shape[2],), dtype=self._raw_frame.dtype) new_pixel.fill(intensity) else: raise ValueError('[image.CaffeinatedAbstract.pad] Error, image dimension ' \ + str(self._raw_frame.shape) + ' not supported.') new_raw_frame[:,:] = new_pixel # Insert the previous image in the right place new_raw_frame[tly:bry + 1, tlx:brx + 1] = self._raw_frame self._raw_frame = new_raw_frame # # @brief Converts the image into a distance transform (L2 norm) to the edges. # # @param[in] mask_size Size of the Sobel filter kernel. # # @returns nothing. def shape_transform(self, mask_size): assert(isinstance(mask_size, type(0))) # Convert to grayscale # gray = cv2.cvtColor(self._raw_frame, cv2.COLOR_BGR2GRAY) # Sobel filter sobel_x_64f = np.absolute(cv2.Sobel(self._raw_frame, cv2.CV_64F, 1, 0, ksize=mask_size)) sobel_y_64f = np.absolute(cv2.Sobel(self._raw_frame, cv2.CV_64F, 0, 1, ksize=mask_size)) sobel_64f = (sobel_x_64f + sobel_y_64f) scaled_sobel = np.uint8(255 * sobel_64f / np.max(sobel_64f)) # Dilate borders kernel = np.ones((mask_size, mask_size), np.uint8) dilated = cv2.dilate(scaled_sobel, kernel, iterations=1) # Threshold _, thresh = cv2.threshold(dilated, 1, 255, cv2.THRESH_BINARY) # Distance transform dist = 255 - (cv2.distanceTransform(255 - thresh, cv2.DIST_L2, maskSize=0)) # Remove backbround dist[self._raw_frame == 0] = 0 self._raw_frame = dist # # @brief Converts image to single channel. # # @returns nothing. def convert_to_single_chan(self): assert(len(self._raw_frame.shape) == 3) # Sanity check: assert that all the pixels of the image have the same intensity value in all the # channels for channel in range(1, self._raw_frame.shape[2]): if not np.array_equal(self._raw_frame[:,:, channel], self._raw_frame[:,:, 0]): raise RuntimeError('[CaffeinatedAbstract] Error, the image ' + self._name + ' has ' \ + 'channels that are different from each other so it is not clear ' \ + 'how to convert it to a proper single channel image.') self._raw_frame = self._raw_frame[:,:, 0] # # @brief Changes the intensity of all the pixels in all the channels to zero. # # @returns nothing. def convert_to_black(self): self._raw_frame.fill(0) # # @brief Filter image with ground truth label, background pixels on the ground truth will be blacked. # # @param[in] caffe_label CaffeinatedLabel. # def filter_with_gt(self, caffe_label): self._raw_frame[caffe_label.raw == 0] = 0 # # @brief Builds an object of type CaffeinatedAbstract from an image file. # # @param[in] path Path to the image file. # @classmethod def from_file(cls, path, *args): # return cls(cv2.imread(path, cv2.IMREAD_COLOR), *args) return cls(cv2.imread(path, cv2.IMREAD_UNCHANGED), *args) # # @returns the height of the image. # @property def height(self): return self._raw_frame.shape[0] # # @returns the width of the image. # @property def width(self): return self._raw_frame.shape[1] # # @returns the name of the image. # @property def name(self): return self._name # # @returns the raw internal image. # @property def raw(self): return self._raw_frame # # @returns the data type. # @property def dtype(self): return self._raw_frame.dtype # # @class CaffeinatedImage represents an image that will be used by Caffe so this class should # provide methods to adapt the original image to the type of input # Caffe is expecting. # class CaffeinatedImage(CaffeinatedAbstract): # # @brief Saves the colour image as an attribute of the class. # # @param[in] raw_frame Numpy array with a image, shape (h, w) or (h, w, c). # @param[in] name Id of the image, either the name or the frame number, it will be converted to # str. # @param[in] label Id of the class to whom the image belongs. Only used in case the image is used # for classification purposes. Default value is None. # def __init__(self, raw_frame, name, label = None): # Assert that the image is multi-channel dim = len(raw_frame.shape) if dim < 2: raise RuntimeError('[CaffeinatedImage.__init__], the image provided has [' + \ str(dim) + '] dimensions, only (H x W x C) and (H x W) are supported.') # Assert that the type of label is correct (i.e. integer) when it is not None if label is not None: assert(isinstance(label, type(0))) self._label = label # Call CaffeinatedAbstract constructor super(CaffeinatedImage, self).__init__(raw_frame if dim > 2 else np.expand_dims(raw_frame, axis = 2), name) # # @brief Builds an object of type CaffeinatedImage from file. # # @details Only supports 3-channel colour images. It will raise errors for images with a different # number of channels. # # @param[in] path Path to the image file. # @classmethod # def from_file(cls, path, name): # return cls(cv2.imread(path, cv2.IMREAD_UNCHANGED), name) # # @brief Convert image to caffe test input, transposing it to the Caffe format (C x H x W) and # subtracting the training mean. # # @details The mean needs to be subtracted because there is no transform_param section in the input # layer of the test network. # # @param[in] mean_values Numpy ndarray with the per channel mean of the training set. # Shape (channels,). # # @returns an image ready to be processed by Caffe. def convert_to_caffe_input(self, mean_values): # Sanity check: the mean values should be equal to the number of channels of the input image dim = len(self._raw_frame.shape) no_mean_values = mean_values.shape[0] if dim < 3: # 1D or 2D images should have only one channel mean if no_mean_values != 1: raise ValueError('[convert_to_caffe_input] Error, [' + str(no_mean_values) + '] mean ' + \ ' values provided, but the image is only 1D or 2D, so only one mean value is required.') elif dim == 3: channels = self._raw_frame.shape[-1] if channels != no_mean_values: raise ValueError('[convert_to_caffe_input] Error, [' + str(no_mean_values) \ + '] mean values have been provided but the given image has [' + str(channels) \ + '] channels.') else: raise ValueError('[convert_to_caffe_input] Error, high dimensional image not supported.') return np.transpose(self._raw_frame.astype(np.float32) - mean_values, (2, 0, 1)) # # @brief Resize the image to the desired new width and height. # # @param[in] new_h New height. # @param[in] new_w New width. # # @returns nothing. def resize(self, new_h, new_w): self._raw_frame = cv2.resize(self._raw_frame, (new_w, new_h)) # # @brief Resize the image and keep the original aspect ratio, padding if required. # # @param[in] new_h Height of the new image. # @param[in] new_w Width of the new image. # # @returns nothing. def resize_keeping_aspect(self, new_h, new_w): # Store aspect ratio, width and height about the previous dimensions w = self.width h = self.height ar = float(w) / float(h) # Create new frame respecting the desired new dimensions new_frame = np.zeros((new_h, new_w, self._raw_frame.shape[2]), self._raw_frame.dtype) # We scale the larger size of the image and adapt the other one to the aspect ratio temp_w = None temp_h = None y_start = 0 x_start = 0 if w >= h: temp_w = new_w temp_h = int(temp_w / ar) y_start = int((new_h - temp_h) / 2.0) else: temp_h = new_h temp_w = int(temp_h * ar) x_start = int((new_w - temp_w) / 2.0) # We add black padding if there is free space new_frame[y_start:temp_h + y_start, x_start:temp_w + x_start] = cv2.resize(self._raw_frame, (temp_w, temp_h)) # Copy the final image to the internal buffer that will be displayed self._raw_frame = new_frame # # @brief Converts BGR image to a Caffe datum with shape (C x H x W). # # @returns the Caffe datum serialised as a string. def serialise_to_string(self, jpeg_quality=100): assert(self._raw_frame.dtype == np.uint8) import caffe # caffe_image = self._raw_frame.astype(np.float32) # Convert image to Caffe datum datum = caffe.proto.caffe_pb2.Datum() datum.height, datum.width, datum.channels = self._raw_frame.shape # datum.data = caffe_image.tostring() flags = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality] datum.data = cv2.imencode('.jpg', self._raw_frame, flags)[1].tostring() # If the image has a label, it must be an integer if self._label is not None: assert(isinstance(self._label, type(0))) datum.label = self._label return datum.SerializeToString() # # @brief Convert image from uint16 to uint8. # def uint16_to_uint8(self): self._raw_frame = np.round((self._raw_frame.astype(np.float32) / 65535.0) * 255.0).astype(np.uint8) # # @brief Add Gaussian noise to image. # # @param[in] mean Default value is 0. # @param[in] std Default value is 10. # # @returns nothing. def add_gaussian_noise(self, mean = 0, std = 10): # Get image dimensions row, col, ch = self._raw_frame.shape # Add Gaussian noise to the internal image gauss = np.random.normal(mean, std, (row, col, ch)).reshape(row, col, ch) # Convert image to float, add Gaussian noise and convert back to uint8 self._raw_frame = np.round(self._raw_frame.astype(np.float64) + gauss).astype(np.uint8) # # @brief Converts a green screen image with tools to grayscale # adding a bit of noise so that BGR are not kept equal. # @classmethod def gray_tools(cls, im, noise_delta=3): assert(isinstance(im, np.ndarray)) new_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) new_im = cv2.cvtColor(new_im, cv2.COLOR_GRAY2BGR) noise = np.random.randint(-noise_delta, noise_delta + 1, size=new_im.shape) new_im = np.clip(new_im + noise, 0, 255).astype(np.uint8) return new_im # # @brief Convert it to a noisy grayscale image. # def noisy_gray(self, noise_delta=3): self._raw_frame = CaffeinatedImage.gray_tools(self._raw_frame, noise_delta) def random_crop(self, height, width): self._raw_frame = CaffeinatedAbstract.random_crop(self._raw_frame, height, width) @property def shape(self): return self._raw_frame.shape # # @class Caffeinated8UC3Image represents a colour (H x W x 3) CaffeinatedImage. # class Caffeinated8UC3Image(CaffeinatedImage): # # @brief Saves the colour image as an attribute of the class. # # @param[in] frame_bgr Numpy array with a BGR image, shape (h, w, c). def __init__(self, frame_bgr, name): # Check that it is a 3-channel BGR image EXPECTED_DIM = 3 EXPECTED_CHANNELS = 3 if len(frame_bgr.shape) != EXPECTED_DIM or frame_bgr.shape[EXPECTED_DIM - 1] != EXPECTED_CHANNELS: raise RuntimeError('[Caffeinated8UC3Image] Error, the image provided has a shape of ' + \ str(frame_bgr.shape) + '. We expect an image of shape (H x W x ' + \ str(EXPECTED_CHANNELS) + ').') # Check that the image is uint8 EXPECTED_TYPE = np.uint8 if frame_bgr.dtype != EXPECTED_TYPE: raise RuntimeError('[Caffeinated8UC3Image] Error, the image provided has a type of ' + \ str(frame_bgr.dtype) + ' and we expect ' + str(EXPECTED_TYPE) + '.') super(self.__class__, self).__init__(frame_bgr, name) # # @class CaffeinatedLabel represents a segmentation label that will be used by Caffe so this # class should provide methods to adapt the original image to the type of # input Caffe is expecting. # # @details This class does not support labels that are not grayscale or colour images, that is, # the images provided must be (H x W) or (H x W x C). In case that you provide a label # with shape (H x W x C) this class will make sure that all the channels C have the same # values. This is because a priori it does not make any sense for a pixel to belong to # different classes. class CaffeinatedLabel(CaffeinatedAbstract): # # @brief Stores the label and checks that both dimensions and type are correct for a label. # @details To make a safe conversion to single channel this method will check that all the # pixels of the image have exactly the same intensity value in all the BGR channels. # If this does not happen an exception will be raised. # # @param[in] label_image Single channel OpenCV/Numpy image. Shape (H x W) or (H x W x C). # @param[in] name Name of the label, usually stores the id of the related image. # @param[in] classes Integer that represents the maximum number of classes in the labels, # used for both validation purposes and to convert back/forth to Caffe # input. # @param[in] class_map Integer (pixel intensity) -> Integer (class, [0, K - 1]), # where K is the maximum number of classes. # @param[in] proba_map Probability maps for all the classes, shape (c, h, w). # def __init__(self, label_image, name, classes, class_map, proba_map = None): # This is 2 because we expect the image to be of shape (H x W) and the intensity of the # pixel to indicate the class that the pixel belongs to EXPECTED_DIM = 2 EXPECTED_LABEL_TYPE = np.uint8 # Store the maximum number of classes after validating that it is in the range [2, 256] assert(isinstance(classes, type(0)) and classes >= 2 and classes <= 256) self._classes = classes # Store the dictionary for class mappings after validating it classes_present = [False] * classes assert(len(class_map.keys()) == classes) for k, v in class_map.items(): assert(isinstance(k, type(0))) assert(isinstance(v, type(0))) assert(k >= 0 and k <= 255) assert(v >= 0 and v < self._classes) classes_present[v] = True assert(all(classes_present)) self._class_map = class_map # Sanity check: labels that are neither (H x W) nor (H x W x C) are not supported dim = len(label_image.shape) if not (dim == 2 or dim == 3): raise RuntimeError('[CaffeinatedLabel] Error, the label provided has a dimension of ' + \ str(dim) + ', which is not supported. Only (H x W) and (H x W x C) are supported.') # Sanity check: if the label provided is multiple-channel, assert that all the pixels of the image # have the same intensity value in all the channels if dim > EXPECTED_DIM: for channel in range(1, label_image.shape[2]): if not np.array_equal(label_image[:,:, channel], label_image[:,:, 0]): raise RuntimeError('[CaffeinatedLabel] Error, the label provided in ' + name + ' has channels that are ' + \ 'different from each other so it is not clear how to convert it to a proper ' + \ 'single channel label in which the intensity defines the pixel class.') # Sanity check: the image must be uint8, this essentially means that there is a maximum of 256 labels if label_image.dtype != EXPECTED_LABEL_TYPE: raise RuntimeError('[CaffeinatedLabel] Error, a label must be ' + str(EXPECTED_LABEL_TYPE) + '.') # If the image has several channels, we just get one (we already know that all the channels have the # same values if dim == EXPECTED_DIM: raw_label = label_image else: raw_label = label_image[:,:, 0] # Assert that there are no more unique labels than classes unique_classes = np.unique(raw_label) if unique_classes.shape[0] > self._classes: raise ValueError('[CaffeinatedLabel] Error, label ' + str(name) + ' is said to have ' \ + str(self._classes) + ' classes but there are more unique values in it, exactly: ' \ + str(unique_classes)) # Assert thate the intensities in the label are all present in the class_map dictionary for i in unique_classes: if not i in self._class_map: raise ValueError('[CaffeinatedLabel] Error, label ' + str(name) + ' has a pixel with ' \ + 'intensity ' + str(i) + ' but this intensity is not present in the class map.') # Store probability map if provided if proba_map is not None: assert(len(proba_map.shape) == 3) assert(proba_map.shape[0] == classes) assert(proba_map.shape[1] == raw_label.shape[0]) assert(proba_map.shape[2] == raw_label.shape[1]) self._predicted_map = proba_map # Call CaffeinatedAbstract constructor super(CaffeinatedLabel, self).__init__(raw_label, name) # # @brief Builds an object of type CaffeinatedLabel from an image file. # # @param[in] fmaps array_like, shape (c, h, w). # # @param[in] classes Integer that represents the maximum number of classes in the labels, used for # both validation purposes and to convert back/forth to Caffe input. # # @param[in] class_map Integer (pixel intensity) -> Integer (class, [0, K - 1]), where K is the # maximum number of classes. # @classmethod def from_network_output(cls, fmaps, name, classes, class_map): label_image = fmaps.argmax(axis=0).astype(np.uint8) for k, v in class_map.items(): label_image[label_image == v] = k return cls(label_image, name, classes, class_map, fmaps) # # @brief Convert label to CaffeinatedImage for displaying purposes. # # @param[in] cn Channels of the new image. The labels will be replicated across channels. # # @returns the label converted into a cn-channel CaffeinatedImage. def to_image(self, cn = 3): new_image = np.ndarray((self._raw_frame.shape[0], self._raw_frame.shape[1], cn), self._raw_frame.dtype) for k in range(cn): new_image[:,:, k] = self._raw_frame return CaffeinatedImage(new_image, self._name) # # @brief Converts the label to a Caffe datum. # # @returns a Caffe datum label serialised to string. def serialise_to_string(self): # Sanity check: assert that the type of the label is correct import caffe assert(self._raw_frame.dtype == np.uint8) # Create Caffe datum datum = caffe.proto.caffe_pb2.Datum() datum.height, datum.width = self._raw_frame.shape # if self._classes == 2: # Convert (h, w) -> (1, h, w) # caffe_label = np.expand_dims(self._raw_frame, axis = 0) # caffe_label = self._raw_frame # else: # Create ndarray of binary maps fmaps = np.zeros([self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]], dtype = np.uint8) # k is intensity # v is the class number for k, v in self._class_map.items(): fmaps[v, self._raw_frame == k] = 1 # if self._classes == 2: # Binary case, only one feature map # datum.channels = 1 # caffe_label = np.expand_dims(fmaps[1], axis = 0) # else: # Multi-class case, one feature map per class # datum.channels = self._classes # caffe_label = fmaps # Multi-class case, one feature map per class datum.channels = self._classes caffe_label = fmaps # Convert label[s] to string datum.data = caffe_label.tostring() return datum.SerializeToString() # # @brief Binarises the label. It will be thresholded so that only 0/maxval values are present. # # @param[in] thresh Values greater or equal than 'thresh' will be transformed to 'maxval'. # @param[in] maxval Integer that will be given to those pixels higher or equal than 'thresh'. # # @returns nothing. def binarise(self, thresh = 10, maxval = 1): _, self._raw_frame = cv2.threshold(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY) # # @brief Convert intensity-based labels into proper class-index labels. # # @returns an array_like, shape (h, w). def to_classes(self): class_index_frame = self._raw_frame.copy() for k, v in self._class_map.items(): class_index_frame[self._raw_frame == k] = v return class_index_frame # # @brief Maps between intensities [0, 255] to classes [0, K] using the JSON info provided. # # @param[in] intensity Typically an integer [0, 255]. # # @returns the class index of the givel pixel intensity according to the provided class map. def map_intensity_to_class(self, intensity): return self._class_map[intensity] # # @brief Maps between classes and JSON intensities. # # @param[in] class_id Id of the class whose intensity you want to retrieve. # # @returns the intensity corresponding to the given class. def map_class_to_intensity(self, class_id): return {v: k for k, v in self._class_map.items()}[class_id] # # @brief Retrieves a normalised probability map for a particular class. # # @param[in] class_id Id of the class whose probability map you want to retrieve. # # @returns an array_like probability map, shape (h, w). def softmax_predicted_map(self, class_id): assert(self._predicted_map) pmap = np.exp(self._predicted_map - np.amax(self._predicted_map, axis = 0)) pmap /= np.sum(pmap, axis = 0) return pmap[class_id, ...] # # @brief Converts all the feature maps to contour images. # # @param[in] pixel_width Thickness of the border in pixels. # # @returns nothing. def convert_to_contours(self, pixel_width = 5): new_raw_frame = np.zeros_like(self._raw_frame) # If self._predicted_map does not exist, we create it, shape (c, h, w) if not self._predicted_map: self._predicted_map = np.zeros((self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]), dtype=np.uint8) for k in range(self._classes): self._predicted_map[k,:,:][self._raw_frame == self.map_class_to_intensity(k)] = 1 # Draw contours in the new raw frame for k in range(self._classes): (_, cnts, _) = cv2.findContours(self._predicted_map[k], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) for c in cnts: # cv2.drawContours(new_raw_frame, [c], -1, (self.map_class_to_intensity(k)), pixel_width) cv2.drawContours(new_raw_frame, [c], -1, self.map_class_to_intensity(k), pixel_width) self._raw_frame = new_raw_frame def random_crop(self, height, width): self._raw_frame = CaffeinatedAbstract.random_crop(self._raw_frame, height, width) # # @brief Calculates the number of classes in the frame, that is the quantity of unique labels. # # @returns an integer that indicates the number of different pixel labels. @property def classes(self): return self._classes # return np.unique(self._raw_frame).shape[0] # # @returns the unnormalised predicted map for all the classes (class_id, height, width). # @property def predicted_map(self): return self._predicted_map @property def class_map(self): return self._class_map # # @class CaffeinatedBinaryLabel behaves as a CaffeinatedLabel but makes sure that the label images provided # only contain two different types or labels. Furthermore, it makes them 0's # and 1's (np.uint8) in case that they are different from these two values. # Say that you provide an image with 0's and 255's as typical ground truth # images, this class will make it 0's and 1's. # class CaffeinatedBinaryLabel(CaffeinatedLabel): # # @brief Stores the label and checks that both, dimensions and type, are correct for a label. # @details If the label provided is not single channel, the label is converted to grayscale with # the OpenCV cvtColour function. It # # @param[in] label_image Single channel OpenCV/Numpy image. Shape (H x W) or (H x W x C). # @param[in] name String that identifies the label, usually a frame number. # @param[in] thresh Values greater or equal than 'thresh' will be transformed to 'maxval'. # @param[in] maxval Integer that will be given to those pixels higher or equal than 'thresh'. # # @returns nothing. def __init__(self, label_image, name, thresh = 10, maxval = 1): # Call CaffeinatedLabel constructor super(self.__class__, self).__init__(label_image, name) # Sanity check: labels that are neither (H x W) nor (H x W x C) are not supported # dim = len(label_image.shape) # if not (dim == 2 or dim == 3): # raise RuntimeError('[CaffeinatedLabel] Error, the label provided has a dimension of ' + \ # str(dim) + ', which is not supported. Only (H x W) and (H x W x C) are supported.') # If we received a colour image as a label, we convert it to grayscale # if dim == 3: # label_image = cv2.cvtColor(label_image, cv2.COLOR_BGR2GRAY) # If the label image is not binary, that is, if it has more than two unique values, we thresholded it # to ensure that the labels are binary EXPECTED_NO_UNIQUE_VALUES = 2 # As we expect a binary label no_unique_values = np.unique(self._raw_frame).shape[0] if no_unique_values > EXPECTED_NO_UNIQUE_VALUES: _, self._raw_frame = cv2.threshold(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY) # # @returns the number of foreground pixels. @property def count_fg_pixels(self): return np.count_nonzero(self._raw_frame) # # @returns the number of background pixels. @property def count_bg_pixels(self): return np.count_nonzero(self._raw_frame == 0) # # @class CaffeinatedImagePair represents a pair of consecutive frames that will be used by Caffe so this # class should provide methods to adapt the original images to the type of # input Caffe is expecting. # class CaffeinatedImagePair(object): # # @brief Saves the colour image as an attribute of the class. # # @param[in] frame_bgr_prev Numpy array with the previous BGR image in the video sequence, shape (h, w, c). # @param[in] frame_bgr_next Numpy array with the current BGR image in the video sequecne, shape (h, w, c). # def __init__(self, frame_bgr_prev, frame_bgr_next): # Sanity check: both images must have 3 dimensions (h, w, c) if len(frame_bgr_prev.shape) != 3 or len(frame_bgr_next.shape) != 3: raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have ' + \ 'three dimensions (i.e. H x W x C).') # Sanity check: both images must have 3 channels if frame_bgr_prev.shape[2] != 3 or frame_bgr_next.shape[2] != 3: raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have three ' + \ 'channels (i. e. BGR)') # Sanity check: both images must have the same height and width if frame_bgr_prev.shape[0] != frame_bgr_next.shape[0] or \ frame_bgr_prev.shape[1] != frame_bgr_next.shape[1]: raise RuntimeError('[CaffeinatedImagePair.__init__] The imaged provided must have the same ' + \ 'dimensions (i.e. height and width).') self._frame_bgr_prev = frame_bgr_prev self._frame_bgr_next = frame_bgr_next # # @brief Builds an object of type CaffeinatedImage from file. # # @details Only supports 3-channel colour images. It will raise errors for images with a different # number of channels. # # @param[in] path_prev Path to the previous image file. # @param[in] path_next Path to the next image file. # @classmethod def from_file(cls, path_prev, path_next): return cls(cv2.imread(path_prev), cv2.imread(path_next)) # # @brief Convert image to caffe test input, transposing it to the Caffe format (C x H x W) and # subtracting the training mean. # # @details The mean needs to be subtracted because there is no transform_param section in the input # layer of the test network. # # @param[in] mean_values Numpy ndarray with the per channel mean of the training set. Shape (channels,). # # @returns an image ready to be processed by Caffe. def convert_to_caffe_input(self, mean_values): no_mean_values = mean_values.shape[0] # Sanity check: the mean values should be equal to the number of channels of the input image if no_mean_values != 6: raise ValueError('[CaffeinatedImagePair.convert_to_caffe_input()] Error, six means are required.') # Subtract mean values from previous frame norm_prev = self._frame_bgr_prev.astype(np.float32) - mean_values[:3] # Subtract mean values from next frame norm_next = self._frame_bgr_next.astype(np.float32) - mean_values[3:] # Sanity checks: both images must have the same shape and be of the same datatype assert(norm_prev.shape[0] == norm_next.shape[0]) assert(norm_prev.shape[1] == norm_next.shape[1]) assert(norm_prev.shape[2] == norm_next.shape[2]) assert(norm_prev.dtype == norm_next.dtype) # Combine both images in a 6-channel image combined_image = np.empty((norm_prev.shape[0], norm_prev.shape[1], 6), dtype = norm_prev.dtype) combined_image[:,:, 0:3] = norm_prev combined_image[:,:, 3:6] = norm_next # Transpose to channel-first Caffe style combined_transposed = np.transpose(combined_image, (2, 0, 1)) return combined_transposed # # @brief Converts BGR image to a Caffe datum with shape (C x H x W). # # @details The training mean is not subtracted from the image because Caffe does this automatically for # the data layer used for training (see the transform_param section of the 'data' layer in the # training prototxt). # # @returns the Caffe datum serialised as a string. @property def serialise_to_string(self): # Sanity checks: both images must have the same shape and be of the same datatype import caffe assert(self._frame_bgr_prev.shape[0] == self._frame_bgr_next.shape[0]) assert(self._frame_bgr_prev.shape[1] == self._frame_bgr_next.shape[1]) assert(self._frame_bgr_prev.shape[2] == self._frame_bgr_next.shape[2]) assert(self._frame_bgr_prev.dtype == self._frame_bgr_next.dtype) # Combine the two images in a single 6-channel image channels = 6 combined_image = np.empty((self._frame_bgr_prev.shape[0], self._frame_bgr_prev.shape[1], channels), \ dtype = self._frame_bgr_prev.dtype) combined_image[:,:, 0:3] = self._frame_bgr_prev combined_image[:,:, 3:6] = self._frame_bgr_next caffe_image = combined_image.astype(np.float32) # Convert image to Caffe datum datum = caffe.proto.caffe_pb2.Datum() datum.height, datum.width, _ = caffe_image.shape datum.channels = channels datum.data = caffe_image.tostring() return datum.SerializeToString() # # @returns the height of the image. @property def height(self): return self._frame_bgr_prev.shape[0] # # @returns the width of the image. @property def width(self): return self._frame_bgr_prev.shape[1] # # @class CaffeinatedImagePlusPrevSeg represents a BGR image with a fourth channel that contains the segmentation of the # previous frame in the video sequence. # class CaffeinatedImagePrevSeg(object): # # @brief Saves the colour image and the previous segmentation as attributes of the class. # # @param[in] prev_seg Numpy array with the predicted segmentation of the previous frame in the sequence, # shape (h, w, c). # @param[in] frame_bgr Numpy array with a BGR image, shape (h, w, c). # def __init__(self, prev_seg, frame_bgr): # Sanity check: the image must have three dimensions (h, w, c) and three channels (c = 3) if len(frame_bgr.shape) != 3 or frame_bgr.shape[2] != 3: raise RuntimeError('[CaffeinatedImagePlusPrevSeg.__init__] Error, the image provided must ' + \ ' have three dimensions (i.e. H x W x 3).') # Sanity check: the previous mask must have a dimension of two if len(prev_seg.shape) != 2: raise RuntimeError('[CaffeinatedImagePlusPrevSeg.__init__] Error, the previous mask must have ' + \ 'two dimensions.') # Sanity check: the frame and the previous mask must have the same dimensions if frame_bgr.shape[0] != prev_seg.shape[0] or frame_bgr.shape[1] != prev_seg.shape[1]: raise RuntimeError('[CaffeinatedImagePlusPrevSeg.__init__] Error, the current image and the ' + \ 'previous segmentation must have the same height and width.') self._prev_seg = prev_seg self._frame_bgr = frame_bgr # # @brief Builds an object of type CaffeinatedImage from file. # # @details Only supports 3-channel colour images. It will raise errors for images with a different # number of channels. # # @param[in] path Path to the image file. @classmethod def from_file(cls, path_prev_seg, path_frame_bgr): caffeinated_prev_label = CaffeinatedBinaryLabel.from_file(path_prev_seg) return cls(caffeinated_prev_label.single_channel_label_copy(), cv2.imread(path_frame_bgr)) # # @brief Convert image to caffe test input, transposing it to the Caffe format (C x H x W) and # subtracting the training mean. # # @details The mean needs to be subtracted because there is no transform_param section in the input # layer of the test network. # # @param[in] mean_values Numpy ndarray with the per channel mean of the training set. Shape (channels,). # # @returns an image ready to be processed by Caffe. def convert_to_caffe_input(self, mean_values): colour_channels = 3 no_mean_values = mean_values.shape[0] # Sanity check: the mean values should be equal to the number of channels of the input image if no_mean_values != colour_channels: raise ValueError('[CaffeinatedImagePlusPrevSeg.convert_to_caffe_input] Error, three means are ' + \ 'required.') # Subtract mean values from the current frame norm_frame_bgr = self._frame_bgr.astype(np.float32) - mean_values # Convert previous segmentation to float norm_prev_seg = self._prev_seg.astype(np.float32) # Sanity check: the current normalised image and the segmentation mask must have the same shape and # datatype total_channels = colour_channels + 1 assert(norm_frame_bgr.shape[0] == norm_prev_seg.shape[0]) assert(norm_frame_bgr.shape[1] == norm_prev_seg.shape[1]) assert(norm_frame_bgr.shape[2] == colour_channels) assert(norm_frame_bgr.dtype == norm_prev_seg.dtype) # Combine the current frame with the previous segmentation in a 4-channel image combined_image = np.empty((norm_frame_bgr.shape[0], norm_frame_bgr.shape[1], total_channels), dtype = norm_frame_bgr.dtype) combined_image[:,:, :colour_channels] = norm_frame_bgr combined_image[:,:, colour_channels] = norm_prev_seg # Transpose to channel-first Caffe style combined_transposed = np.transpose(combined_image, (2, 0, 1)) return combined_transposed # # @brief Converts BGR image to a Caffe datum with shape (C x H x W). # # @details The training mean is not subtracted from the image because Caffe does this automatically for # the data layer used for training (see the transform_param section of the 'data' layer in the # training prototxt). # # @returns the Caffe datum serialised as a string. @property def serialise_to_string(self): # Sanity checks: both images must have the same shape and be of the same datatype import caffe assert(self._frame_bgr.shape[0] == self._prev_seg.shape[0]) assert(self._frame_bgr.shape[1] == self._prev_seg.shape[1]) assert(self._frame_bgr.dtype == self._prev_seg.dtype) # Combine the current image and the previous segmentation in a single 4-channel image colour_channels = 3 total_channels = colour_channels + 1 combined_image = np.empty((self._frame_bgr.shape[0], self._frame_bgr.shape[1], total_channels), \ dtype = self._frame_bgr.dtype) combined_image[:,:, :colour_channels] = self._frame_bgr combined_image[:,:, colour_channels] = self._prev_seg caffe_image = combined_image.astype(np.float32) # Convert image to Caffe datum datum = caffe.proto.caffe_pb2.Datum() datum.height, datum.width, _ = caffe_image.shape datum.channels = total_channels datum.data = caffe_image.tostring() return datum.SerializeToString() # # @returns the height of the image. @property def height(self): return self._frame_bgr.shape[0] # # @returns the width of the image. @property def width(self): return self._frame_bgr.shape[1] # # @brief Convert a binary probability map into a beautiful image. # # @param[in] probmap 2D floating point probability map, shape (height, width). # # @returns a fancy BGR image. def make_it_pretty(probmap, vmin = 0, vmax = 1, colourmap = 'plasma', eps = 1e-3): assert(len(probmap.shape) == 2) assert(np.max(probmap) < vmax + eps) assert(np.min(probmap) > vmin - eps) height = probmap.shape[0] width = probmap.shape[1] # Create figure without axes fig = plt.figure(frameon = False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') # Plot figure plt.imshow(probmap, cmap = colourmap, vmin = vmin, vmax = vmax) # vmin/vmax adjust thesholds fig.canvas.draw() # Convert plot to numpy array data = np.fromstring(fig.canvas.tostring_rgb(), dtype = np.uint8, sep = '') data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, )) # Remove left/right borders left_right_offset = 0 i = 0 left_intensity = data[0, left_right_offset, 0] right_intensity = data[0, -1, 0] min_intensity = 255 # Assert that the values for all the rows are equal for the columns 'offset' and '-offset' left_side_equal = True if np.unique(data[:, left_right_offset, 0]).shape[0] == 1 else False right_side_equal = True if np.unique(data[:, -left_right_offset, 0]).shape[0] == 1 else False while left_intensity == right_intensity and left_intensity >= min_intensity and left_side_equal and right_side_equal: left_right_offset += 1 left_intensity = data[0, left_right_offset, 0] right_intensity = data[0, -left_right_offset - 1, 0] left_side_equal = True if np.unique(data[:, left_right_offset, 0]).shape[0] == 1 else False right_side_equal = True if np.unique(data[:, -left_right_offset, 0]).shape[0] == 1 else False # Remove top/bottom borders top_bottom_offset = 0 i = 0 top_intensity = data[top_bottom_offset, 0, 0] bottom_intensity = data[-1, 0, 0] min_intensity = 255 # Assert that the values for all the rows are equal for the columns 'offset' and '-offset' top_side_equal = True if np.unique(data[top_bottom_offset,:, 0]).shape[0] == 1 else False bottom_side_equal = True if np.unique(data[-top_bottom_offset,:, 0]).shape[0] == 1 else False while top_intensity == bottom_intensity and top_intensity >= min_intensity and top_side_equal and bottom_side_equal: top_bottom_offset += 1 top_intensity = data[top_bottom_offset, 0, 0] bottom_intensity = data[-top_bottom_offset - 1, 0, 0] top_side_equal = True if np.unique(data[top_bottom_offset,:, 0]).shape[0] == 1 else False bottom_side_equal = True if np.unique(data[-top_bottom_offset,:, 0]).shape[0] == 1 else False # Note: 1 is added to 'left_right_offset' because matplotlib tends to leave a border on the left one # pixel thicker than on the right cropped_image = data[top_bottom_offset:data.shape[0] - top_bottom_offset, left_right_offset + 1:data.shape[1] - left_right_offset] # Resize to original size resized_image = cv2.resize(cropped_image, (width, height)) assert(resized_image.shape[0] == height) assert(resized_image.shape[1] == width) assert(resized_image.shape[2] == 3) # Convert RGB to BGR final_image = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR) return final_image # This module cannot be executed as a script because it is not a script :) if __name__ == "__main__": print >> sys.stderr, 'Error, this module is not supposed to be executed by itself.' sys.exit(1)
[ "numpy.sum", "numpy.ones", "matplotlib.pyplot.figure", "numpy.arange", "common.randbin", "cv2.imencode", "cv2.filter2D", "cv2.cvtColor", "matplotlib.pyplot.imshow", "cv2.imwrite", "numpy.max", "cv2.LUT", "cv2.minEnclosingCircle", "numpy.flipud", "numpy.min", "cv2.createCLAHE", "numpy.squeeze", "sys.exit", "tempfile.gettempdir", "random.choice", "common.rm", "numpy.nonzero", "numpy.array", "numpy.abs", "cv2.getRotationMatrix2D", "numpy.unique", "numpy.meshgrid", "common.shell", "numpy.transpose", "numpy.dstack", "numpy.stack", "cv2.distanceTransform", "numpy.expand_dims", "numpy.eye", "cv2.GaussianBlur", "numpy.empty", "numpy.clip", "cv2.warpAffine", "numpy.mean", "numpy.sin", "numpy.ndarray", "random.randint", "cv2.dilate", "numpy.linspace", "cv2.drawContours", "cv2.resize", "cv2.Canny", "numpy.dot", "cv2.Sobel", "numpy.count_nonzero", "common.gen_rand_str", "numpy.zeros", "numpy.amax", "numpy.array_equal", "cv2.findContours", "numpy.random.seed", "random.shuffle", "numpy.floor", "numpy.random.randint", "numpy.random.normal", "common.deg_to_rad", "numpy.linalg.solve", "numpy.round", "numpy.full_like", "numpy.zeros_like", "caffe.proto.caffe_pb2.Datum", "numpy.random.choice", "numpy.random.shuffle", "common.path_exists", "numpy.median", "numpy.fliplr", "numpy.cos", "random.uniform", "cv2.threshold", "numpy.float32", "cv2.imread", "numpy.sqrt" ]
[((533, 553), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (547, 553), True, 'import numpy as np\n'), ((562, 587), 'numpy.arange', 'np.arange', (['(256)'], {'dtype': 'int'}), '(256, dtype=int)\n', (571, 587), True, 'import numpy as np\n'), ((594, 614), 'numpy.random.shuffle', 'np.random.shuffle', (['p'], {}), '(p)\n', (611, 614), True, 'import numpy as np\n'), ((1483, 1527), 'numpy.array', 'np.array', (['[[0, 1], [0, -1], [1, 0], [-1, 0]]'], {}), '([[0, 1], [0, -1], [1, 0], [-1, 0]])\n', (1491, 1527), True, 'import numpy as np\n'), ((1996, 2041), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', 'height'], {'endpoint': '(False)'}), '(0, scale, height, endpoint=False)\n', (2007, 2041), True, 'import numpy as np\n'), ((2056, 2100), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', 'width'], {'endpoint': '(False)'}), '(0, scale, width, endpoint=False)\n', (2067, 2100), True, 'import numpy as np\n'), ((2114, 2139), 'numpy.meshgrid', 'np.meshgrid', (['lin_x', 'lin_y'], {}), '(lin_x, lin_y)\n', (2125, 2139), True, 'import numpy as np\n'), ((2183, 2194), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (2189, 2194), True, 'import numpy as np\n'), ((2209, 2220), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (2215, 2220), True, 'import numpy as np\n'), ((2611, 2621), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2618, 2621), True, 'import numpy as np\n'), ((2632, 2642), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (2639, 2642), True, 'import numpy as np\n'), ((2902, 2915), 'numpy.sum', 'np.sum', (['(u * v)'], {}), '(u * v)\n', (2908, 2915), True, 'import numpy as np\n'), ((2925, 2939), 'numpy.sum', 'np.sum', (['(u ** 2)'], {}), '(u ** 2)\n', (2931, 2939), True, 'import numpy as np\n'), ((2949, 2963), 'numpy.sum', 'np.sum', (['(v ** 2)'], {}), '(v ** 2)\n', (2955, 2963), True, 'import numpy as np\n'), ((2973, 2991), 'numpy.sum', 'np.sum', (['(u ** 2 * v)'], {}), '(u ** 2 * v)\n', (2979, 2991), True, 'import numpy as np\n'), ((3001, 3019), 'numpy.sum', 'np.sum', (['(u * v ** 2)'], {}), '(u * v ** 2)\n', (3007, 3019), True, 'import numpy as np\n'), ((3029, 3043), 'numpy.sum', 'np.sum', (['(u ** 3)'], {}), '(u ** 3)\n', (3035, 3043), True, 'import numpy as np\n'), ((3053, 3067), 'numpy.sum', 'np.sum', (['(v ** 3)'], {}), '(v ** 3)\n', (3059, 3067), True, 'import numpy as np\n'), ((3107, 3141), 'numpy.array', 'np.array', (['[[Suu, Suv], [Suv, Svv]]'], {}), '([[Suu, Suv], [Suv, Svv]])\n', (3115, 3141), True, 'import numpy as np\n'), ((3209, 3230), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (3224, 3230), True, 'import numpy as np\n'), ((3353, 3395), 'numpy.sqrt', 'np.sqrt', (['((x - xc_1) ** 2 + (y - yc_1) ** 2)'], {}), '((x - xc_1) ** 2 + (y - yc_1) ** 2)\n', (3360, 3395), True, 'import numpy as np\n'), ((3412, 3425), 'numpy.mean', 'np.mean', (['Ri_1'], {}), '(Ri_1)\n', (3419, 3425), True, 'import numpy as np\n'), ((3442, 3467), 'numpy.sum', 'np.sum', (['((Ri_1 - R_1) ** 2)'], {}), '((Ri_1 - R_1) ** 2)\n', (3448, 3467), True, 'import numpy as np\n'), ((3482, 3517), 'numpy.sum', 'np.sum', (['((Ri_1 ** 2 - R_1 ** 2) ** 2)'], {}), '((Ri_1 ** 2 - R_1 ** 2) ** 2)\n', (3488, 3517), True, 'import numpy as np\n'), ((3641, 3657), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (3650, 3657), True, 'import numpy as np\n'), ((3758, 3788), 'cv2.Canny', 'cv2.Canny', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (3767, 3788), False, 'import cv2\n'), ((81954, 81979), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (81964, 81979), True, 'import matplotlib.pyplot as plt\n'), ((82060, 82117), 'matplotlib.pyplot.imshow', 'plt.imshow', (['probmap'], {'cmap': 'colourmap', 'vmin': 'vmin', 'vmax': 'vmax'}), '(probmap, cmap=colourmap, vmin=vmin, vmax=vmax)\n', (82070, 82117), True, 'import matplotlib.pyplot as plt\n'), ((84579, 84621), 'cv2.resize', 'cv2.resize', (['cropped_image', '(width, height)'], {}), '(cropped_image, (width, height))\n', (84589, 84621), False, 'import cv2\n'), ((84795, 84841), 'cv2.cvtColor', 'cv2.cvtColor', (['resized_image', 'cv2.COLOR_RGB2BGR'], {}), '(resized_image, cv2.COLOR_RGB2BGR)\n', (84807, 84841), False, 'import cv2\n'), ((85061, 85072), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (85069, 85072), False, 'import sys\n'), ((3153, 3189), 'numpy.array', 'np.array', (['[Suuu + Suvv, Svvv + Suuv]'], {}), '([Suuu + Suvv, Svvv + Suuv])\n', (3161, 3189), True, 'import numpy as np\n'), ((5475, 5499), 'common.path_exists', 'common.path_exists', (['path'], {}), '(path)\n', (5493, 5499), False, 'import common\n'), ((7125, 7144), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (7138, 7144), True, 'import numpy as np\n'), ((7164, 7216), 'cv2.drawContours', 'cv2.drawContours', (['new_mask', 'cnts', '(-1)', '(255)', 'thickness'], {}), '(new_mask, cnts, -1, 255, thickness)\n', (7180, 7216), False, 'import cv2\n'), ((7496, 7531), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2Lab'], {}), '(im, cv2.COLOR_BGR2Lab)\n', (7508, 7531), False, 'import cv2\n'), ((7555, 7613), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': 'clip_limit', 'tileGridSize': '(8, 8)'}), '(clipLimit=clip_limit, tileGridSize=(8, 8))\n', (7570, 7613), False, 'import cv2\n'), ((7687, 7723), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_Lab2BGR'], {}), '(lab, cv2.COLOR_Lab2BGR)\n', (7699, 7723), False, 'import cv2\n'), ((7854, 7867), 'numpy.fliplr', 'np.fliplr', (['im'], {}), '(im)\n', (7863, 7867), True, 'import numpy as np\n'), ((7995, 8008), 'numpy.flipud', 'np.flipud', (['im'], {}), '(im)\n', (8004, 8008), True, 'import numpy as np\n'), ((10445, 10481), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (10459, 10481), False, 'import random\n'), ((14112, 14148), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (14126, 14148), False, 'import random\n'), ((18588, 18604), 'common.randbin', 'common.randbin', ([], {}), '()\n', (18602, 18604), False, 'import common\n'), ((19116, 19132), 'common.randbin', 'common.randbin', ([], {}), '()\n', (19130, 19132), False, 'import common\n'), ((20010, 20028), 'numpy.zeros', 'np.zeros', (['(ks, ks)'], {}), '((ks, ks))\n', (20018, 20028), True, 'import numpy as np\n'), ((20681, 20709), 'cv2.filter2D', 'cv2.filter2D', (['im', '(-1)', 'kernel'], {}), '(im, -1, kernel)\n', (20693, 20709), False, 'import cv2\n'), ((21694, 21720), 'common.deg_to_rad', 'common.deg_to_rad', (['phi_deg'], {}), '(phi_deg)\n', (21711, 21720), False, 'import common\n'), ((21833, 21842), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (21839, 21842), True, 'import numpy as np\n'), ((22023, 22097), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'mat', '(im.shape[1], im.shape[0])'], {'flags': 'cv2.INTER_CUBIC'}), '(im, mat, (im.shape[1], im.shape[0]), flags=cv2.INTER_CUBIC)\n', (22037, 22097), False, 'import cv2\n'), ((22122, 22200), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'mat', '(im.shape[1], im.shape[0])'], {'flags': 'cv2.INTER_NEAREST'}), '(mask, mat, (im.shape[1], im.shape[0]), flags=cv2.INTER_NEAREST)\n', (22136, 22200), False, 'import cv2\n'), ((23613, 23688), 'numpy.arange', 'np.arange', (['intensity_start', '(intensity_stop + intensity_step)', 'intensity_step'], {}), '(intensity_start, intensity_stop + intensity_step, intensity_step)\n', (23622, 23688), True, 'import numpy as np\n'), ((23713, 23748), 'numpy.random.choice', 'np.random.choice', (['intensity_options'], {}), '(intensity_options)\n', (23729, 23748), True, 'import numpy as np\n'), ((23845, 23904), 'numpy.arange', 'np.arange', (['shape_start', '(shape_stop + shape_step)', 'shape_step'], {}), '(shape_start, shape_stop + shape_step, shape_step)\n', (23854, 23904), True, 'import numpy as np\n'), ((23928, 23959), 'numpy.random.choice', 'np.random.choice', (['shape_options'], {}), '(shape_options)\n', (23944, 23959), True, 'import numpy as np\n'), ((24140, 24163), 'numpy.dstack', 'np.dstack', (['(pn, pn, pn)'], {}), '((pn, pn, pn))\n', (24149, 24163), True, 'import numpy as np\n'), ((24951, 24986), 'random.choice', 'random.choice', (['[0.5, 0.6, 0.7, 0.8]'], {}), '([0.5, 0.6, 0.7, 0.8])\n', (24964, 24986), False, 'import random\n'), ((25010, 25070), 'random.choice', 'random.choice', (['[1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]'], {}), '([1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0])\n', (25023, 25070), False, 'import random\n'), ((25167, 25226), 'numpy.arange', 'np.arange', (['shape_start', '(shape_stop + shape_step)', 'shape_step'], {}), '(shape_start, shape_stop + shape_step, shape_step)\n', (25176, 25226), True, 'import numpy as np\n'), ((25250, 25281), 'numpy.random.choice', 'np.random.choice', (['shape_options'], {}), '(shape_options)\n', (25266, 25281), True, 'import numpy as np\n'), ((25469, 25492), 'numpy.dstack', 'np.dstack', (['(pn, pn, pn)'], {}), '((pn, pn, pn))\n', (25478, 25492), True, 'import numpy as np\n'), ((26052, 26127), 'numpy.arange', 'np.arange', (['intensity_start', '(intensity_stop + intensity_step)', 'intensity_step'], {}), '(intensity_start, intensity_stop + intensity_step, intensity_step)\n', (26061, 26127), True, 'import numpy as np\n'), ((26164, 26199), 'numpy.random.choice', 'np.random.choice', (['intensity_options'], {}), '(intensity_options)\n', (26180, 26199), True, 'import numpy as np\n'), ((26902, 26987), 'random.choice', 'random.choice', (['[0.5, 0.6, 0.7, 0.8, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]'], {}), '([0.5, 0.6, 0.7, 0.8, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]\n )\n', (26915, 26987), False, 'import random\n'), ((27680, 27706), 'numpy.random.randint', 'np.random.randint', (['max_mag'], {}), '(max_mag)\n', (27697, 27706), True, 'import numpy as np\n'), ((27725, 27751), 'numpy.random.randint', 'np.random.randint', (['max_ang'], {}), '(max_ang)\n', (27742, 27751), True, 'import numpy as np\n'), ((27773, 27789), 'common.randbin', 'common.randbin', ([], {}), '()\n', (27787, 27789), False, 'import common\n'), ((28557, 28573), 'common.randbin', 'common.randbin', ([], {}), '()\n', (28571, 28573), False, 'import common\n'), ((29264, 29296), 'random.uniform', 'random.uniform', (['min_ang', 'max_ang'], {}), '(min_ang, max_ang)\n', (29278, 29296), False, 'import random\n'), ((30334, 30386), 'cv2.resize', 'cv2.resize', (['im', '(new_w, new_h)'], {'interpolation': 'interp'}), '(im, (new_w, new_h), interpolation=interp)\n', (30344, 30386), False, 'import cv2\n'), ((31050, 31102), 'cv2.resize', 'cv2.resize', (['im', '(new_w, new_h)'], {'interpolation': 'interp'}), '(im, (new_w, new_h), interpolation=interp)\n', (31060, 31102), False, 'import cv2\n'), ((32298, 32350), 'cv2.resize', 'cv2.resize', (['im', '(new_w, new_h)'], {'interpolation': 'interp'}), '(im, (new_w, new_h), interpolation=interp)\n', (32308, 32350), False, 'import cv2\n'), ((33366, 33412), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cX, cY)', '(-angle)', '(1.0)'], {}), '((cX, cY), -angle, 1.0)\n', (33389, 33412), False, 'import cv2\n'), ((33427, 33442), 'numpy.abs', 'np.abs', (['M[0, 0]'], {}), '(M[0, 0])\n', (33433, 33442), True, 'import numpy as np\n'), ((33457, 33472), 'numpy.abs', 'np.abs', (['M[0, 1]'], {}), '(M[0, 1])\n', (33463, 33472), True, 'import numpy as np\n'), ((33825, 33873), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(nW, nH)'], {'flags': 'interp'}), '(image, M, (nW, nH), flags=interp)\n', (33839, 33873), False, 'import cv2\n'), ((34385, 34433), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cm_y, cm_x)', '(-deg)', '(1.0)'], {}), '((cm_y, cm_x), -deg, 1.0)\n', (34408, 34433), False, 'import cv2\n'), ((34456, 34472), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (34464, 34472), True, 'import numpy as np\n'), ((35771, 35834), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'rot_mat', '(new_w[0], new_h[0])'], {'flags': 'interp'}), '(im, rot_mat, (new_w[0], new_h[0]), flags=interp)\n', (35785, 35834), False, 'import cv2\n'), ((37214, 37241), 'cv2.imwrite', 'cv2.imwrite', (['input_path', 'im'], {}), '(input_path, im)\n', (37225, 37241), False, 'import cv2\n'), ((37286, 37355), 'common.shell', 'common.shell', (["('ffmpeg -i ' + input_path + ' -vf yadif ' + output_path)"], {}), "('ffmpeg -i ' + input_path + ' -vf yadif ' + output_path)\n", (37298, 37355), False, 'import common\n'), ((37405, 37428), 'cv2.imread', 'cv2.imread', (['output_path'], {}), '(output_path)\n', (37415, 37428), False, 'import cv2\n'), ((37483, 37504), 'common.rm', 'common.rm', (['input_path'], {}), '(input_path)\n', (37492, 37504), False, 'import common\n'), ((37513, 37535), 'common.rm', 'common.rm', (['output_path'], {}), '(output_path)\n', (37522, 37535), False, 'import common\n'), ((37635, 37672), 'numpy.random.normal', 'np.random.normal', (['mean', 'std', 'im.shape'], {}), '(mean, std, im.shape)\n', (37651, 37672), True, 'import numpy as np\n'), ((38036, 38054), 'cv2.LUT', 'cv2.LUT', (['im', 'table'], {}), '(im, table)\n', (38043, 38054), False, 'import cv2\n'), ((38871, 38907), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (38883, 38907), False, 'import cv2\n'), ((38925, 38950), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (38932, 38950), True, 'import numpy as np\n'), ((38970, 39008), 'cv2.dilate', 'cv2.dilate', (['gray', 'kernel'], {'iterations': '(1)'}), '(gray, kernel, iterations=1)\n', (38980, 39008), False, 'import cv2\n'), ((39031, 39099), 'cv2.threshold', 'cv2.threshold', (['dilation', 'max_black_intensity', '(255)', 'cv2.THRESH_BINARY'], {}), '(dilation, max_black_intensity, 255, cv2.THRESH_BINARY)\n', (39044, 39099), False, 'import cv2\n'), ((39175, 39243), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (39191, 39243), False, 'import cv2\n'), ((39324, 39351), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (39346, 39351), False, 'import cv2\n'), ((43182, 43223), 'numpy.ones', 'np.ones', (['(mask_size, mask_size)', 'np.uint8'], {}), '((mask_size, mask_size), np.uint8)\n', (43189, 43223), True, 'import numpy as np\n'), ((43242, 43288), 'cv2.dilate', 'cv2.dilate', (['scaled_sobel', 'kernel'], {'iterations': '(1)'}), '(scaled_sobel, kernel, iterations=1)\n', (43252, 43288), False, 'import cv2\n'), ((43330, 43379), 'cv2.threshold', 'cv2.threshold', (['dilated', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(dilated, 1, 255, cv2.THRESH_BINARY)\n', (43343, 43379), False, 'import cv2\n'), ((49470, 49513), 'cv2.resize', 'cv2.resize', (['self._raw_frame', '(new_w, new_h)'], {}), '(self._raw_frame, (new_w, new_h))\n', (49480, 49513), False, 'import cv2\n'), ((50034, 50107), 'numpy.zeros', 'np.zeros', (['(new_h, new_w, self._raw_frame.shape[2])', 'self._raw_frame.dtype'], {}), '((new_h, new_w, self._raw_frame.shape[2]), self._raw_frame.dtype)\n', (50042, 50107), True, 'import numpy as np\n'), ((50675, 50720), 'cv2.resize', 'cv2.resize', (['self._raw_frame', '(temp_w, temp_h)'], {}), '(self._raw_frame, (temp_w, temp_h))\n', (50685, 50720), False, 'import cv2\n'), ((51230, 51259), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (51257, 51259), False, 'import caffe\n'), ((52830, 52866), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (52842, 52866), False, 'import cv2\n'), ((52884, 52924), 'cv2.cvtColor', 'cv2.cvtColor', (['new_im', 'cv2.COLOR_GRAY2BGR'], {}), '(new_im, cv2.COLOR_GRAY2BGR)\n', (52896, 52924), False, 'import cv2\n'), ((52941, 53008), 'numpy.random.randint', 'np.random.randint', (['(-noise_delta)', '(noise_delta + 1)'], {'size': 'new_im.shape'}), '(-noise_delta, noise_delta + 1, size=new_im.shape)\n', (52958, 53008), True, 'import numpy as np\n'), ((59087, 59107), 'numpy.unique', 'np.unique', (['raw_label'], {}), '(raw_label)\n', (59096, 59107), True, 'import numpy as np\n'), ((61355, 61451), 'numpy.ndarray', 'np.ndarray', (['(self._raw_frame.shape[0], self._raw_frame.shape[1], cn)', 'self._raw_frame.dtype'], {}), '((self._raw_frame.shape[0], self._raw_frame.shape[1], cn), self.\n _raw_frame.dtype)\n', (61365, 61451), True, 'import numpy as np\n'), ((61934, 61963), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (61961, 61963), False, 'import caffe\n'), ((62285, 62383), 'numpy.zeros', 'np.zeros', (['[self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]]'], {'dtype': 'np.uint8'}), '([self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]\n ], dtype=np.uint8)\n', (62293, 62383), True, 'import numpy as np\n'), ((63521, 63586), 'cv2.threshold', 'cv2.threshold', (['self._raw_frame', 'thresh', 'maxval', 'cv2.THRESH_BINARY'], {}), '(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)\n', (63534, 63586), False, 'import cv2\n'), ((65049, 65069), 'numpy.sum', 'np.sum', (['pmap'], {'axis': '(0)'}), '(pmap, axis=0)\n', (65055, 65069), True, 'import numpy as np\n'), ((65354, 65384), 'numpy.zeros_like', 'np.zeros_like', (['self._raw_frame'], {}), '(self._raw_frame)\n', (65367, 65384), True, 'import numpy as np\n'), ((69472, 69505), 'numpy.count_nonzero', 'np.count_nonzero', (['self._raw_frame'], {}), '(self._raw_frame)\n', (69488, 69505), True, 'import numpy as np\n'), ((69621, 69659), 'numpy.count_nonzero', 'np.count_nonzero', (['(self._raw_frame == 0)'], {}), '(self._raw_frame == 0)\n', (69637, 69659), True, 'import numpy as np\n'), ((73313, 73389), 'numpy.empty', 'np.empty', (['(norm_prev.shape[0], norm_prev.shape[1], 6)'], {'dtype': 'norm_prev.dtype'}), '((norm_prev.shape[0], norm_prev.shape[1], 6), dtype=norm_prev.dtype)\n', (73321, 73389), True, 'import numpy as np\n'), ((73562, 73601), 'numpy.transpose', 'np.transpose', (['combined_image', '(2, 0, 1)'], {}), '(combined_image, (2, 0, 1))\n', (73574, 73601), True, 'import numpy as np\n'), ((74617, 74737), 'numpy.empty', 'np.empty', (['(self._frame_bgr_prev.shape[0], self._frame_bgr_prev.shape[1], channels)'], {'dtype': 'self._frame_bgr_prev.dtype'}), '((self._frame_bgr_prev.shape[0], self._frame_bgr_prev.shape[1],\n channels), dtype=self._frame_bgr_prev.dtype)\n', (74625, 74737), True, 'import numpy as np\n'), ((74974, 75003), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (75001, 75003), False, 'import caffe\n'), ((79298, 79406), 'numpy.empty', 'np.empty', (['(norm_frame_bgr.shape[0], norm_frame_bgr.shape[1], total_channels)'], {'dtype': 'norm_frame_bgr.dtype'}), '((norm_frame_bgr.shape[0], norm_frame_bgr.shape[1], total_channels),\n dtype=norm_frame_bgr.dtype)\n', (79306, 79406), True, 'import numpy as np\n'), ((79621, 79660), 'numpy.transpose', 'np.transpose', (['combined_image', '(2, 0, 1)'], {}), '(combined_image, (2, 0, 1))\n', (79633, 79660), True, 'import numpy as np\n'), ((80649, 80760), 'numpy.empty', 'np.empty', (['(self._frame_bgr.shape[0], self._frame_bgr.shape[1], total_channels)'], {'dtype': 'self._frame_bgr.dtype'}), '((self._frame_bgr.shape[0], self._frame_bgr.shape[1],\n total_channels), dtype=self._frame_bgr.dtype)\n', (80657, 80760), True, 'import numpy as np\n'), ((81011, 81040), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (81038, 81040), False, 'import caffe\n'), ((81780, 81795), 'numpy.max', 'np.max', (['probmap'], {}), '(probmap)\n', (81786, 81795), True, 'import numpy as np\n'), ((81821, 81836), 'numpy.min', 'np.min', (['probmap'], {}), '(probmap)\n', (81827, 81836), True, 'import numpy as np\n'), ((623, 639), 'numpy.stack', 'np.stack', (['[p, p]'], {}), '([p, p])\n', (631, 639), True, 'import numpy as np\n'), ((2232, 2288), 'numpy.clip', 'np.clip', (['((arr - min_arr) / (max_arr - min_arr))', '(0.0)', '(1.0)'], {}), '((arr - min_arr) / (max_arr - min_arr), 0.0, 1.0)\n', (2239, 2288), True, 'import numpy as np\n'), ((5671, 5712), 'cv2.imwrite', 'cv2.imwrite', (['path', 'self._raw_frame', 'flags'], {}), '(path, self._raw_frame, flags)\n', (5682, 5712), False, 'import cv2\n'), ((5746, 5780), 'cv2.imwrite', 'cv2.imwrite', (['path', 'self._raw_frame'], {}), '(path, self._raw_frame)\n', (5757, 5780), False, 'import cv2\n'), ((8554, 8568), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (8564, 8568), True, 'import numpy as np\n'), ((9523, 9559), 'random.randint', 'random.randint', (['(0)', '(height_border - 1)'], {}), '(0, height_border - 1)\n', (9537, 9559), False, 'import random\n'), ((9604, 9639), 'random.randint', 'random.randint', (['(0)', '(width_border - 1)'], {}), '(0, width_border - 1)\n', (9618, 9639), False, 'import random\n'), ((13004, 13040), 'random.randint', 'random.randint', (['(0)', '(height_border - 1)'], {}), '(0, height_border - 1)\n', (13018, 13040), False, 'import random\n'), ((13085, 13120), 'random.randint', 'random.randint', (['(0)', '(width_border - 1)'], {}), '(0, width_border - 1)\n', (13099, 13120), False, 'import random\n'), ((20067, 20078), 'numpy.ones', 'np.ones', (['ks'], {}), '(ks)\n', (20074, 20078), True, 'import numpy as np\n'), ((21740, 21751), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (21746, 21751), True, 'import numpy as np\n'), ((21771, 21782), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (21777, 21782), True, 'import numpy as np\n'), ((23011, 23054), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['new_im', '(ksize, ksize)', '(0)'], {}), '(new_im, (ksize, ksize), 0)\n', (23027, 23054), False, 'import cv2\n'), ((36557, 36579), 'common.deg_to_rad', 'common.deg_to_rad', (['ang'], {}), '(ang)\n', (36574, 36579), False, 'import common\n'), ((36598, 36610), 'numpy.floor', 'np.floor', (['wr'], {}), '(wr)\n', (36606, 36610), True, 'import numpy as np\n'), ((36629, 36641), 'numpy.floor', 'np.floor', (['hr'], {}), '(hr)\n', (36637, 36641), True, 'import numpy as np\n'), ((41670, 41724), 'numpy.empty', 'np.empty', (['(height, width)'], {'dtype': 'self._raw_frame.dtype'}), '((height, width), dtype=self._raw_frame.dtype)\n', (41678, 41724), True, 'import numpy as np\n'), ((42860, 42921), 'cv2.Sobel', 'cv2.Sobel', (['self._raw_frame', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'mask_size'}), '(self._raw_frame, cv2.CV_64F, 1, 0, ksize=mask_size)\n', (42869, 42921), False, 'import cv2\n'), ((42957, 43018), 'cv2.Sobel', 'cv2.Sobel', (['self._raw_frame', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'mask_size'}), '(self._raw_frame, cv2.CV_64F, 0, 1, ksize=mask_size)\n', (42966, 43018), False, 'import cv2\n'), ((43432, 43492), 'cv2.distanceTransform', 'cv2.distanceTransform', (['(255 - thresh)', 'cv2.DIST_L2'], {'maskSize': '(0)'}), '(255 - thresh, cv2.DIST_L2, maskSize=0)\n', (43453, 43492), False, 'import cv2\n'), ((45089, 45127), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (45099, 45127), False, 'import cv2\n'), ((65535, 65633), 'numpy.zeros', 'np.zeros', (['(self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1])'], {'dtype': 'np.uint8'}), '((self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]\n ), dtype=np.uint8)\n', (65543, 65633), True, 'import numpy as np\n'), ((65898, 65983), 'cv2.findContours', 'cv2.findContours', (['self._predicted_map[k]', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(self._predicted_map[k], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n', (65914, 65983), False, 'import cv2\n'), ((69291, 69356), 'cv2.threshold', 'cv2.threshold', (['self._raw_frame', 'thresh', 'maxval', 'cv2.THRESH_BINARY'], {}), '(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)\n', (69304, 69356), False, 'import cv2\n'), ((71796, 71817), 'cv2.imread', 'cv2.imread', (['path_prev'], {}), '(path_prev)\n', (71806, 71817), False, 'import cv2\n'), ((71819, 71840), 'cv2.imread', 'cv2.imread', (['path_next'], {}), '(path_next)\n', (71829, 71840), False, 'import cv2\n'), ((77584, 77610), 'cv2.imread', 'cv2.imread', (['path_frame_bgr'], {}), '(path_frame_bgr)\n', (77594, 77610), False, 'import cv2\n'), ((16746, 16776), 'numpy.full_like', 'np.full_like', (['im', 'border_value'], {}), '(im, border_value)\n', (16758, 16776), True, 'import numpy as np\n'), ((22514, 22545), 'random.shuffle', 'random.shuffle', (['lines_with_mask'], {}), '(lines_with_mask)\n', (22528, 22545), False, 'import random\n'), ((22793, 22880), 'numpy.round', 'np.round', (['(alpha * im[mask_warped > 0] + (1.0 - alpha) * im_warped[mask_warped > 0])'], {}), '(alpha * im[mask_warped > 0] + (1.0 - alpha) * im_warped[\n mask_warped > 0])\n', (22801, 22880), True, 'import numpy as np\n'), ((34668, 34687), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (34676, 34687), True, 'import numpy as np\n'), ((34717, 34740), 'numpy.array', 'np.array', (['[w - 1, 0, 1]'], {}), '([w - 1, 0, 1])\n', (34725, 34740), True, 'import numpy as np\n'), ((34770, 34793), 'numpy.array', 'np.array', (['[0, h - 1, 1]'], {}), '([0, h - 1, 1])\n', (34778, 34793), True, 'import numpy as np\n'), ((34823, 34850), 'numpy.array', 'np.array', (['[w - 1, h - 1, 1]'], {}), '([w - 1, h - 1, 1])\n', (34831, 34850), True, 'import numpy as np\n'), ((37054, 37075), 'common.gen_rand_str', 'common.gen_rand_str', ([], {}), '()\n', (37073, 37075), False, 'import common\n'), ((37134, 37155), 'common.gen_rand_str', 'common.gen_rand_str', ([], {}), '()\n', (37153, 37155), False, 'import common\n'), ((41833, 41918), 'numpy.empty', 'np.empty', (['(height, width, self._raw_frame.shape[2])'], {'dtype': 'self._raw_frame.dtype'}), '((height, width, self._raw_frame.shape[2]), dtype=self._raw_frame.dtype\n )\n', (41841, 41918), True, 'import numpy as np\n'), ((41954, 42020), 'numpy.empty', 'np.empty', (['(self._raw_frame.shape[2],)'], {'dtype': 'self._raw_frame.dtype'}), '((self._raw_frame.shape[2],), dtype=self._raw_frame.dtype)\n', (41962, 42020), True, 'import numpy as np\n'), ((43120, 43137), 'numpy.max', 'np.max', (['sobel_64f'], {}), '(sobel_64f)\n', (43126, 43137), True, 'import numpy as np\n'), ((43967, 44039), 'numpy.array_equal', 'np.array_equal', (['self._raw_frame[:, :, channel]', 'self._raw_frame[:, :, 0]'], {}), '(self._raw_frame[:, :, channel], self._raw_frame[:, :, 0])\n', (43981, 44039), True, 'import numpy as np\n'), ((47154, 47187), 'numpy.expand_dims', 'np.expand_dims', (['raw_frame'], {'axis': '(2)'}), '(raw_frame, axis=2)\n', (47168, 47187), True, 'import numpy as np\n'), ((52320, 52363), 'numpy.random.normal', 'np.random.normal', (['mean', 'std', '(row, col, ch)'], {}), '(mean, std, (row, col, ch))\n', (52336, 52363), True, 'import numpy as np\n'), ((53042, 53073), 'numpy.clip', 'np.clip', (['(new_im + noise)', '(0)', '(255)'], {}), '(new_im + noise, 0, 255)\n', (53049, 53073), True, 'import numpy as np\n'), ((64993, 65029), 'numpy.amax', 'np.amax', (['self._predicted_map'], {'axis': '(0)'}), '(self._predicted_map, axis=0)\n', (65000, 65029), True, 'import numpy as np\n'), ((69165, 69191), 'numpy.unique', 'np.unique', (['self._raw_frame'], {}), '(self._raw_frame)\n', (69174, 69191), True, 'import numpy as np\n'), ((11286, 11322), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (11300, 11322), False, 'import random\n'), ((11369, 11405), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (11383, 11405), False, 'import random\n'), ((15172, 15208), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (15186, 15208), False, 'import random\n'), ((15263, 15299), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (15277, 15299), False, 'import random\n'), ((17318, 17361), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0, 1, -start_row]]'], {}), '([[1, 0, 0], [0, 1, -start_row]])\n', (17328, 17361), True, 'import numpy as np\n'), ((17387, 17496), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (17401, 17496), False, 'import cv2\n'), ((17570, 17613), 'numpy.float32', 'np.float32', (['[[1, 0, -start_col], [0, 1, 0]]'], {}), '([[1, 0, -start_col], [0, 1, 0]])\n', (17580, 17613), True, 'import numpy as np\n'), ((17639, 17748), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (17653, 17748), False, 'import cv2\n'), ((17824, 17866), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0, 1, start_row]]'], {}), '([[1, 0, 0], [0, 1, start_row]])\n', (17834, 17866), True, 'import numpy as np\n'), ((17892, 18001), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (17906, 18001), False, 'import cv2\n'), ((18076, 18118), 'numpy.float32', 'np.float32', (['[[1, 0, start_col], [0, 1, 0]]'], {}), '([[1, 0, start_col], [0, 1, 0]])\n', (18086, 18118), True, 'import numpy as np\n'), ((18144, 18253), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (18158, 18253), False, 'import cv2\n'), ((34893, 34916), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'tl'], {}), '(rot_mat_hom, tl)\n', (34899, 34916), True, 'import numpy as np\n'), ((34959, 34982), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'tr'], {}), '(rot_mat_hom, tr)\n', (34965, 34982), True, 'import numpy as np\n'), ((35025, 35048), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'bl'], {}), '(rot_mat_hom, bl)\n', (35031, 35048), True, 'import numpy as np\n'), ((35091, 35114), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'br'], {}), '(rot_mat_hom, br)\n', (35097, 35114), True, 'import numpy as np\n'), ((37024, 37045), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (37043, 37045), False, 'import tempfile\n'), ((37104, 37125), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (37123, 37125), False, 'import tempfile\n'), ((38257, 38281), 'numpy.ones', 'np.ones', (['(height, width)'], {}), '((height, width))\n', (38264, 38281), True, 'import numpy as np\n'), ((38284, 38329), 'numpy.linspace', 'np.linspace', (['left_colour', 'right_colour', 'width'], {}), '(left_colour, right_colour, width)\n', (38295, 38329), True, 'import numpy as np\n'), ((51463, 51507), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'self._raw_frame', 'flags'], {}), "('.jpg', self._raw_frame, flags)\n", (51475, 51507), False, 'import cv2\n'), ((58059, 58123), 'numpy.array_equal', 'np.array_equal', (['label_image[:, :, channel]', 'label_image[:, :, 0]'], {}), '(label_image[:, :, channel], label_image[:, :, 0])\n', (58073, 58123), True, 'import numpy as np\n'), ((82666, 82706), 'numpy.unique', 'np.unique', (['data[:, left_right_offset, 0]'], {}), '(data[:, left_right_offset, 0])\n', (82675, 82706), True, 'import numpy as np\n'), ((82763, 82804), 'numpy.unique', 'np.unique', (['data[:, -left_right_offset, 0]'], {}), '(data[:, -left_right_offset, 0])\n', (82772, 82804), True, 'import numpy as np\n'), ((83608, 83648), 'numpy.unique', 'np.unique', (['data[top_bottom_offset, :, 0]'], {}), '(data[top_bottom_offset, :, 0])\n', (83617, 83648), True, 'import numpy as np\n'), ((83705, 83746), 'numpy.unique', 'np.unique', (['data[-top_bottom_offset, :, 0]'], {}), '(data[-top_bottom_offset, :, 0])\n', (83714, 83746), True, 'import numpy as np\n'), ((38562, 38586), 'numpy.ones', 'np.ones', (['(height, width)'], {}), '((height, width))\n', (38569, 38586), True, 'import numpy as np\n'), ((38589, 38634), 'numpy.linspace', 'np.linspace', (['left_colour', 'right_colour', 'width'], {}), '(left_colour, right_colour, width)\n', (38600, 38634), True, 'import numpy as np\n'), ((83134, 83174), 'numpy.unique', 'np.unique', (['data[:, left_right_offset, 0]'], {}), '(data[:, left_right_offset, 0])\n', (83143, 83174), True, 'import numpy as np\n'), ((83235, 83276), 'numpy.unique', 'np.unique', (['data[:, -left_right_offset, 0]'], {}), '(data[:, -left_right_offset, 0])\n', (83244, 83276), True, 'import numpy as np\n'), ((84073, 84113), 'numpy.unique', 'np.unique', (['data[top_bottom_offset, :, 0]'], {}), '(data[top_bottom_offset, :, 0])\n', (84082, 84113), True, 'import numpy as np\n'), ((84174, 84215), 'numpy.unique', 'np.unique', (['data[-top_bottom_offset, :, 0]'], {}), '(data[-top_bottom_offset, :, 0])\n', (84183, 84215), True, 'import numpy as np\n'), ((22351, 22374), 'numpy.nonzero', 'np.nonzero', (['mask_warped'], {}), '(mask_warped)\n', (22361, 22374), True, 'import numpy as np\n'), ((37984, 38001), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (37993, 38001), True, 'import numpy as np\n')]
#!/usr/bin/python2 import sys, os op = os.path.basename(sys.argv[0]) mypath = os.path.abspath(os.path.dirname(sys.argv[0])) PATH = os.getenv('PATH').split(':') if op == 'mv': # copy much cleaner than move in a build (immutable inputs) op = 'cp' # Delete ourselves from the PATH if mypath in PATH: del PATH[PATH.index(mypath)] os.environ['PATH'] = ':'.join(PATH) # Log a command entry LOGFILE = os.getenv('TRACE_LOG_LOCATION') with open(LOGFILE, 'a') as file: file.write(repr((os.getpid(), os.getcwd(), [op] + sys.argv[1:])) + '\n') # Create a process group for this command os.setpgid(0, 0) # Execute command = list(sys.argv) command[0] = op os.execvp(op, command) # STRACE_FILE = '{}.strace.{}'.format(LOGFILE, os.getpid()) # os.execvp('strace', ['strace', '-o', STRACE_FILE, '-f', '-e', 'open,chdir'] + command)
[ "os.getpid", "os.path.basename", "os.getcwd", "os.path.dirname", "os.setpgid", "os.execvp", "os.getenv" ]
[((40, 69), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (56, 69), False, 'import sys, os\n'), ((410, 441), 'os.getenv', 'os.getenv', (['"""TRACE_LOG_LOCATION"""'], {}), "('TRACE_LOG_LOCATION')\n", (419, 441), False, 'import sys, os\n'), ((595, 611), 'os.setpgid', 'os.setpgid', (['(0)', '(0)'], {}), '(0, 0)\n', (605, 611), False, 'import sys, os\n'), ((664, 686), 'os.execvp', 'os.execvp', (['op', 'command'], {}), '(op, command)\n', (673, 686), False, 'import sys, os\n'), ((95, 123), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (110, 123), False, 'import sys, os\n'), ((132, 149), 'os.getenv', 'os.getenv', (['"""PATH"""'], {}), "('PATH')\n", (141, 149), False, 'import sys, os\n'), ((496, 507), 'os.getpid', 'os.getpid', ([], {}), '()\n', (505, 507), False, 'import sys, os\n'), ((509, 520), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (518, 520), False, 'import sys, os\n')]
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import numpy as np from test_utils import ReportJSON from tensorflow.compiler.tests import xla_test from tensorflow.python.platform import googletest from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ipu.config import IPUConfig from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops TYPES = (np.float16, np.float32, np.int32) TESTCASES = [{"testcase_name": np.dtype(x).name, "dtype": x} for x in TYPES] def _get_random_input(dtype, shape): if np.issubdtype(dtype, np.integer): info_fn = np.iinfo random_fn = np.random.random_integers else: info_fn = np.finfo random_fn = np.random.uniform return random_fn(info_fn(dtype).min, info_fn(dtype).max, size=shape).astype(dtype) class ArgMinMax(xla_test.XLATestCase, parameterized.TestCase): @parameterized.named_parameters(*TESTCASES) def testArgMaxBasic(self, dtype): cfg = IPUConfig() cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a): return math_ops.argmax(a, output_type=dtypes.int32) with self.session() as sess: report_json = ReportJSON(self, sess) report_json.reset() with ops.device('cpu'): pa = array_ops.placeholder(dtype, [3, 5, 2]) with ops.device("/device:IPU:0"): out = model(pa) input = _get_random_input(dtype, (3, 5, 2)) fd = {pa: input} result = sess.run(out, fd) self.assertAllClose(result, np.argmax(input, axis=0)) report_json.parse_log(assert_len=4) @parameterized.named_parameters(*TESTCASES) def testArgMaxHalf(self, dtype): cfg = IPUConfig() cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a): return math_ops.argmax(a, output_type=dtypes.int32) with self.session() as sess: with ops.device('cpu'): pa = array_ops.placeholder(dtype, [3, 5, 2]) with ops.device("/device:IPU:0"): out = model(pa) input = _get_random_input(dtype, (3, 5, 2)) fd = {pa: input} result = sess.run(out, fd) self.assertAllClose(result, np.argmax(input, axis=0)) @parameterized.named_parameters(*TESTCASES) def testArgMaxMultiDimensional(self, dtype): cfg = IPUConfig() cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a, axis): return math_ops.argmax(a, axis=axis, output_type=dtypes.int32) for axis in range(6): with self.session() as sess: with ops.device('cpu'): pa = array_ops.placeholder(dtype, [1, 2, 3, 4, 5, 6]) p_axis = array_ops.placeholder(np.int32, shape=()) with ops.device("/device:IPU:0"): out = model(pa, p_axis) input = _get_random_input(dtype, (1, 2, 3, 4, 5, 6)) fd = {pa: input, p_axis: axis} result = sess.run(out, fd) self.assertAllClose(result, np.argmax(input, axis=axis)) @parameterized.named_parameters(*TESTCASES) def testArgMinBasic(self, dtype): cfg = IPUConfig() cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a): return math_ops.argmin(a, output_type=dtypes.int32) with self.session() as sess: report_json = ReportJSON(self, sess) with ops.device('cpu'): pa = array_ops.placeholder(dtype, [3, 5, 2]) with ops.device("/device:IPU:0"): out = model(pa) report_json.reset() input = _get_random_input(dtype, (3, 5, 2)) fd = {pa: input} result = sess.run(out, fd) self.assertAllClose(result, np.argmin(input, axis=0)) report_json.parse_log(assert_len=4) @parameterized.named_parameters(*TESTCASES) def testArgMinHalf(self, dtype): cfg = IPUConfig() cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a): return math_ops.argmin(a, output_type=dtypes.int32) with self.session() as sess: with ops.device('cpu'): pa = array_ops.placeholder(dtype, [3, 5, 2]) with ops.device("/device:IPU:0"): out = model(pa) input = _get_random_input(dtype, (3, 5, 2)) fd = {pa: input} result = sess.run(out, fd) self.assertAllClose(result, np.argmin(input, axis=0)) @parameterized.named_parameters(*TESTCASES) def testArgMinMultiDimensional(self, dtype): cfg = IPUConfig() cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a, axis): return math_ops.argmin(a, axis=axis, output_type=dtypes.int32) for axis in range(6): with self.session() as sess: with ops.device('cpu'): pa = array_ops.placeholder(dtype, [1, 2, 3, 4, 5, 6]) p_axis = array_ops.placeholder(np.int32, shape=()) with ops.device("/device:IPU:0"): out = model(pa, p_axis) input = _get_random_input(dtype, (1, 2, 3, 4, 5, 6)) fd = {pa: input, p_axis: axis} result = sess.run(out, fd) self.assertAllClose(result, np.argmin(input, axis=axis)) @parameterized.named_parameters(*TESTCASES) def testArgMaxNegativeDim(self, dtype): cfg = IPUConfig() cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a): return math_ops.argmax(a, axis=-1, output_type=dtypes.int32) with self.session() as sess: report_json = ReportJSON(self, sess) report_json.reset() with ops.device('cpu'): pa = array_ops.placeholder(dtype, [3, 5, 2]) with ops.device("/device:IPU:0"): out = model(pa) input = _get_random_input(dtype, (3, 5, 2)) fd = {pa: input} result = sess.run(out, fd) self.assertAllClose(result, np.argmax(input, axis=-1)) report_json.parse_log(assert_len=4) @parameterized.named_parameters(*TESTCASES) def testArgMaxVector(self, dtype): cfg = IPUConfig() cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access cfg.ipu_model.compile_ipu_code = False cfg.configure_ipu_system() def model(a): return math_ops.argmax(a, axis=0, output_type=dtypes.int32) with self.session() as sess: report_json = ReportJSON(self, sess) report_json.reset() with ops.device('cpu'): pa = array_ops.placeholder(dtype, [3]) with ops.device("/device:IPU:0"): out = model(pa) input = _get_random_input(dtype, (3)) fd = {pa: input} result = sess.run(out, fd) self.assertAllClose(result, np.argmax(input)) report_json.parse_log(assert_len=4) if __name__ == "__main__": os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=1 ' + os.environ.get('TF_XLA_FLAGS', '')) googletest.main()
[ "tensorflow.python.ops.math_ops.argmin", "tensorflow.python.ipu.config.IPUConfig", "tensorflow.python.ops.math_ops.argmax", "numpy.argmax", "numpy.dtype", "numpy.argmin", "tensorflow.python.platform.googletest.main", "os.environ.get", "test_utils.ReportJSON", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.array_ops.placeholder", "absl.testing.parameterized.named_parameters", "numpy.issubdtype" ]
[((1394, 1426), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (1407, 1426), True, 'import numpy as np\n'), ((1730, 1772), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (1760, 1772), False, 'from absl.testing import parameterized\n'), ((2529, 2571), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (2559, 2571), False, 'from absl.testing import parameterized\n'), ((3134, 3176), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (3164, 3176), False, 'from absl.testing import parameterized\n'), ((3916, 3958), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (3946, 3958), False, 'from absl.testing import parameterized\n'), ((4716, 4758), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (4746, 4758), False, 'from absl.testing import parameterized\n'), ((5321, 5363), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (5351, 5363), False, 'from absl.testing import parameterized\n'), ((6103, 6145), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (6133, 6145), False, 'from absl.testing import parameterized\n'), ((6918, 6960), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (6948, 6960), False, 'from absl.testing import parameterized\n'), ((7866, 7883), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (7881, 7883), False, 'from tensorflow.python.platform import googletest\n'), ((1819, 1830), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (1828, 1830), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((2617, 2628), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (2626, 2628), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((3234, 3245), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (3243, 3245), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((4005, 4016), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (4014, 4016), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((4804, 4815), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (4813, 4815), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((5421, 5432), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (5430, 5432), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((6198, 6209), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (6207, 6209), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((7008, 7019), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (7017, 7019), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((7828, 7862), 'os.environ.get', 'os.environ.get', (['"""TF_XLA_FLAGS"""', '""""""'], {}), "('TF_XLA_FLAGS', '')\n", (7842, 7862), False, 'import os\n'), ((1304, 1315), 'numpy.dtype', 'np.dtype', (['x'], {}), '(x)\n', (1312, 1315), True, 'import numpy as np\n'), ((2017, 2061), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (2032, 2061), False, 'from tensorflow.python.ops import math_ops\n'), ((2116, 2138), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (2126, 2138), False, 'from test_utils import ReportJSON\n'), ((2735, 2779), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (2750, 2779), False, 'from tensorflow.python.ops import math_ops\n'), ((3358, 3413), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'axis': 'axis', 'output_type': 'dtypes.int32'}), '(a, axis=axis, output_type=dtypes.int32)\n', (3373, 3413), False, 'from tensorflow.python.ops import math_ops\n'), ((4203, 4247), 'tensorflow.python.ops.math_ops.argmin', 'math_ops.argmin', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (4218, 4247), False, 'from tensorflow.python.ops import math_ops\n'), ((4302, 4324), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (4312, 4324), False, 'from test_utils import ReportJSON\n'), ((4922, 4966), 'tensorflow.python.ops.math_ops.argmin', 'math_ops.argmin', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (4937, 4966), False, 'from tensorflow.python.ops import math_ops\n'), ((5545, 5600), 'tensorflow.python.ops.math_ops.argmin', 'math_ops.argmin', (['a'], {'axis': 'axis', 'output_type': 'dtypes.int32'}), '(a, axis=axis, output_type=dtypes.int32)\n', (5560, 5600), False, 'from tensorflow.python.ops import math_ops\n'), ((6396, 6449), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'axis': '(-1)', 'output_type': 'dtypes.int32'}), '(a, axis=-1, output_type=dtypes.int32)\n', (6411, 6449), False, 'from tensorflow.python.ops import math_ops\n'), ((6504, 6526), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (6514, 6526), False, 'from test_utils import ReportJSON\n'), ((7206, 7258), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'axis': '(0)', 'output_type': 'dtypes.int32'}), '(a, axis=0, output_type=dtypes.int32)\n', (7221, 7258), False, 'from tensorflow.python.ops import math_ops\n'), ((7313, 7335), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (7323, 7335), False, 'from test_utils import ReportJSON\n'), ((2177, 2194), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (2187, 2194), False, 'from tensorflow.python.framework import ops\n'), ((2209, 2248), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (2230, 2248), False, 'from tensorflow.python.ops import array_ops\n'), ((2261, 2288), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (2271, 2288), False, 'from tensorflow.python.framework import ops\n'), ((2456, 2480), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (2465, 2480), True, 'import numpy as np\n'), ((2825, 2842), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (2835, 2842), False, 'from tensorflow.python.framework import ops\n'), ((2857, 2896), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (2878, 2896), False, 'from tensorflow.python.ops import array_ops\n'), ((2909, 2936), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (2919, 2936), False, 'from tensorflow.python.framework import ops\n'), ((3104, 3128), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (3113, 3128), True, 'import numpy as np\n'), ((4337, 4354), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (4347, 4354), False, 'from tensorflow.python.framework import ops\n'), ((4369, 4408), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (4390, 4408), False, 'from tensorflow.python.ops import array_ops\n'), ((4421, 4448), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (4431, 4448), False, 'from tensorflow.python.framework import ops\n'), ((4643, 4667), 'numpy.argmin', 'np.argmin', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (4652, 4667), True, 'import numpy as np\n'), ((5012, 5029), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (5022, 5029), False, 'from tensorflow.python.framework import ops\n'), ((5044, 5083), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (5065, 5083), False, 'from tensorflow.python.ops import array_ops\n'), ((5096, 5123), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (5106, 5123), False, 'from tensorflow.python.framework import ops\n'), ((5291, 5315), 'numpy.argmin', 'np.argmin', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (5300, 5315), True, 'import numpy as np\n'), ((6565, 6582), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (6575, 6582), False, 'from tensorflow.python.framework import ops\n'), ((6597, 6636), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (6618, 6636), False, 'from tensorflow.python.ops import array_ops\n'), ((6649, 6676), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (6659, 6676), False, 'from tensorflow.python.framework import ops\n'), ((6844, 6869), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(-1)'}), '(input, axis=-1)\n', (6853, 6869), True, 'import numpy as np\n'), ((7374, 7391), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (7384, 7391), False, 'from tensorflow.python.framework import ops\n'), ((7406, 7439), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3]'], {}), '(dtype, [3])\n', (7427, 7439), False, 'from tensorflow.python.ops import array_ops\n'), ((7452, 7479), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (7462, 7479), False, 'from tensorflow.python.framework import ops\n'), ((7641, 7657), 'numpy.argmax', 'np.argmax', (['input'], {}), '(input)\n', (7650, 7657), True, 'import numpy as np\n'), ((3489, 3506), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (3499, 3506), False, 'from tensorflow.python.framework import ops\n'), ((3523, 3571), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[1, 2, 3, 4, 5, 6]'], {}), '(dtype, [1, 2, 3, 4, 5, 6])\n', (3544, 3571), False, 'from tensorflow.python.ops import array_ops\n'), ((3591, 3632), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'shape': '()'}), '(np.int32, shape=())\n', (3612, 3632), False, 'from tensorflow.python.ops import array_ops\n'), ((3647, 3674), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (3657, 3674), False, 'from tensorflow.python.framework import ops\n'), ((3883, 3910), 'numpy.argmax', 'np.argmax', (['input'], {'axis': 'axis'}), '(input, axis=axis)\n', (3892, 3910), True, 'import numpy as np\n'), ((5676, 5693), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (5686, 5693), False, 'from tensorflow.python.framework import ops\n'), ((5710, 5758), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[1, 2, 3, 4, 5, 6]'], {}), '(dtype, [1, 2, 3, 4, 5, 6])\n', (5731, 5758), False, 'from tensorflow.python.ops import array_ops\n'), ((5778, 5819), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'shape': '()'}), '(np.int32, shape=())\n', (5799, 5819), False, 'from tensorflow.python.ops import array_ops\n'), ((5834, 5861), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (5844, 5861), False, 'from tensorflow.python.framework import ops\n'), ((6070, 6097), 'numpy.argmin', 'np.argmin', (['input'], {'axis': 'axis'}), '(input, axis=axis)\n', (6079, 6097), True, 'import numpy as np\n')]
# coding=utf-8 """ 백준 11279번 : 최대 힙 """ import heapq import sys N = int(sys.stdin.readline()) heap = [] for _ in range(N): num = int(sys.stdin.readline()) if num == 0: if len(heap) != 0: print(heapq.heappop(heap)[1]) else: print(0) else: heapq.heappush(heap, (-num, num)) # max_heap
[ "heapq.heappush", "sys.stdin.readline", "heapq.heappop" ]
[((73, 93), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (91, 93), False, 'import sys\n'), ((139, 159), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (157, 159), False, 'import sys\n'), ((300, 333), 'heapq.heappush', 'heapq.heappush', (['heap', '(-num, num)'], {}), '(heap, (-num, num))\n', (314, 333), False, 'import heapq\n'), ((223, 242), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (236, 242), False, 'import heapq\n')]
import pyHiChi as pfc import numpy as np import math as ma def valueEx(x, y, z): Ex = 0 #for x or y #Ex=np.sin(z) #for z return Ex def valueEy(x, y, z): #Ey = 0 #for y or z #Ey = np.sin(x) #for x Ey = np.sin(x - z) #for xz return Ey def valueEz(x, y, z): Ez = 0 #for x or z or xz #Ez = np.sin(y) #for y return Ez def valueBx(x, y, z): #Bx = 0 #for x or z #Bx = np.sin(y) #for y Bx = np.sin(x - z)/np.sqrt(2) #for xz return Bx def valueBy(x, y, z): By = 0 #for x or y or xz #By = np.sin(z) #for z return By def valueBz(x, y, z): #Bz = 0 #for y or z #Bz = np.sin(x) #for x Bz = np.sin(x - z)/np.sqrt(2) #for xz return Bz def step(minCoords, maxCoords, gridSize): steps = pfc.vector3d(1, 1, 1) steps.x = (maxCoords.x - minCoords.x)/(gridSize.x) steps.y = (maxCoords.y - minCoords.y)/(gridSize.y) steps.z = (maxCoords.z - minCoords.z)/(gridSize.z) return steps gridSize = pfc.vector3d(20, 20, 20) minCoords = pfc.vector3d(0.0, 0.0, 0.0) maxCoords = pfc.vector3d(2*ma.pi, 2*ma.pi, 2*ma.pi) stepsGrid = step(minCoords, maxCoords, gridSize) timeStep = 1e-14 grid = pfc.YeeGrid(gridSize, timeStep, minCoords, stepsGrid) grid.setE(valueEx, valueEy, valueEz) grid.setB(valueBx, valueBy, valueBz) fieldSolver = pfc.FDTD(grid) fieldSolver.setPML(0, 0, 0) periodicalBC = pfc.PeriodicalBC(fieldSolver) #show import matplotlib.pyplot as plt import matplotlib.animation as animation N = 50 eps = 0.0 x = np.arange(eps, 2*ma.pi - eps, 2*(ma.pi-eps)/N) z = np.arange(eps, 2*ma.pi - eps, 2*(ma.pi-eps)/N) def getFields(): global grid, x, z, N #print(grid) Ex = np.zeros(shape=(N,N)) Ey = np.zeros(shape=(N,N)) Ez = np.zeros(shape=(N,N)) Bx = np.zeros(shape=(N,N)) By = np.zeros(shape=(N,N)) Bz = np.zeros(shape=(N,N)) for ix in range(N): for iy in range(N): coordXZ = pfc.vector3d(x[ix], 0.0, z[iy]) #for x or z or xz #coordXZ = pfc.vector3d(x[ix], z[iy], 0.0) #for y or x E = grid.getE(coordXZ) Ex[ix, iy] = E.x Ey[ix, iy] = E.y Ez[ix, iy] = E.z B = grid.getB(coordXZ) Bx[ix, iy] = B.x By[ix, iy] = B.y Bz[ix, iy] = B.z return Ex, Ey, Ez, Bx, By, Bz def updateData(): for i in range(1000): fieldSolver.updateFields() (Ex, Ey, Ez, Bx, By, Bz) = getFields() fig, axes = plt.subplots(ncols=3, nrows=2) im11 = axes[0, 0].imshow(Ex, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True) fig.colorbar(im11, ax=axes[0, 0]) im12 = axes[0, 1].imshow(Ey, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True) fig.colorbar(im12, ax=axes[0, 1]) im13 = axes[0, 2].imshow(Ez, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True) fig.colorbar(im13, ax=axes[0, 2]) im21 = axes[1, 0].imshow(Bx, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True) fig.colorbar(im21, ax=axes[1, 0]) im22 = axes[1, 1].imshow(By, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True) fig.colorbar(im22, ax=axes[1, 1]) im23 = axes[1, 2].imshow(Bz, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True) fig.colorbar(im23, ax=axes[1, 2]) i = 0 def updatefig(*args): global i updateData() (Ex, Ey, Ez, Bx, By, Bz) = getFields() im11.set_array(Ex) im12.set_array(Ey) im13.set_array(Ez) im21.set_array(Bx) im22.set_array(By) im23.set_array(Bz) i = i + 1 return im11, im12, im13, im21, im22, im23, ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=True) plt.show()
[ "matplotlib.pyplot.show", "numpy.zeros", "pyHiChi.PeriodicalBC", "matplotlib.animation.FuncAnimation", "numpy.sin", "numpy.arange", "pyHiChi.vector3d", "pyHiChi.YeeGrid", "numpy.sqrt", "matplotlib.pyplot.subplots", "pyHiChi.FDTD" ]
[((902, 926), 'pyHiChi.vector3d', 'pfc.vector3d', (['(20)', '(20)', '(20)'], {}), '(20, 20, 20)\n', (914, 926), True, 'import pyHiChi as pfc\n'), ((939, 966), 'pyHiChi.vector3d', 'pfc.vector3d', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (951, 966), True, 'import pyHiChi as pfc\n'), ((979, 1024), 'pyHiChi.vector3d', 'pfc.vector3d', (['(2 * ma.pi)', '(2 * ma.pi)', '(2 * ma.pi)'], {}), '(2 * ma.pi, 2 * ma.pi, 2 * ma.pi)\n', (991, 1024), True, 'import pyHiChi as pfc\n'), ((1093, 1146), 'pyHiChi.YeeGrid', 'pfc.YeeGrid', (['gridSize', 'timeStep', 'minCoords', 'stepsGrid'], {}), '(gridSize, timeStep, minCoords, stepsGrid)\n', (1104, 1146), True, 'import pyHiChi as pfc\n'), ((1237, 1251), 'pyHiChi.FDTD', 'pfc.FDTD', (['grid'], {}), '(grid)\n', (1245, 1251), True, 'import pyHiChi as pfc\n'), ((1295, 1324), 'pyHiChi.PeriodicalBC', 'pfc.PeriodicalBC', (['fieldSolver'], {}), '(fieldSolver)\n', (1311, 1324), True, 'import pyHiChi as pfc\n'), ((1427, 1481), 'numpy.arange', 'np.arange', (['eps', '(2 * ma.pi - eps)', '(2 * (ma.pi - eps) / N)'], {}), '(eps, 2 * ma.pi - eps, 2 * (ma.pi - eps) / N)\n', (1436, 1481), True, 'import numpy as np\n'), ((1478, 1532), 'numpy.arange', 'np.arange', (['eps', '(2 * ma.pi - eps)', '(2 * (ma.pi - eps) / N)'], {}), '(eps, 2 * ma.pi - eps, 2 * (ma.pi - eps) / N)\n', (1487, 1532), True, 'import numpy as np\n'), ((2245, 2275), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(3)', 'nrows': '(2)'}), '(ncols=3, nrows=2)\n', (2257, 2275), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3544), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'updatefig'], {'interval': '(50)', 'blit': '(True)'}), '(fig, updatefig, interval=50, blit=True)\n', (3504, 3544), True, 'import matplotlib.animation as animation\n'), ((3546, 3556), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3554, 3556), True, 'import matplotlib.pyplot as plt\n'), ((209, 222), 'numpy.sin', 'np.sin', (['(x - z)'], {}), '(x - z)\n', (215, 222), True, 'import numpy as np\n'), ((698, 719), 'pyHiChi.vector3d', 'pfc.vector3d', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (710, 719), True, 'import pyHiChi as pfc\n'), ((1586, 1608), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1594, 1608), True, 'import numpy as np\n'), ((1614, 1636), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1622, 1636), True, 'import numpy as np\n'), ((1642, 1664), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1650, 1664), True, 'import numpy as np\n'), ((1670, 1692), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1678, 1692), True, 'import numpy as np\n'), ((1698, 1720), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1706, 1720), True, 'import numpy as np\n'), ((1726, 1748), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1734, 1748), True, 'import numpy as np\n'), ((400, 413), 'numpy.sin', 'np.sin', (['(x - z)'], {}), '(x - z)\n', (406, 413), True, 'import numpy as np\n'), ((414, 424), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (421, 424), True, 'import numpy as np\n'), ((602, 615), 'numpy.sin', 'np.sin', (['(x - z)'], {}), '(x - z)\n', (608, 615), True, 'import numpy as np\n'), ((616, 626), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (623, 626), True, 'import numpy as np\n'), ((1804, 1835), 'pyHiChi.vector3d', 'pfc.vector3d', (['x[ix]', '(0.0)', 'z[iy]'], {}), '(x[ix], 0.0, z[iy])\n', (1816, 1835), True, 'import pyHiChi as pfc\n')]
""" * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. """ """ This script is only used for demo of fprop_v4 This version will be deprecated in near future, please update to fprop or fprop_experimental. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import tensorflow as tf from tensorflow.python.distribute.values import PerReplica import hugectr_tf_ops """ 1. Define DNN model with fprop_v4, whole DNN model should be split into two sub-models.""" # define sparse model which contains embedding layer(s) class PluginSparseModel(tf.keras.models.Model): def __init__(self, gpus, batch_size, embedding_type, vocabulary_size, slot_num, embedding_vec_size, embedding_type, opt_hparam, update_type, atomic_update, max_feature_num, max_nnz, combiner, gpu_count): super(PluginSparseModel, self).__init__() self.vocabulary_size_each_gpu = (vocabulary_size // gpu_count) + 1 self.slot_num = slot_num self.embedding_vec_size = embedding_vec_size self.embedding_type = embedding_type self.optimizer_type = optimizer self.opt_hparam = opt_hparam self.update_type = update_type self.atomic_update = atomic_update self.max_feature_num = max_feature_num self.max_nnz = max_nnz self.combiner = combiner self.gpu_count = gpu_count # Make use init() only be called once. It will create resource manager for embedding_plugin. hugectr_tf_ops.init(visiable_gpus=gpus, seed=123, key_type='int64', value_type='float', batch_size=batch_size, batch_size_eval=len(gpus)) # create one embedding layer, and its embedding_name will be unique if there are more than one embedding layer. self.embedding_name = hugectr_tf_ops.create_embedding(initializer, name_=name, embedding_type=self.embedding_type, optimizer_type=self.optimizer_type, max_vocabulary_size_per_gpu=self.vocabulary_size_each_gpu, opt_hparams=self.opt_hparam, update_type=self.update_type, atomic_update=self.atomic_update, slot_num=self.slot_num, max_nnz=self.max_nnz, max_feature_num=self.max_feature_num, embedding_vec_size=self.embedding_vec_size, combiner=self.combiner) def build(self, _): # this tf.Variable is used for embedding plugin. self.bp_trigger = self.add_weight(name='bp_trigger', shape=(1,), dtype=tf.float32, trainable=True) @tf.function def call(self, row_indices, values, training=True): # forward propagtion of embedding layer return hugectr_tf_ops.fprop_v4(embedding_name=self.embedding_name, row_indices=row_indices, values=values, bp_trigger=self.bp_trigger, is_training=training, output_shape=[self.batch_size, self.slot_num, self.embedding_vec_size]) # define dense model which contains other parts of the DNN model class DenseModel(tf.keras.models.Model): def __init__(self, num_layers): super(DenseModel, self).__init__() self.num_layers = num_layers self.dense_layers = [] for _ in range(num_layers - 1): self.dense_layers.append(tf.keras.layers.Dense(units=1024, activation='relu')) self.out_layer = tf.keras.layers.Dense(units=1, activation='sigmoid', use_bias=True, kernel_initializer='glorot_normal', bias_initializer='glorot_normal') @tf.function def call(self, inputs, training=True): hidden = tf.reshape(inputs, [tf.shape(inputs)[0], 26 * 32]) # [batchsize, slot_num * embedding_vec_size] for i in range(self.num_layers - 1): hidden = self.dense_layers[i](hidden) result = self.out_layer(hidden) return result """ 2.Define training loop with the model mentioned above """ def main(): # create MirroredStrategy with specified GPUs. strategy = tf.distribute.MirroredStrategy(devices=["/GPU:" + str(i) for i in range(gpu_count)]) # create sparse model outside the scope of MirroredStrategy sparse_model = PluginSparseModel(...) sparse_opt = tf.keras.optimizers.SGD() # create dense model inside the scope of MirroredSrategy with strategy.scope(): dense_model = DenseModel(...) dense_opt = tf.keras.optimizers.SGD() # define loss function for each replica loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) def _replica_loss(labels, logits): loss_value = loss_fn(labels, logits) return tf.nn.compute_average_loss(loss_value, global_batch_size=batch_size) # define dense model train step @tf.function def dense_train_step(dense_inputs, labels): with tf.GradientTape() as tape: # should watch inputs, in order to obtain gradients later tape.watch(dense_inputs) logits = dense_model(dense_inputs) replica_loss = _replica_loss(labels, logits) grads, input_grads = tape.gradient(replica_loss, [dense_model.trainable_weights, dense_inputs]) dense_opt.apply_gradients(zip(grads, dense_model.trainable_weights)) return replica_loss, input_grads # define whole model train step @tf.function def total_train_step(row_indices, values, labels): with tf.GradientTape() as tape: # do embedding fprop embedding_results = sparse_model(row_indices, values) # convert to PerReplica dense_inputs = tf.split(embedding_results, num_or_size_splits=gpu_count) dense_inputs = PerReplica(dense_inputs) labels = tf.expand_dims(labels, axis=1) labels = tf.split(labels, num_or_size_splits=gpu_count) labels = PerReplica(labels) replica_loss, input_grads = strategy.run(dense_train_step, args=(dense_inputs, labels)) # gather all grads from dense replicas all_grads = tf.concat(input_grads.values, axis=0) # do embedding backward embedding_grads = tape.gradient(embedding_results, sparse_model.trainable_weights, output_gradients=all_grads) sparse_opt.apply_gradients(zip(embedding_grads, sparse_model.trainable_weights)) return strategy.reduce(tf.distribute.ReduceOp.SUM, replica_loss, axis=None) # create a tf.data.Dataset to read data dataset = ... # training loop for step, (row_indices, values, labels) in enumerate(dataset): total_loss = total_train_step(row_indices, values, labels) # you can save model, print loss or do sth. else.
[ "tensorflow.nn.compute_average_loss", "hugectr_tf_ops.create_embedding", "hugectr_tf_ops.fprop_v4", "tensorflow.keras.layers.Dense", "tensorflow.keras.optimizers.SGD", "tensorflow.concat", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.shape", "tensorflow.python.distribute.values.PerReplica", "tensorflow.split", "tensorflow.GradientTape", "tensorflow.expand_dims" ]
[((5341, 5366), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {}), '()\n', (5364, 5366), True, 'import tensorflow as tf\n'), ((5599, 5699), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(False)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(from_logits=False, reduction=tf.keras.\n losses.Reduction.NONE)\n', (5633, 5699), True, 'import tensorflow as tf\n'), ((2646, 3106), 'hugectr_tf_ops.create_embedding', 'hugectr_tf_ops.create_embedding', (['initializer'], {'name_': 'name', 'embedding_type': 'self.embedding_type', 'optimizer_type': 'self.optimizer_type', 'max_vocabulary_size_per_gpu': 'self.vocabulary_size_each_gpu', 'opt_hparams': 'self.opt_hparam', 'update_type': 'self.update_type', 'atomic_update': 'self.atomic_update', 'slot_num': 'self.slot_num', 'max_nnz': 'self.max_nnz', 'max_feature_num': 'self.max_feature_num', 'embedding_vec_size': 'self.embedding_vec_size', 'combiner': 'self.combiner'}), '(initializer, name_=name, embedding_type=\n self.embedding_type, optimizer_type=self.optimizer_type,\n max_vocabulary_size_per_gpu=self.vocabulary_size_each_gpu, opt_hparams=\n self.opt_hparam, update_type=self.update_type, atomic_update=self.\n atomic_update, slot_num=self.slot_num, max_nnz=self.max_nnz,\n max_feature_num=self.max_feature_num, embedding_vec_size=self.\n embedding_vec_size, combiner=self.combiner)\n', (2677, 3106), False, 'import hugectr_tf_ops\n'), ((3739, 3975), 'hugectr_tf_ops.fprop_v4', 'hugectr_tf_ops.fprop_v4', ([], {'embedding_name': 'self.embedding_name', 'row_indices': 'row_indices', 'values': 'values', 'bp_trigger': 'self.bp_trigger', 'is_training': 'training', 'output_shape': '[self.batch_size, self.slot_num, self.embedding_vec_size]'}), '(embedding_name=self.embedding_name, row_indices=\n row_indices, values=values, bp_trigger=self.bp_trigger, is_training=\n training, output_shape=[self.batch_size, self.slot_num, self.\n embedding_vec_size])\n', (3762, 3975), False, 'import hugectr_tf_ops\n'), ((4422, 4563), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'activation': '"""sigmoid"""', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""glorot_normal"""'}), "(units=1, activation='sigmoid', use_bias=True,\n kernel_initializer='glorot_normal', bias_initializer='glorot_normal')\n", (4443, 4563), True, 'import tensorflow as tf\n'), ((5514, 5539), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {}), '()\n', (5537, 5539), True, 'import tensorflow as tf\n'), ((5794, 5862), 'tensorflow.nn.compute_average_loss', 'tf.nn.compute_average_loss', (['loss_value'], {'global_batch_size': 'batch_size'}), '(loss_value, global_batch_size=batch_size)\n', (5820, 5862), True, 'import tensorflow as tf\n'), ((5978, 5995), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5993, 5995), True, 'import tensorflow as tf\n'), ((6562, 6579), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6577, 6579), True, 'import tensorflow as tf\n'), ((6752, 6809), 'tensorflow.split', 'tf.split', (['embedding_results'], {'num_or_size_splits': 'gpu_count'}), '(embedding_results, num_or_size_splits=gpu_count)\n', (6760, 6809), True, 'import tensorflow as tf\n'), ((6837, 6861), 'tensorflow.python.distribute.values.PerReplica', 'PerReplica', (['dense_inputs'], {}), '(dense_inputs)\n', (6847, 6861), False, 'from tensorflow.python.distribute.values import PerReplica\n'), ((6883, 6913), 'tensorflow.expand_dims', 'tf.expand_dims', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (6897, 6913), True, 'import tensorflow as tf\n'), ((6935, 6981), 'tensorflow.split', 'tf.split', (['labels'], {'num_or_size_splits': 'gpu_count'}), '(labels, num_or_size_splits=gpu_count)\n', (6943, 6981), True, 'import tensorflow as tf\n'), ((7003, 7021), 'tensorflow.python.distribute.values.PerReplica', 'PerReplica', (['labels'], {}), '(labels)\n', (7013, 7021), False, 'from tensorflow.python.distribute.values import PerReplica\n'), ((7199, 7236), 'tensorflow.concat', 'tf.concat', (['input_grads.values'], {'axis': '(0)'}), '(input_grads.values, axis=0)\n', (7208, 7236), True, 'import tensorflow as tf\n'), ((4342, 4394), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1024)', 'activation': '"""relu"""'}), "(units=1024, activation='relu')\n", (4363, 4394), True, 'import tensorflow as tf\n'), ((4755, 4771), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (4763, 4771), True, 'import tensorflow as tf\n')]