CombinedText
stringlengths
4
3.42M
#License#
#bitHopper by Colin Rice is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
#Based on a work at github.com.

import time


class Getwork_store:
    """Maps getwork merkle roots to the pool server that issued them.

    Entries are stored as ``{merkle_root: [server_name, timestamp]}``.
    A twisted LoopingCall runs prune() every 60 seconds so stale work
    does not accumulate.
    """

    # Work older than this many seconds is dropped by prune().
    MAX_AGE = 60 * 5

    def __init__(self):
        self.data = {}
        # Imported lazily so the class can be constructed and exercised
        # (e.g. in tests) without twisted installed / a reactor running.
        from twisted.internet.task import LoopingCall
        call = LoopingCall(self.prune)
        call.start(60)

    def add(self, server, merkle_root):
        """Record which server produced the given merkle root."""
        self.data[merkle_root] = [server["name"], time.time()]

    def get_server(self, merkle_root):
        """Return the issuing server's name, or None if unknown."""
        # dict.has_key() was removed in Python 3; use .get() instead.
        entry = self.data.get(merkle_root)
        return entry[0] if entry is not None else None

    def prune(self):
        """Drop entries older than MAX_AGE seconds."""
        cutoff = time.time() - self.MAX_AGE
        # Iterate over a snapshot: deleting from a dict while iterating
        # its live view raises RuntimeError on Python 3.
        for key, work in list(self.data.items()):
            if work[1] < cutoff:
                del self.data[key]
from bs4 import BeautifulSoup
import re
import sys
import json
import time
from collections import OrderedDict

from django.core.management.base import BaseCommand
from django.db.models import Q

from programs.models import Program, ProgramModules, LearningPlan
from disciplines.models import Discipline, Semester, TrainingTerms
from modules.models import Module


def write_roman(num):
    """Convert a positive integer to its Roman-numeral string."""
    roman = OrderedDict()
    roman[1000] = "M"
    roman[900] = "CM"
    roman[500] = "D"
    roman[400] = "CD"
    roman[100] = "C"
    roman[90] = "XC"
    roman[50] = "L"
    roman[40] = "XL"
    roman[10] = "X"
    roman[9] = "IX"
    roman[5] = "V"
    roman[4] = "IV"
    roman[1] = "I"

    def roman_num(num):
        # Greedy subtraction of the largest fitting value at each step.
        for r in roman.keys():
            x, y = divmod(num, r)
            yield roman[r] * x
            num -= (r * x)
            if num > 0:
                roman_num(num)
            else:
                break

    return "".join([a for a in roman_num(num)])


class Command(BaseCommand):
    """
    Example:
        ./manage.py parse_new "/home/developer/КТОМ 4.html" uni_fixtures/modules.json ./get_programs.html "Конструкторско-технологическое обеспечение машиностроительных производств"
    """
    help = "Create Django objects from raw&ugly UrFU data."
    requires_system_checks = True
    requires_migrations_checks = True

    class bcolors:
        # ANSI escape codes for coloured console output.
        HEADER = '\033[95m'
        OKBLUE = '\033[96m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'

    def decompose(self, soup, tag, classname):
        """Remove every <tag class=classname> element from the soup in place."""
        [el.decompose() for el in soup.find_all(tag, {'class': classname})]

    def add_arguments(self, parser):
        parser.add_argument('html_path', nargs=1)
        parser.add_argument('uni_modules_path', nargs=1)
        parser.add_argument('programs_path', nargs=1)
        parser.add_argument('program_title', nargs=1)

    def handle(self, *args, **options):
        """Parse the UrFU learning-plan HTML export and create/update
        Program, LearningPlan, Module and ProgramModules records."""
        start_time = time.time()
        html_path = options["html_path"][0]
        uni_modules_path = options["uni_modules_path"][0]
        program_title = options["program_title"][0]
        programs_path = options["programs_path"][0]
        # NOTE(review): the broad excepts below mask the real I/O error;
        # kept for behavioural compatibility.
        try:
            with open(html_path, encoding='utf-8') as html_file:
                raw_html = '\n'.join(html_file.readlines())
        except:
            raise FileNotFoundError
        try:
            with open(uni_modules_path, encoding='utf-8') as modules_file:
                modules_json = json.load(modules_file)
        except:
            raise FileNotFoundError
        try:
            with open(programs_path, encoding='utf-8') as programs_file:
                raw_programs = '\n'.join(programs_file.readlines())
        except:
            raise FileNotFoundError
        if raw_programs:
            # Create any Program rows that are listed in the programs page
            # but missing from the database.
            programs_soup = BeautifulSoup(raw_programs, 'lxml')
            rows = []
            for row in programs_soup.find_all('tr', {"class": "main-info"}):
                rows.append([val.text.strip() for val in row.find_all('td')])
            for row in rows:
                try:
                    program = Program.objects.get(title=row[1])
                except:
                    def level(x):
                        # Maps the textual degree level to a one-letter code;
                        # relies on exactly one key evaluating to True.
                        return {
                            'Магистр'.lower() in str(x).lower(): "m",
                            'Специалист'.lower() in str(x).lower(): "s",
                            'Бакалавр'.lower() in str(x).lower(): "b",
                        }[True]
                    program = Program(title=row[1],
                                      training_direction=row[2],
                                      level=level(row[4]),
                                      )
                    program.save()
                    print(f"{self.bcolors.BOLD}Создана программа программа \"{row[1]}\"?{self.bcolors.ENDC}")
        try:
            program = Program.objects.filter(title=program_title).first()
            program.status = "p"
            program.save()
        except:
            raise NotImplementedError
        if not raw_html:
            sys.exit(1)
        # Strip scripts, styles and navigation chrome before parsing.
        soup = BeautifulSoup(raw_html, 'lxml')
        [s.extract() for s in soup('script')]
        [s.extract() for s in soup('style')]
        self.decompose(soup, "table", "menu_table")
        self.decompose(soup, "td", "navpath")
        self.decompose(soup, "div", "buttons")
        soup.find('td', id="nav_td").decompose()
        # Each plan attribute is optional in the export; fall back to a
        # default when the cell is absent.
        try:
            stage = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.stage").text.strip().lower() == "утверждено"
        except:
            stage = False
        try:
            displayableTitle = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.displayableTitle").text.strip()
        except:
            displayableTitle = ""
        try:
            number = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.number").text.strip()
        except:
            number = ""
        try:
            active = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.active").text.strip()
        except:
            active = "нет"
        try:
            title = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.title").text.strip()
        except:
            title = ""
        try:
            loadTimeType = soup.find("td", id="EduVersionPlanTab.EduVersionPlan.loadTimeType").text.strip()
        except:
            loadTimeType = "часов в неделю"
        html = soup.find("table", {"class": "basic"}).prettify()
        # Update every published plan with this number, or create one.
        lps = LearningPlan.objects.filter(uni_number=number, status="p")
        if len(lps) > 0:
            for lp in lps:
                lp.uni_displayableTitle = displayableTitle
                lp.uni_number = number
                lp.uni_active = active
                lp.uni_title = title
                lp.uni_stage = stage
                lp.uni_loadTimeType = loadTimeType
                lp.uni_html = html
                lp.save()
                if lp not in program.learning_plans.all():
                    program.learning_plans.add(lp)
                    program.save()
        else:
            lp = LearningPlan(uni_displayableTitle=displayableTitle,
                              uni_number=number,
                              uni_active=active,
                              uni_title=title,
                              uni_stage=stage,
                              uni_loadTimeType=loadTimeType,
                              uni_html=html,
                              status="p"
                              )
            lp.save()
            program.learning_plans.add(lp)
            program.save()
        table = soup.find('table', id="EduVersionPlanTab.EduDisciplineList")
        headers = [header.text.strip() for header in table.find_all('th')]

        def find_row_index(row_text):
            # NOTE(review): currently unused; kept for parity with the
            # id-based variant below.
            headers = table.find_all('th')
            return headers.index(table.find('th', text=row_text))

        def find_row_index_id(id):
            # Column index of the <th> with the given id attribute.
            headers = table.find_all('th')
            return headers.index(table.find('th', id=id))

        rows = []
        for row in table.find_all('tr'):
            rows.append([val.text.strip() for val in row.find_all('td')])
        # Find the modules: match each table row's module number against
        # the numbers from the modules JSON fixture.
        modules = []
        for header in headers:
            if "Номер модуля, дисциплины".lower() == header.lower():
                module_numbers_col = headers.index(header)
        for row in rows:
            if row:
                m = re.search('\d\d+', row[module_numbers_col])
                if m and "М" in row[1]:
                    for module in modules_json:
                        if str(module["number"]) == str(m.group(0)):
                            module["row"] = row
                            modules.append(module)
        program_modules = ProgramModules.objects.filter(program=program)
        for module in modules:
            print(" ", module['title'])
            if program_modules.filter(module__uni_uuid=module["uuid"]):
                print(f"Модуль есть: {module['title']}")
        # A "зао" marker in the plan number means extramural study.
        fulltime = False
        if 'зао' not in number:
            fulltime = True
        print("fulltime: ", fulltime)
        program_modules = []
        if fulltime:
            term = TrainingTerms.objects.filter(title="4 года").first()
            for module in [m for m in modules if m["disciplines"]]:
                module_obj, semester = self.create_module(find_row_index_id, module, program, program_modules)
                semester = self.create_disciplines(find_row_index_id, module, module_obj, row, rows, semester, program, term)
        print(f"{self.bcolors.HEADER}В программе {len(program_modules)} модулей:{self.bcolors.ENDC}", end="\n ")
        print("\n ".join([pm.module.title for pm in program_modules]))
        # Warn when the same module title is attached to the program twice.
        if len(ProgramModules.objects.filter(Q(program=program))) != len(set([pm.module.title for pm in ProgramModules.objects.filter(Q(program=program))])):
            print(f"{self.bcolors.FAIL}Найдено дублирование модулей программы. Удалите их, либо поправьте в интерфейсе администратора (Ctrl+С).{self.bcolors.ENDC}")
        # Offer to delete program modules that were not (re)created above.
        program_modules_fail = ProgramModules.objects.filter(~Q(id__in=[o.id for o in program_modules]), Q(program=program))
        for pmf in program_modules_fail:
            remove = input(f"{self.bcolors.WARNING}Неверный модуль программы: {pmf.module.title}. Удалить?{self.bcolors.ENDC}")
            if remove.lower() in ("y", "да", "ok", "ок"):
                pmf.delete()
                print(f"{self.bcolors.OKGREEN}Удалено.{self.bcolors.ENDC}")

    def create_module(self, find_row_index_id, module, program, program_modules):
        """Find or create the Module and ProgramModules rows for *module*.

        Returns (module_obj, semester); appends the ProgramModules row to
        *program_modules* as a side effect.
        """
        print(f"{self.bcolors.HEADER}Ищем или создаём модуль: {module['title']}{self.bcolors.ENDC}")
        # Scan term columns 10..1; the last assignment (lowest i with a
        # positive credit value) wins. 99 marks "not found".
        semester = 99  # fix: was unbound when no cell raised and none was > 0
        for i in range(10, 0, -1):
            try:
                ze = module["row"][find_row_index_id(f"EduVersionPlanTab.EduDisciplineList.__term{i}.__term{i}headerCell")]
                try:
                    if int(ze) > 0:
                        semester = i
                except:
                    pass
            except:
                semester = 99
        if semester == 99:
            print(f"Семестр: {self.bcolors.FAIL}{semester}{self.bcolors.ENDC}")
        else:
            print(f"Семестр: {semester}")
        try:
            # Existing module: refresh every uni_* attribute from the fixture.
            module_obj = Module.objects.filter(title=module["title"], uni_number=module["number"]).first()
            module_obj.uni_uuid = module["uuid"]
            module_obj.uni_number = module["number"]
            module_obj.uni_coordinator = module["coordinator"]
            module_obj.uni_type = module["type"]
            module_obj.uni_title = module["title"]
            module_obj.uni_competence = module["competence"]
            module_obj.uni_testUnits = module["testUnits"]
            module_obj.uni_priority = module["priority"]
            module_obj.uni_state = module["state"]
            module_obj.uni_approvedDate = module["approvedDate"]
            module_obj.uni_comment = module["comment"]
            module_obj.uni_file = module["file"]
            module_obj.uni_specialities = module["specialities"]
            module_obj.program = program
            module_obj.semester = semester
            module_obj.status = 'p'
            module_obj.save()
            print(f"{self.bcolors.OKBLUE}Модуль найден: {module['title']}{self.bcolors.ENDC}")
        except:
            # .first() returned None (or a field was missing): create fresh.
            print(f"{self.bcolors.BOLD}Модуль создан: {module['title']}{self.bcolors.ENDC}")
            module_obj = Module(title=module["title"],
                                uni_uuid=module["uuid"],
                                uni_number=module["number"],
                                uni_coordinator=module["coordinator"],
                                uni_type=module["type"],
                                uni_title=module["title"],
                                uni_competence=module["competence"],
                                uni_testUnits=module["testUnits"],
                                uni_priority=module["priority"],
                                uni_state=module["state"],
                                uni_approvedDate=module["approvedDate"],
                                uni_comment=module["comment"],
                                uni_file=module["file"],
                                uni_specialities=module["specialities"],
                                program=program,
                                semester=semester,
                                status='p',
                                )
            module_obj.save()
        program_module = ProgramModules.objects.filter(program=program, module=module_obj).first()
        if not program_module:
            print(f"{self.bcolors.WARNING}Модуль программы не найден, создаём: {module['title']} / {program.title}{self.bcolors.ENDC}")
            program_module = ProgramModules(program=program, module=module_obj, semester=module_obj.semester, status="p")
            program_module.save()
        else:
            print(
                f"{self.bcolors.OKBLUE}Модуль программы найден {module['title']} / {program.title}{self.bcolors.ENDC}")
        program_modules.append(program_module)
        return module_obj, semester

    def create_disciplines(self, find_row_index_id, module, module_obj, row, rows, semester, program, term):
        """Collect the semester numbers in which the module's disciplines
        appear; returns the list for the last matched discipline row.

        NOTE: the actual Discipline-creation logic was drafted but is
        disabled; see VCS history for the commented-out implementation.
        """
        semesters = []  # fix: was unbound when no discipline row matched
        for d in module["disciplines"]:
            if int(d["testUnits"]) > 0:
                for row in rows:
                    if d["title"] in row:
                        print(f"{self.bcolors.OKGREEN}Дисциплина: {d['title']}{self.bcolors.ENDC}")
                        semesters = []
                        for i in range(1, 10):
                            try:
                                ze = row[
                                    find_row_index_id(f"EduVersionPlanTab.EduDisciplineList.__term{i}.__term{i}headerCell")]
                                try:
                                    if int(ze) > 0:
                                        semesters.append(i)
                                        print(f"appanded {i} semester")
                                except:
                                    pass
                            except:
                                pass
                        print(semesters)
                        if len(row[5]) > 0:
                            # Cells like "3-5" describe a semester span.
                            cell_values = row[5].split("-")
                            if len(cell_values) > 1:
                                # fix: range() requires ints; split() yields strings
                                cell_values = range(int(cell_values[0]), int(cell_values[1]))
                            print(cell_values)
        return semesters
# ================================================= # --------- tms - module of Variables for re-use---------------- # -------------- # Section 0 - Required modules # Section 1 - Planeflight variables # Section 2 - Drivers functions # Section 3 - GeosChem (bpch) prod loss variables # Section 4 - GeosChem (bpch) general variables # Section 5 - Misc # Section 6 - Dynamic p/l processing # Section 7 - Obervational variables # ---------------------------- ------------- ------------- ------------- # -------------- Contents # --------------- ------------- ------------- ------------- # ---- Section 0 ----- Required modules # --------------- ------------- ------------- ------------- # ---- Section 1 ----- Planeflight variables # 1.01 - PF variable dictionary *** # 1.02 - TRA_?? to Geos-Chem species name *** # --------------- ------------- ------------- ------------- # ---- Section 2 ----- Drivers functions # 2.01 - P/L tag to PD tag *** # 2.02 - Get P/L dictionary for a given family *** # 2.03 - Get P/L dictionary for a given species *** # --------------- ------------- ------------- ------------- # ---- Section 3 ----- GeosChem (bpch) prod loss variables # 3.01 - Spec to photolysis reaction p/l tag # 3.02 - Ox family for tag # --------------- ------------- ------------- ------------- # ---- Section 4 ----- GeosChem (bpch) general variables # 4.01 - v9-2 species in input.geos from num # 4.02 - Get Species Mass # 4.03 - Get Species stoichiometry # 4.04 - GEOS-Chem/ctm.bpch values (current main dict ) *** # 4.05 - latex species name # 4.06 - converts P/L tracer mulitpler to 1 # 4.07 - Returns tracers unit and scale (if requested) # 4.08 - Store of dirs for earth0, atmosviz1, and tms mac # 4.09 - Ox in species (redundant now? 
should adapt species stoich ) # 4.10 - Get Gaw site name from GAW ID # 4.11 - returns dict of gaw sites # 4.12 - Return lat, lon, alt for a given resolution # 4.13 - Get model array dimension for a given resolution # 4.14 - Convert gamap category/species name to Iris/bpch name # 4.99 - Reference data, (inc. grid data) from gchem # --------------- ------------- ------------- ------------- # ---- Section 5 ----- Misc # 5.01 - dir store (standard directories on different servers ) # 5.02 - Store of constants for use by funcs/progs # --------------- ------------- ------------- ------------- # ---- Section 6 ----- Dynamic prod/loss dictionary processing ( For GEOS-Chem) # 6.01 - Make rxn dict of all active reactions *** # 6.02 - Return all reactions for a given p/l family *** # 6.03 - Return reaction infomaiton for given tags (e.g. PD... ) # 6.04 - Create an indices list to split reaction by family (e.g. for Ox loss) # 6.05 - Return tags for a given reaction # 6.06 - Extract all p/l speacies in a given input.geos # 6.07 - Extract all active tags from a given smv.log # 6.08 - Extract all active PDs from a given smv.log # 6.09 - get all active reaction for a given tag # 6.10 - get all details for a given tag # 6.11 - get reaction coeffifecent # --------------- ------------- ------------- ------------- # ---- Section 7 ----- Observational variables # 7.01 - IO observation dictionary # 7.02 - BAE flight ID dictionary # 7.03 - CAST flight dictionary for CIMS/CIMSII # 7.04 - Iodocarbon obs. meta data # 7.05 - Stores locations for use by funcs/progs - LON, LAT, ALT - double up? ( with 5.02 ?? 
) # 7.06 - Get Locations of observations (lats, lons, alts ) for given sites # 7.07 - sonde station variables (list of 432 sondes) # 7.08 - returns (lat, lon, alt (press), timezone (UTC) ) for a given site # ------------------ Section 0 ----------------------------------- # -------------- Required modules: # #!/usr/bin/python # # -- I/O / Low level import re import platform import pandas as pd from netCDF4 import Dataset #import Scientific.IO.NetCDF as S import sys import glob # - Math/Analysis import numpy as np # - tms from AC_tools.funcs4core import * # ------------------------------------------- Section 1 ------------------------------------------- # -------------- Planeflight variables # # -------------- # 1.01 - dictionary of variables used for planeflight_mod.F output # ------------- def pf_var( input, ver='1.7', ntracers=85, JREAs=[] ): # planeflight variable lists metvars = [ 'GMAO_TEMP', 'GMAO_ABSH', 'GMAO_SURF', 'GMAO_PSFC', 'GMAO_UWND', 'GMAO_VWND' ] species = [ 'O3', 'NO2', 'NO', 'NO3', 'N2O5', 'HNO4', 'HNO3', 'HNO2', 'PAN', 'PPN', 'PMN', 'R4N2', 'H2O2', 'MP', 'CH2O', 'HO2', 'OH', 'RO2', 'MO2', 'ETO2', 'CO', 'C2H6', 'C3H8', 'PRPE', 'ALK4', 'ACET', 'ALD2', 'MEK', 'RCHO', 'MVK', 'SO2', 'DMS', 'MSA', 'SO4', 'ISOP' ] all_species_not_TRA = [ 'A3O2', 'ATO2', 'B3O2', 'EOH', 'ETO2', 'ETP', 'GLYX', 'HO2', 'IAP', 'INO2', 'INPN', 'ISN1', 'ISNOOA', 'ISNOOB', 'ISNOHOO', 'ISNP', 'KO2', 'MAN2', 'MAO3', 'MAOP', 'MAOPO2', 'MCO3', 'MGLY', 'MO2', 'MRO2', 'MRP', 'OH', 'PO2', 'PP', 'PRN1', 'PRPN', 'R4N1', 'R4O2', 'R4P', 'RA3P', 'RB3P', 'RCO3', 'RIO2', 'ROH', 'RP', 'VRO2', 'VRP', 'LISOPOH', 'ISOPND', 'ISOPNB', 'HC5', 'DIBOO', 'HC5OO', 'DHMOB', 'MOBAOO', 'ISOPNBO2', 'ISOPNDO2', 'ETHLN', 'MACRN', 'MVKN', 'PYAC', 'IEPOXOO', 'ATOOH', 'PMNN', 'MACRNO2', 'PMNO2' ] OH_reactivity=[ 'NO', 'ISOP', 'GMAO_TEMP', 'GMAO_PSFC', 'CO', 'ACET', 'ALD2', 'MEK', 'MVK', 'MACR', 'C3H8', 'CH2O', 'C2H6', 'SO2', 'NO2', 'ISOPNB', 'ISOPND', 'NO3', 'HNO2', 'HNO3', 'OH', 'HO2', 'H2O2', 'MP', 'ATOOH', 
'HNO4', 'ALK4', 'ISN1', 'R4N2', 'RCHO', 'ROH', 'PRPE', 'PMN', 'GLYC', 'GLYX', 'MGLY', 'HAC', 'INPN', 'PRPN', 'ETP', 'RA3P', 'RB3P', 'R4P', 'RP', 'PP', 'RIP', 'IEPOX', 'IAP', 'VRP', 'MRP', 'MAOP', 'MAP', 'DMS', 'HBr', 'Br2', 'BrO', 'CHBr3', 'CH2Br2', 'CH3Br', 'HC5', 'ISOPND', 'ISOPNB', 'ISNP', 'MVKN', 'MACRN', 'DHMOB', 'MOBA', 'ETHLN', 'PROPNN' ] OH_Extras4nic = [ 'OH', 'MCO3', 'A3O2', 'PO2', 'R4O2', 'R4O2', 'R4N1', 'ATO2', 'KO2', 'RIO2', 'VRO2', 'MRO2', 'MAN2', 'B3O2', 'INO2', 'ISNOOA', 'ISNOOB', 'ISNOHOO', 'PRN1', 'RCO3', 'MAO3', 'IEPOXOO', 'MAOPO2', 'MAOPO2', 'HC5OO', 'HC5OO', 'ISOPNDO2', 'ISOPNDO2', 'ISOPNBO2', 'ISOPNBO2', 'DIBOO', 'DIBOO', 'MOBAOO', 'MOBAOO', 'H2', 'CH4', 'HCOOH', 'MOH', 'ACTA', 'EOH', 'VRP' ] # remove inactive species inactive_spec = ['ACTA', 'CH4', 'EOH', 'H2', 'HCOOH', 'MOH'] [ OH_Extras4nic.pop(ii) for ii in sorted([ OH_Extras4nic.index(i) \ for i in inactive_spec ])[::-1] ] # Setup list of tracers if ver == '1.7': ntracers=85 if ver == '2.0': ntracers=101 if ver == 'johan_br.v92': ntracers=87 TRAs = ['TRA_'+ str(i) for i in range(1, ntracers+1) ] # TRAs = ['TRA_{:0>2}'.format(i) for i in range(1, ntracers+1) ] # Setup list of reactions ( photolysis and general ) if ver == '1.5': PHOT_1st, PHOT_last = 455, 533 if ver == '1.6': PHOT_1st, PHOT_last = 453, 531 if ver == '1.6.1': PHOT_1st, PHOT_last = 453, 530 if ver == '1.7': PHOT_1st, PHOT_last = 453, 529 if ver == '2.0': PHOT_1st, PHOT_last = 413, 614 JREAs = ['REA_'+ str(i) for i in range(PHOT_1st, PHOT_last) ] REAs_all = ['REA_'+ str(i) for i in range(0, 533) ] # reduced list for high time and spatial resolution if any( [ input ==i for i in 'slist_v9_2_NREA_red', 'slist_v9_2_NREA_red_NOy'] ): TRAs = GC_var('active_I') + ['AERI'] TRAs= [ num2spec( i, ver=ver, invert=True) for i in TRAs ] TRAs = [ 'TRA_{:0>2}'.format( i) for i in TRAs ] metvars = [ i for i in metvars if not any( [ (i==ii) \ for ii in 'GMAO_ABSH', 'GMAO_SURF', 'GMAO_PSFC' ] ) ] species = [ i for i in species if not any( [ 
(i==ii) for ii in 'R4N2', 'MP', 'CH2O', 'MO2', 'ETO2', 'CO', 'C2H6', 'C3H8', 'PRPE', 'ALK4', 'ACET', 'ALD2', 'MEK', 'RCHO', 'MVK', 'DMS', 'MSA', 'ISOP' ]) ] if input =='slist_ClearFlo': TRAs = 'CO', 'ACET', 'ALD2', 'ISOP', 'C2H6', 'C3H8', 'CH2O', \ 'MACR', 'HNO2', 'HNO3', 'MVK', 'NO', 'NO2', 'PAN', 'O3', TRAs= [ num2spec( i, ver=ver, invert=True) for i in TRAs ] TRAs = [ 'TRA_{:0>2}'.format( i) for i in TRAs ] # mannually add ethanol TRAs += [ 'TRA_86'] species = [ 'OH', 'MO2','HO2' ] if input =='slist_v9_2_NREA_red_NOy': # THIS IS NOT A GOOD APPROACH, use actual names an tranlate based on verison. # missing = [ 'TRA_17', 'TRA_60', 'TRA_30', 'TRA_31', 'TRA_50', \ # 'TRA_54', 'TRA_55', 'TRA_57' ] # use tracer TRA_60, TRA_30, TRA_31, for: 'MMN' , 'NH3' , 'NH4', 'R4N2', 'BrNO2', 'BrNO3','MPN', 'PROPNN', missing = 'MMN' , 'NH3' , 'NH4', 'R4N2', 'BrNO2', 'BrNO3','MPN', 'PROPNN' missing = [ num2spec( i, ver=ver, invert=True) for i in missing ] missing = [ 'TRA_{:0>2}'.format( i) for i in missing ] species = species + missing # Construct dictionary d= { 'species' : species, 'metvars' : metvars, 'REAs_all' : REAs_all, 'JREAs': JREAs, 'TRAs' : TRAs, 'slist' : species +TRAs +JREAs+ metvars , 'slist_v9_2_NH' : species + TRAs[:66] + metvars , 'slist_v9_2_NREA' : species + TRAs + metvars , 'slist_v9_2_NREA_red': species + TRAs + metvars, 'slist_REAs_all' : species + TRAs + REAs_all + metvars, 'slist_REAs_all_OH' : species + TRAs + metvars+OH_reactivity, 'slist_REAs_all_OH_extras' : all_species_not_TRA + TRAs + metvars, 'slist_v9_2_NREA_red_NOy' : species + TRAs + metvars, 'slist_v10_1.7_allspecs': all_species_not_TRA +TRAs+ JREAs +metvars, 'slist_ClearFlo': species + TRAs + metvars } # retrieve variable list from dictionary vars = d[input] # return unique list vars = sorted( list( set( vars) ) ) print vars return vars # -------------- # 1.02 - Translator for planeflight species to GEOS-Chem species # ------------- def what_species_am_i(input=None, V_9_2=True, V_9_2_C=False, \ 
        ver='1.7', special_case=None, invert=False, rtn_dict=False, \
        debug=False ) :
    """
    What GEOS-Chem (GC) Species am i? takes TRA_## & returns GC ID or
    other wayround.

    Selects the version-specific translation dictionary (stored in
    GC_var) and returns the translation for "input", the whole mapping
    (rtn_dict=True), or the inverted mapping (invert=True).
    NOTE: Python 2 code ("print" statement used below).
    """
    # select correct naming dictionary for the requested model version
    var ={ \
        '1.7': 'GCFP_d2TRA_all_1.7',
        '1.6': 'GCFP_d2TRA_all_1.6'
    }[ver]
    # special_case = 'EOH'
    # special_case = 'EOH + actual names'
    # if all_TRA:
    #    var ={ \
    #    '1.7': 'all_TRA_spec_met_1.7_EOH'
    #    '1.6': 'GCFP_d2TRA_1.6'
    #    }[ver]
    if not isinstance( special_case, type(None) ):
        # NOTE(review): every entry below is commented out, so this is an
        # empty dict and ANY special_case value raises KeyError — confirm
        # whether one of the mappings was meant to be active.
        var = {
        # 'EOH':'GCFP_d2TRA_all_1.7_EOH',
        # 'EOH + actual names':'GCFP_d2TRA_all_1.7_EOH_actual_names'
        # 'all_TRA_spec_met_1.7_EOH' : 'TRA_spec_met_all_1.7_EOH'
        #' 'all_TRA_spec_met_1.7_EOH':'TRA_spec_met_all_1.7_EOH_no_trailing_zeroes'
        # TRA_spec_met_all_1'
        }[special_case]

    # Get dictionary from variable store
    d = GC_var( var )
    if debug:
        print d

    # invert to map species name -> tracer ID
    if invert:
        d = {v: k for k, v in d.items()}

    # return the whole dictionary, or just the translation for "input"
    if rtn_dict:
        return d
    else:
        return d[input]

# ---------------- Section 2 -------------------------------------------
# -------------- Drivers
#

# --------------
# 2.01 - Convert Production/Loss RD IDs for O3 to PD## for input.geos/tracer.dat linked files
# -------------
def PLO3_to_PD(PL, fp=True, wd=None, ver='1.6', res='4x5',debug=False):
    """
    Converts an O3 production/loss tag (e.g. 'LOX', 'PIOx') to the PD##
    name used in the input.geos / tracer.dat linked files for the given
    model version; prints a warning for unsupported versions.
    """
    if any( [(ver ==i) for i in '1.3' ,'1.4' ,'1.5' , '1.6', '1.7' ]):
        # default to the first MUTD run directory when none is given
        if wd==None:
            if debug:
                print 'WARNING: Using MUTD wd'
            wd = MUTD_runs(ver=ver, res=res, debug=debug)[0]
        PDs, vars = p_l_species_input_geos( wd, ver=ver,
            rm_multiple_tagged_rxs=True)
        # Add other vars for ease of processing
        vars += ['PIOx', 'iLOX', 'LIOx', 'iPOX', 'POX', 'LOX', 'LOx', 'L_Iy']
        PDs += ['PIOx', 'iLOX', 'LIOx', 'iPOX', 'POX', 'LOX', 'LOx', 'L_Iy']
        return dict( zip(vars, PDs))[PL ]
    else:
        print 'update programme - manual PLdict now obsolete. '

# -------------
# 2.02 - DRIVER - uses functions to build a dictionary for a given family of loss
# -------------
def get_pl_dict( wd, spec='LOX' , rmx2=False, debug=False):
    # Get reaction IDs for each rxn. in spec (p/l, e.g. LOX)
    # (get reaction numbers tracked by the p/l diagnostic, plus coefficients)
    nums, rxns, tags, Coe = prod_loss_4_spec( wd, spec, all_clean=True,
        debug=debug )
    # Make a dictionary of coeffiecnts of reaction
    Coe_dict = dict(zip(nums, Coe) )
    # unpack for mulutple tags of same reactions, then get details
    unpacked_tags = [j for k in tags for j in k ]
    details = [ get_tag_details( wd, tag ) for tag in unpacked_tags ]
    # Kludge - 1 rxn missing from POx tracking? - 999 + "'ISOPND+OH', '+', '=1.0ISOPND'"
    [ details.pop(n) for n, i in enumerate( details ) if i[1]==364]
    ind = [n for n, i in enumerate( nums ) if i ==354 ]
    # Get Coes and overwrite where prog_mod_tms has values
    Coes = [ get_rxn_Coe( wd, d[1], unpacked_tags[n], nums=nums, \
        rxns=rxns, tags=tags, Coe=Coe, spec=spec, debug=debug ) \
        for n, d in enumerate( details ) ]
    # Remove double ups, which are present due to Loss (LO3_??) and rate tagging (RD??) originally performed separately
    if rmx2:
        d = [ ['RD62', 'LO3_38'], ['RD59', 'LO3_30'], ['RD65', 'LO3_34'], \
            ['RD93', 'LO3_55'], ['RD92', 'LO3_39'], [ 'RD95', 'LO3_36'], \
            ['RD67', 'LO3_35'] ]
        d = [i[0] for i in d ]
        ind = [ n for n, i in enumerate(details) if any( [ i[0] == ii \
            for ii in d ] ) ]
        if debug:
            print d, ind, [len(i) for i in details, Coes ] , [ [i[0] \
                for i in details][ii] for ii in ind ][::-1]
        # drop the duplicated entries; reverse order keeps indices valid
        [ l.pop(i) for i in ind[::-1] for l in details, Coes ]
        if debug:
            print [len(i) for i in details, Coes ]
    # return a dictionary indexed by p/l tracer, with rxn #,
    # reaction str and Coe of rxn.
    return dict( zip( [i[0] for i in details], [ i[1:] + [ Coes[n] ] \
        for n, i in enumerate( details) ] ) )

# -------------
# 2.03 - Get prod loss reactions for a given family.
# -------------
def prod_loss_4_spec( wd, fam, all_clean=True, debug=False ):
    # --- Get Dict of all reactions, Keys = #s
    rdict = rxn_dict_from_smvlog(wd)
    # --- Get reaction # tracked by p/l diag for spec and coefficient.
    rxns = rxns_in_pl(wd, fam)
    nums = rxns.keys()
    Coe = [ rxn[-1] for rxn in rxns.values() ]
    # --- get all details from full reaction dictionary
    rxns = [ rdict[i] for i in nums ]
    # --- get tags for tracked reactions, state where reactions are un tracked
    tags = get_p_l_tags( rxns )
    # --- cleaned tags ( strip "+x.y" / "=x.y" coefficient suffixes )
    if all_clean:
        tags = [ [re.sub('\+\d.\d', '', i) for i in u ] for u in tags ]
        tags = [ [re.sub('\=\d.\d', '', i) for i in u ] for u in tags ]
    # -- remove erroneous read/ Kludge on val
    # --- Fortran write error leads to combination of species at the
    # end of long line
    if (debug):
        print [ i[:3] for i in nums, rxns, tags, Coe]
        print [ len(i) for i in nums, rxns, tags, Coe]
    # fused tag strings seen in smvgear output, and their corrected forms
    errs = ['LO3_36RD95' , 'ISOPNDPO3_50']
    cerrs = [['LO3_36', 'RD95'], ['PO3_50'] ]
    for n, e in enumerate( errs ):
        try:
            # locate the entry whose tag list contains the fused string
            ind = [ nn for nn, i in enumerate( tags) if \
                any([ ( e in ii) for ii in i ]) ] [0]
            vars = [ i[ind] for i in nums, rxns, tags, Coe]
            if (debug):
                print 3, [ i[-1] for i in nums, rxns, tags, Coe], vars, \
                    [ len(i) for i in nums, rxns, tags, Coe]
            # pop the mangled entry from all four lists ...
            [i.pop(ind) for i in nums, rxns, tags, Coe ]
            # add the cerrs values on the end
            if (debug):
                print 4, [ i[-1] for i in nums, rxns, tags, Coe], \
                    [ len(i) for i in nums, rxns, tags, Coe]
            nums += [ vars[0] ]
            rxns += [vars[1] ]
            tags += [cerrs[n]]
            Coe += [vars[-1] ]
            if (debug):
                print 6, [ i[-1] for i in nums, rxns, tags, Coe], \
                    [ len(i) for i in nums, rxns, tags, Coe]
            print '->'*30, 'SUCCESS', n, e
        except:
            # NOTE(review): bare except silently covers any failure mode here
            print '>'*100, 'FAIL' , n, e
    return nums, rxns, tags, Coe

# ------------------------------------------- Section 3 -------------------------------------------
# -------------- GeosChem (bpch) prod loss variables
#

# --------------
# 3.01 - Spec to photolysis reaction p/l tag
# -------------
def spec_phot_2_RD(spec):
    """
    Map a species name to the p/l tag of its photolysis reaction.
    """
    d = { 'OIO': 'RD67', 'ICl': 'RD74', 'I2O2': 'RD70', 'I2': 'RD64', \
    'CH2ICl': 'RD88', 'HOI': 'RD65', 'CH2IBr': 'RD89', 'INO': 'RD75', \
    'IO': 'RD66', 'CH2I2': 'RD72','CH3IT': 'RD71', 'IONO2': 'RD69', \
    'IONO': 'RD68', 'IBr': 'RD73' \
    }
    return d[spec]
# --------------
# 3.02 - Get families for reactions
# -------------
def get_tag_fam( tag ):
    """
    dictionary of manually constructed assignment list
     - Ox loss familes
     - addition for paranox Kludge (in v9-2 (patched), but removed in v10? )
    """
    # Ox family dictionary
    # NOTE(review): several keys repeat below (e.g. 'LO3_38', 'LO3_30',
    # 'LO3_55', 'LO3_36') — in a dict literal the later entry wins.
    fam_d = {
    'LO3_18': 'Photolysis', 'LR25': 'Bromine', 'LR21': 'Bromine',
    'LO3_38': 'Iodine', 'LO3_63': 'NOy', 'LO3_10': 'HOx',
    'LO3_34': 'Iodine', 'LO3_35': 'Iodine', 'LO3_30': 'Iodine',
    'LR5': 'Bromine', 'LR6': 'Bromine', 'LO3_61': 'NOy',
    'LO3_60': 'NOy', 'LO3_39': 'Iodine', 'LO3_05': 'HOx',
    'LO3_07': 'NOy', 'LO3_06': 'HOx', 'LO3_49': 'NOy',
    'LO3_62': 'NOy', 'LO3_03': 'HOx', 'LO3_02': 'HOx',
    'LO3_67': 'NOy', 'LO3_66': 'NOy', 'LO3_69': 'NOy',
    'LO3_42': 'NOy', 'LO3_41': 'NOy', 'LO3_40': 'NOy',
    'LO3_47': 'HOx', 'LO3_46': 'NOy', 'LO3_09': 'HOx',
    'LO3_44': 'NOy', 'LR37': 'HOx', 'LR36': 'NOy',
    'LO3_65': 'NOy', 'LR30': 'Bromine', 'LO3_24': 'Iodine',
    'LR10': 'Bromine', 'LR38': 'NOy', 'LO3_68': 'NOy',
    'LO3_64': 'NOy', 'LO3_36': 'Iodine', 'LO3_57': 'NOy',
    'LO3_72': 'NOy', 'RD98': 'Photolysis', 'LO3_71': 'NOy',
    'LO3_58': 'NOy', 'LO3_54': 'Photolysis', 'LO3_55': 'Iodine',
    'LO3_56': 'HOx', 'LO3_08': 'HOx', 'LO3_50': 'NOy',
    'LO3_51': 'NOy', 'LO3_52': 'NOy', 'LO3_53': 'HOx'
    # added
    , 'RD63': 'Iodine', 'RD62':'Iodine', 'LO3_38': 'Iodine',
    'RD59': 'Iodine', 'LO3_30' : 'Iodine', 'RD65': 'Iodine',
    'LO3_34': 'Iodine', 'RD93': 'Iodine', 'LO3_55': 'Iodine',
    'RD92': 'Iodine', 'LO3_39': 'Iodine' , 'LO3_36': 'Iodine',
    'RD95': 'Iodine' , 'RD67': 'Iodine', 'LO3_35': 'Iodine'
    # , 'RD36': 'Bromine' # Kludge to allow combination reactions
    # Kludge - from Chris Holmes (paranox deposition, goes through p/l as Ox losss )
    ,'LO3_70' : 'Photolysis'
    # Extra tags not in list?
    # (these reactions are appearing due to lack of inclusion of iodine
    # species in Ox family... )
    # ,'RD19': 'iodine', 'RD37': 'iodine', 'RD01': 'iodine'
    }
    # Creigee reaction class/"family" dictionary
    # if cregiee:
    # fam_d ={
    # }
    return fam_d[tag]

# ----------------- Section 4 -------------------------------------------
# -------------- GeosChem (bpch) general variables
#
# 4.01 - v9-2 species in input.geos from num
# 4.02 - Get Species Mass
# 4.03 - Get Species stioch
# 4.04 - GEOS-Chem/ctm.bpch values (current main dict )
# 4.05 - latex species name
# 4.06 - converts P/L tracer mulitpler to 1
# 4.07 - Returns tracers unit and scale (if requested)
# 4.08 - Store of dirs for earth0, atmosviz1, and tms mac
# 4.09 - Ox in species (redundant now? should adapt species stoich )
# 4.10 - Get GAW site info (lat, lon, alt (press), timezone (UTC) )
# 4.99 - Reference data, (inc. grid data) from gchem - credit: GK (Gerrit Kuhlmann )

# --------------
# 4.01 - v9-2 species in input.geos from num
# -------------
def num2spec( num=69, rtn_dict=False, invert=False, ver = '1.7' ):
    """
    Return the species name for a given input.geos tracer number
    (or the full number->name dictionary; its inverse when invert=True).
    """
    # get dictionary of tracer numbers
    d= what_species_am_i( ver=ver, rtn_dict=True, special_case=None )
    # slice off just numbers ( keys are 'TRA_##' strings )
    nums =[ int(i[4:]) for i in d.keys()]
    # re-make dictionary
    d = dict( zip(nums, d.values() ) )
    # inver to give spec for num
    if invert:
        d = { v: k for k, v in d.items() }
    if rtn_dict:
        return d
    else:
        return d[num]

# --------------
# 4.02 - RMM (Mass) (g /mol) for species
# -------------
# C3H5I == C2H5I (this is a vestigle typo, left in to allow for use of older model runs
def species_mass(spec):
    """
    Return the relative molecular mass (g/mol) used for "spec".

    NOTE(review): carbon tracers (e.g. ACET, ALD2, PRPE, C3H8) are listed
    as 12.0 — presumably per carbon atom, not whole-molecule RMM; confirm
    before using for unit conversions.
    """
    d = {
    'HIO3': 176.0, 'OCPO': 12.0, 'Br2': 160.0, 'OCPI': 12.0,
    'O3': 48.0, 'PAN': 121.0, 'ACET': 12.0, 'RIP': 118.0,
    'BrNO3': 142.0, 'Br': 80.0, 'HBr': 81.0, 'HAC': 74.0,
    'ALD2': 12.0, 'HNO3': 63.0, 'HNO2': 47.0, 'C2H5I': 168.0,
    'HNO4': 79.0, 'OIO': 159.0, 'MAP': 76.0, 'PRPE': 12.0,
    'CH2I2': 268.0, 'IONO2': 189.0, 'NIT': 62.0, 'CH3Br': 95.0,
    'C3H7I': 170.0, 'C3H8': 12.0, 'DMS': 62.0, 'CH2O': 30.0,
    'CH3IT': 142.0, 'NO2': 46.0, 'NO3': 62.0, 'N2O5': 105.0,
    'H2O2': 34.0, 'DST4': 29.0,
    'DST3': 29.0, 'DST2': 29.0, 'DST1': 29.0, 'MMN': 149.0,
    'HOCl': 52.0, 'NITs': 62.0, 'RCHO': 58.0, 'C2H6': 12.0,
    'MPN': 93.0, 'INO': 157.0, 'MP': 48.0, 'CH2Br2': 174.0,
    'SALC': 31.4, 'NH3': 17.0, 'CH2ICl': 167.0, 'IEPOX': 118.0,
    'ClO': 51.0, 'NO': 30.0, 'SALA': 31.4, 'MOBA': 114.0,
    'R4N2': 119.0, 'BrCl': 115.0, 'OClO': 67.0, 'PMN': 147.0,
    'CO': 28.0, 'BCPI': 12.0, 'ISOP': 12.0, 'BCPO': 12.0,
    'MVK': 70.0, 'BrNO2': 126.0, 'IONO': 173.0, 'Cl2': 71.0,
    'HOBr': 97.0, 'PROPNN': 109.0, 'Cl': 35.0, 'I2O2': 286.0,
    'I2O3': 302.0, 'I2O4': 318.0, 'I2O5': 338.0, 'MEK': 12.0,
    'HI': 128.0, 'ISOPN': 147.0, 'SO4s': 96.0, 'I2O': 270.0,
    'ALK4': 12.0, 'MSA': 96.0, 'I2': 254.0, 'PPN': 135.0,
    'IBr': 207.0, 'MACR': 70.0, 'I': 127.0, 'AERI': 127.0,
    'HOI': 144.0, 'BrO': 96.0, 'NH4': 18.0, 'SO2': 64.0,
    'SO4': 96.0, 'IO': 143.0, 'CHBr3': 253.0, 'CH2IBr': 221.0,
    'ICl': 162.0, 'GLYC': 60.0
    # species, not in tracer list
    , 'HO2': 33.0, 'OH': 17.0,'CH4':16.0 , 'N':14.0, 'CH3I':142.0,
    'CH2OO':46.0, 'S': 32.0,
    }
    return d[spec]

# --------------
# 4.03 - return the stiochometry of Iodine in species
# --------------
def spec_stoich( spec, IO=False, I=False, NO=False, OH=False, N=False, C=False ):
    """
    Return the stoichiometry of "spec" — iodine by default; set one of
    the boolean flags (IO/NO/OH/N/C) to tally a different element/family.
    Keys include both species names and p/l reaction tags (RD##/LO3_##).
    """
    # if I: # note - re-write to take stioch species (e.g. OH, I instead of booleans ) - asssume I == True as default
    # C3H5I == C2H5I (this is a vestigle typo, left in to allow for use of older model runs
    # aerosol cycling specs
    # 'LO3_36' : (2.0/3.0) , 'LO3_37' : (2.0/4.0),
    # aersol loss rxns... 'LO3_37' isn't true loss, as I2O4 is regen. temp
    # aerosol loss rxns - corrected stochio for Ox, adjsutment need for I
    # NOTE(review): duplicate keys occur (e.g. 'RD11', 'RD72', 'RD64');
    # the later entry wins in a dict literal.
    d = {
    'RD11': 1.0, 'RD10': 1.0, 'HIO3': 1.0, 'RD15': 1.0, 'RD62': 2.0,
    'RD17': 1.0, 'RD16': 1.0, 'RD19': 1.0, 'LO3_37': 0.5, 'CH2I2': 2.0,
    'AERII': 1.0, 'CH2ICl': 1.0, 'PIOx': 1.0, 'C3H7I': 1.0, 'RD73': 1.0,
    'RD72': 2.0, 'RD71': 1.0, 'RD70': 1.0, 'C3H5I': 1.0, 'RD57': 1.0,
    'CH3IT': 1.0, 'IO': 1.0, 'LO3_38': 1.0, 'RD61': 1.0, 'RD68': 1.0,
    'I2': 2.0, 'IONO': 1.0, 'LO3_36': 0.6666666666666666, 'INO': 1.0,
    'RD88': 1.0, 'RD89': 1.0, 'LOx': 1.0, 'RD06': 1.0, 'RD07': 1.0,
    'RD02': 1.0, 'RD01': 1.0, 'I': 1.0, 'LO3_24': 0.5, 'AERI': 1.0,
    'HOI': 1.0, 'RD64': 2.0, 'RD65': 1.0, 'RD66': 1.0, 'RD67': 1.0,
    'RD60': 1.0, 'RD47': 1.0, 'C2H5I': 1.0, 'RD63': 1.0, 'RD20': 1.0,
    'RD22': 1.0, 'RD24': 1.0, 'RD69': 1.0, 'RD27': 1.0, 'OIO': 1.0,
    'CH2IBr': 1.0, 'LIOx': 1.0, 'L_Iy': 1.0, 'ICl': 1.0, 'IBr': 1.0,
    'RD95': 2.0, 'I2O2': 2.0, 'I2O3': 2.0, 'I2O4': 2.0, 'I2O5': 2.0,
    'HI': 1.0, 'I2O': 2.0, 'RD59': 1.0, 'RD93': 2.0, 'RD92': 1.0,
    'IONO2': 1.0, 'RD58': 1.0,
    # p/l for: IO, I
    'RD15': 1.0, 'RD17': 1.0, 'RD75': 1.0, 'RD72': 2.0, 'RD71': 1.0,
    'RD70': 1.0, 'RD56': 1.0, 'RD69': 1.0, 'RD88': 1.0, 'RD89': 1.0,
    'RD06': 1.0, 'RD07': 1.0, 'RD08': 1.0, 'RD64': 2.0, 'RD65': 1.0,
    'RD67': 1.0, 'RD46': 2.0, 'RD47': 1.0, 'RD20': 1.0, 'RD22': 1.0,
    'RD68': 1.0, 'RD25': 1.0, 'RD96': 1.0 ,
    'RD11': 1.0, 'RD12': 2.0, 'RD02': 1.0, 'RD16': 1.0, 'RD19': 1.0,
    'RD24': 1.0, 'RD09': 1.0, 'RD23': 1.0, 'RD37': 1.0, 'RD97': 1.0,
    # kludge for test analysis (HEMCO emissions )
    'ACET' : 1.0, 'ISOP': 1.0, 'CH2Br2': 1.0, 'CHBr3':1.0, 'CH3Br':1.0
    }
    # IO stoichiometry instead of iodine
    if IO:
        d = {
        'RD11': 2.0, 'RD10': 1.0, 'RD12': 2.0, 'LO3_36': 1./3.,
        'RD09': 1.0, 'RD66': 1.0, 'RD23': 1.0, 'RD37': 1.0,
        'LO3_24': 1.0/2.0, 'RD56': 1.0, 'RD01': 1.0, 'RD08': 1.0,
        'RD46': 2.0, 'RD30': 1.0, 'RD25': 1.0, 'RD27': 1.0, 'RD97':1.0
        }
    # NO stoichiometry instead of iodine
    if NO:
        d ={
        'NO2': 1.0, 'NO3': 1.0, 'N2O5': 2.0, 'NO': 1.0, 'PPN': 1.0,
        'R4N2': 1.0, 'BrNO3': 1.0, 'INO': 1.0, 'PAN': 1.0, 'PMN': 1.0,
        'HNO3': 1.0, 'HNO2': 1.0, 'NH3':
1.0, 'HNO4': 1.0, 'BrNO2': 1.0, 'IONO': 1.0, 'PROPNN': 1.0, 'NH4': 1.0, 'MPN': 1.0, 'MMN': 1.0, 'ISOPN': 1.0, 'IONO2': 1.0 } if OH: d = { 'LO3_18': 2.0, 'LO3_03': 1.0, 'RD95': 1.0, 'PO3_14': 1.0 } if N: d= { 'RD10': 1.0, 'LR26': 1.0, 'LR27': 1.0, 'LR20': 1.0, 'RD17': 1.0, 'RD16': 1.0, 'RD19': 1.0, 'RD18': 2.0, 'LR28': 1.0, 'LO3_30': 1.0, 'RD75': 1.0, 'LR7': 1.0, 'LR8': 1.0, 'RD56': 1.0, 'RD24': 1.0, 'LO3_39': 1.0, 'RD25': 1.0, 'RD81': 1.0, 'LR35': 1.0, 'LR18': 1.0, 'LR17': 1.0, 'LR11': 1.0, 'LR39': 1.0, 'RD20': 1.0, 'RD21': 2.0, 'RD22': 1.0, 'RD23': 1.0, 'RD68': 1.0, 'RD69': 1.0 # NOy ( N in 'NOy') , 'NO2': 1.0, 'NO3': 1.0, 'N2O5': 2.0, 'NO': 1.0, 'PPN': 1.0, 'R4N2': 2.0, 'BrNO3': 1.0, 'INO': 1.0, 'PAN': 1.0, 'PMN': 1.0, 'HNO3': 1.0, 'HNO2': 1.0, 'NH3': 1.0, 'HNO4': 1.0, 'BrNO2': 1.0, 'IONO': 1.0, 'PROPNN': 1.0, 'NH4': 1.0, 'MPN': 1.0, 'MMN': 1.0, 'ISOPN': 1.0, 'IONO2': 1.0 } if C: d = { 'ACET': 3.0, 'ALD2': 2.0, 'C2H6': 2.0, 'C3H8': 3.0, 'ISOP': 5.0 } return d[spec] # -------------- # 4.04 - GEOS-Chem/ctm.bpch values # -------------- def GC_var(input_x=None, rtn_dict=False, debug=False): """ Note: Most of this dictionary is vestigial. <= ACTION NEEDED ( remove redundant variables ) f_var = GC flux (EW, NS , UP) variables Ox = 'Ox', 'POX', 'LOX' + list of drydep species # not inc. 
'NO3df', 'HNO4df', 'BrOdf' , 'BrNO2', 'IO', 'IONO', 'OIO', Ox_p = Ox prod list Ox-l = Ox loss list d_dep = dry dep (category, name = species) w_dep = wet dep ( 3x categories (WETDCV = rain out loss in convective updrafts (kg/s), WETDLS = rainout in large scale precip (kg/s), CV-FLX = Mass change due to cloud convection (kg/s); name = species) BL_m = UPWARD MASS FLUX FROM BOUNDARY-LAYER MIXING, (category, name = species) f_strat = strat flux (to tropsosphere) (category, name = species) """ if (debug): print 'GC_var called' GC_var_dict = { # Ox budget analysis 'f_var' : ['EW-FLX-$', 'NS-FLX-$', 'UP-FLX-$' ], # 'r_t' : [ 'Photolysis','HOx','NOy' ,'Bromine', 'Iodine' ], # 'r_tn' : ['NOy' ,'Photolysis','HOx' ,'Bromine', 'Iodine' ], 'r_t' : [ 'Photolysis','HOx','Bromine', 'Iodine' ], 'r_tn' : ['Photolysis','HOx' ,'Bromine', 'Iodine' ], 'r_tn_lc' : ['photolysis','HOx' ,'bromine', 'iodine' ], # 'Ox' : ['Ox','POX','LOX','O3df','NO2df', 'PANdf', 'PMNdf', 'PPNdf', 'N2O5df','HNO3df', 'HOBrdf','BrNO3df','HOIdf','IONO2df', 'I2O2df', 'I2O4df','I2O3df'], # 'Ox1.1' : ['Ox','POX','LOX','O3df','NO2df', 'PANdf', 'PMNdf', 'PPNdf', 'N2O5df','HNO3df', 'HOBrdf','BrNO3df','HOIdf','IONO2df', 'I2O2df'], # 'Ox_spec' : ['O3', 'NO2', 'NO3', 'PAN', 'PMN', 'PPN', 'HNO4', 'N2O5', 'HNO3', 'BrO', 'HOBr', 'BrNO2', 'BrNO3', 'MPN', 'IO', 'HOI', 'IONO', 'IONO2', 'OIO', 'I2O2', 'I2O4', 'I2O3'], # 'Ox_spec1.1' : ['O3', 'NO2', 'NO3', 'PAN', 'PMN', 'PPN', 'HNO4', 'N2O5', 'HNO3', 'BrO', 'HOBr', 'BrNO2', 'BrNO3', 'MPN', 'IO', 'HOI', 'IONO', 'IONO2', 'OIO', 'I2O2', 'I2O4'], # 'Ox_p_1.3' : ['PO3_85', 'PO3_76', 'PO3_62', 'PO3_63', 'RD06', 'PO3_01', 'PO3_77', 'PO3_86', 'PO3_79', 'PO3_14', 'PO3_87', 'PO3_66', 'PO3_15', 'PO3_67', 'PO3_51', 'PO3_88', 'PO3_56', 'PO3_89', 'PO3_90', 'PO3_72', 'PO3_91', 'PO3_02', 'PO3_03', 'PO3_92', 'PO3_64', 'PO3_68', 'LR9', 'PO3_58', 'PO3_57', 'PO3_70', 'PO3_60', 'PO3_65', 'PO3_18', 'PO3_73', 'PO3_19', 'PO3_20', 'PO3_21', 'PO3_22', 'PO3_24', 'PO3_25', 'PO3_26', 'PO3_27', 'PO3_34', 
'PO3_35', 'PO3_37', 'PO3_38', 'PO3_39', 'PO3_05', 'PO3_45', 'PO3_69', 'PO3_46', 'PO3_97', 'PO3_47', 'PO3_48', 'PO3_52', 'PO3_53', 'PO3_80', 'PO3_40', 'PO3_54', 'PO3_81', 'PO3_55', 'PO3_74', 'PO3_41', 'PO3_43', 'PO3_93', 'PO3_94', 'PO3_95', 'PO3_96', 'PO3_84', 'PO3_61', 'PO3_83', 'PO3_59', 'PO3_71', 'PO3_98', 'PO3_99', 'PO3100', 'PO3101', 'PO3_75', 'PO3_50'], # 'ISOPND', # 'Ox_l_fp' : ['LO3_18','RD98','LO3_54', # hv # 'LO3_03', 'LO3_02', 'LO3_08', 'LO3_09', 'LO3_06', 'LO3_05', 'LO3_10', 'LO3_53', 'LO3_47', # HOx (ROx route) # HOx # 'LO3_52', 'LO3_51','LO3_50', 'LO3_49', 'LO3_46', 'LO3_44', 'LO3_42', 'LO3_41', 'LO3_40',#'LO3_48','LO3_04', # NOy route # 'LR25','LR21','LR5','LR6' ,'LR30', 'LR10', 'LO3_24', # Bry route # 'LO3_34','LO3_24','LO3_35','LO3_30', 'LO3_39', 'LO3_38','LO3_55' , 'LO3_36', 'RD63'], # Iy route # 'Ox_l_fp1.1' : ['LO3_18','LO3_54', # hv # 'LO3_03', 'LO3_02', 'LO3_08', 'LO3_09', 'LO3_06', 'LO3_05', 'LO3_10', 'LO3_53', 'LO3_47', # HOx (ROx route) # HOx # 'LO3_52', 'LO3_51','LO3_50', 'LO3_49', 'LO3_46', 'LO3_44', 'LO3_42', 'LO3_41', 'LO3_40',#'LO3_48','LO3_04', # NOy route # 'LR25','LR21','LR5','LR6' ,'LR30', 'LR10','LO3_24', # Bry route # 'LO3_34','LO3_24','LO3_35','LO3_30','LO3_39', 'LO3_38','LO3_55' , 'LO3_36', 'RD63'], # Iy route # 'Ox_l_fp1.3' : ['LO3_18','RD98','LO3_54', # hv # 'LO3_03', 'LO3_02', 'LO3_08', 'LO3_09', 'LO3_06', 'LO3_05', 'LO3_10', 'LO3_53', 'LO3_47', 'LO3_56','LR37', 'LR38', # HOx (ROx route) # HOx # 'LO3_52', 'LO3_51','LO3_50', 'LO3_49', 'LO3_46', 'LO3_44', 'LO3_42', 'LO3_41', 'LO3_40','LR36', 'LO3_58', #'LO3_48','LO3_04',,# NOy route # 'LO3_57', 'LO3_07', 'LO3_60', 'LO3_61', 'LO3_62', 'LO3_63', 'LO3_64', 'LO3_65', 'LO3_66', 'LO3_67', 'LO3_68', 'LO3_69', 'LO3_72','LO3_71', # NOy route # 'LR25','LR21','LR5','LR6' ,'LR30', 'LR10', 'LO3_24', # Bry route # 'LO3_34','LO3_24','LO3_35','LO3_30', 'LO3_39', 'LO3_38','LO3_55' , 'LO3_36'], # Iy route # 'Ox_l_fp_r_' : [(0, 3), (3, 11), (11, 21), (21, 28), (28, None)], #, (-2, -1)] # 
'Ox_l_fp_r_1.1' : [(0, 2), (2, 11), (11, 21), (21, 28), (28, None)], #, (-2, -1)] # 'Ox_l_fp_r_1.3' : [(0, 3), (3, 14), (14, 40), (40, 47), (47, None)], #, (-2, -1)] 'fams' : ['I2','HOI','IO', 'I', 'HI+OIO+IONO+INO', 'IONO2','IxOy', 'CH3I', 'CH2IX'], # Iy families 'fams_A' : ['I2','HOI','IO', 'I', 'HI+OIO+IONO+INO', 'IONO2','IxOy', 'CH3I', 'CH2IX', 'AERI'], # Iy families 'fam_slice' : [(0, 1), (1, 2), (2, 3), (3,4 ),(4, 8), (8, 9), (9, 12), (12, 13), (13, None)], # slice 'fam_slice_A' : [(0, 1), (1, 2), (2, 3), (3,4 ),(4, 8), (8, 9), (9, 12), (12, 13), (13, 16),(16, None)], # slice # 'POx_l_fp' : ['PO3_01', 'PO3_03', 'PO3_02', 'PO3_05','PO3_14', 'PO3_15', 'PO3_18', 'PO3_19', 'PO3_20', 'PO3_21', 'PO3_22', 'PO3_24', 'PO3_25', 'PO3_26', 'PO3_27', 'PO3_30', 'PO3_31', 'PO3_32', 'PO3_33', 'PO3_34', 'PO3_35', 'PO3_37', 'PO3_38', 'PO3_39', 'PO3_40', 'PO3_41', 'PO3_43'], 'Ox_key' : ['POX', 'PO3_14', 'PO3_15', 'LOX'],#, 'LO3_18', 'LO3_03', 'LO3_02','LR25', 'LR21', 'LR5','LR6','LO3_34', 'LO3_33','LO3_24', 'LO3_35' ], 'POxLOx' : ['POX', 'LOX'], 'iPOxiLOx' : ['POX', 'LOX', 'iPOX', 'iLOX'], # Iy/ Iodine budget analysis 'BL_FT_UT' : [(0, 6), (6, 26), (26, 38)] , 'n_order' :['CH2IX','CH3I', 'I2', 'HOI','IO', 'I', 'IONO2','HI+OIO+IONO+INO','IxOy' ] , 'n_order_A' :['CH2IX','CH3I', 'I2', 'HOI','IO', 'I', 'IONO2','HI+OIO+IONO+INO','IxOy', 'AERI' ] , 'I_l' : ['RD01', 'RD02', 'RD16', 'RD19', 'RD24', 'RD27'], 'IO_l' : ['RD09', 'RD10', 'RD11', 'RD12', 'RD23', 'LO3_24', 'RD37', 'RD97', 'RD66'], # LO37 swaped for RD97 as LO37 assigned to loss point of I2O3 uptake 'I_p' : ['RD06','RD07','RD10','RD11','RD47','RD15','RD17','RD20','RD22', 'LO3_24', 'RD64', 'RD65', 'RD66', 'RD67','RD68','RD69', 'RD70', 'RD71','RD72', 'RD73', 'RD88', 'RD89'], 'IO_p' : [ 'RD01', 'RD08', 'RD46', 'RD25', 'RD27','RD56'], 'sOH' : ['LO3_18'], 'd_dep' : ['DRYD-FLX'], 'w_dep' : ['WETDCV-$','WETDLS-$'], 'BL_m' : ['TURBMC-$'], 'f_strat' : ['STRT-FL'], 'p_l' : ['PORL-L=$'], 'Cld_flx' : ['CV-FLX-$'], 'I_Br_O3' : ['IO', 
'OIO','HOI','I2','I','CH3IT','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C2H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO'], 'IOrg_RIS' : ['CH3IT','CH2ICl','CH2I2', 'CH2IBr', 'I2','HOI','I','IO', 'OIO', 'HI','IONO','IONO2'], 'I_specs' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2', 'I2O3','I2O4''CH3IT','CH2I2','I','INO'] , 'Iy' : ['I2','HOI','IO', 'OIO', 'HI','INO','IONO', 'IONO2','I2O2', 'I2O3','I2O4','I'], 'Iy1.1' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2','I2O4','I','INO'], 'IOy' : ['HOI','IO', 'OIO','IONO','IONO2','INO','I2O2','I2O4', 'I2O3'], 'IOy1.1' : ['HOI','IO', 'OIO','IONO','IONO2','INO','I2O2','I2O4'], 'I2Ox' : ['I2O2','I2O4','I2O3'], 'I2Ox' : ['I2O2','I2O4','I2O3'], 'IyOx1.1' : ['I2O2','I2O4'], 'Iy_no_i2o4' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2','I','INO', 'I2O3'], 'Iy_no_i2o41.1' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2','I','INO'], 'Phot_s_Iy' : ['CH3IT','CH2ICl','CH2I2', 'CH2IBr'],#['RD89', 'RD88', 'RD71', 'RD72'], 'HOI' : ['HOI'], 'IOx' : ['IO','I',], 'IO' : ['IO'], 'I' : ['I',], 'OIO' : ['OIO'], 'LIOx' : ['LIOx'], # LOx is p/l tracer name, for Loss of IOx 'PIOx' : ['PIOx'], # LOx is p/l tracer name, for Loss of IOx 'iodine_all' : ['I2','HOI','IO', 'I', 'HI', 'OIO', 'INO', 'IONO','IONO2','I2O2', 'I2O4', 'I2O3', 'I2O5', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'C3H7I','C2H5I','ICl', 'I2O', 'IBr', 'HIO3', ], 'iodine_all_A': ['I2','HOI','IO', 'I', 'HI', 'OIO', 'INO', 'IONO','IONO2','I2O2', 'I2O4', 'I2O3', 'I2O5', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'C3H7I','C2H5I','ICl', 'I2O', 'IBr', 'HIO3','AERI' ], # Misc analysis 'LHOI' : ['RD65', 'RD63', 'RD08'], 'LHOBr' : ['LR25', 'LR30','LR21'], 'LI2' : ['RD64', 'RD06', 'RD22'], 'LCH2I2' : ['RD72' ], 'LCH2Cl' : ['RD88' ] , 'LCH2Br' : ['RD89' ], 'LCH3IT' : ['RD15' , 'RD71'], 'sHOX' : ['HOI', 'HOBr'], 'HO2_loss' : ['PO3_14','RD09','RD02', 'LR2', 'LR3', 'PO3_46','PO3_02', 'PO3_03', 'PO3_05'], 'CAST_int' : ['IO', 
'OIO','HOI','I2','I','HOI','CH3I','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C3H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO', 'OH', 'HO2','NO','NO2'], 'CAST_intn' : ['IO', 'OIO','HOI','I2','I','HOI','CH3IT','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C2H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO', 'DMS', 'NO','HNO3','HNO4', 'NO2','NO3' , 'PAN' , 'HNO2', 'N2O5'], 'CAST_int_n' : ['IO', 'OIO','HOI','I2','I','HOI','CH3I','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C2H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO', 'OH', 'HO2','NO','NO2'], 'diurnal_sp' : ['IO','I2', 'CH2I2', 'BrO' ] , 'obs_comp' : ['CH3IT','CH2I2','CH2ICl','CH2IBr','C2H5I','C3H7I','I2','IO'] , 'emiss_specs': ['CH3IT','CH2I2','CH2ICl','CH2IBr','I2','HOI'] , 'w_dep_specs': ['I2' ,'HI' ,'HOI' ,'IONO', 'IONO2','I2O2', 'I2O4', 'I2O3' ,'AERI'], #, 'IBr', 'ICl'] # 'd_dep_specsl' : ['I2', 'HI', 'HOI', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3','AERI'], #, 'IO', 'OIO'] , 'd_dep_specsl1.1' : ['I2', 'HI', 'HOI', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'AERI'], #, 'IO', 'OIO'] , 'd_dep_specs': ['I2df', 'HIdf', 'HOIdf', 'IONOdf', 'IONO2df', 'I2O2df', 'I2O4df', 'I2O3df', 'AERIdf',], #, 'IOdf', 'OIOdf'], # # 'd_dep_specs1.1': ['I2df', 'HIdf', 'HOIdf', 'IONOdf', 'IONO2df', 'I2O2df', 'I2O4df','AERIdf',], #, 'IOdf', 'OIOdf'], # 'I2_het_cyc' : ['RD59','RD92','RD63'], # IONO2, IONO, HOI 'I_het_loss' : [ 'RD58', 'RD62', 'RD93' ,'RD95'], # HI, I2O2, I2O4, I2O3 uptake (prev: 2OIO excuded as I2Ox formaed, IO+OIO included as I2O3 not treated ) #['RD60','RD61','RD62','RD52','RD53','RD54','RD55','RD13'], # RD13 = OIO + OH => HIO3 86 => AERI loss 'NOx' : ['NO', 'NO2' ], 'N_specs' : ['NO', 'NO2', 'PAN', 'HNO3', 'PMN', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'NH3', 'NH4', 'BrNO2', 'BrNO3', 'MPN', 'ISOPN', 'PROPNN', 'MMN', 'NO3', 'HNO2', 'IONO', 'IONO2', 'INO'], 'N_specs_no_I' : ['NO', 'NO2', 'PAN', 'HNO3', 'PMN', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'NH3', 'NH4', 'BrNO2', 'BrNO3', 'MPN', 'ISOPN', 
'PROPNN', 'MMN', 'NO3', 'HNO2'], 'I_N_tags' : ['RD10', 'RD23', 'RD19', 'RD16', 'RD22', 'RD56', 'RD24', 'LO3_30', 'RD69', 'RD68', 'RD20', 'RD21', 'RD25', 'LO3_39', 'RD17', 'RD18', 'RD75'], 'Br_N_tags' : ['LR7', 'LR18', 'LR17', 'LR11', 'LR8', 'LR20', 'LR26', 'LR28', 'LR27'], 'inactive_I' : ['BrCl', 'OClO', 'ClO', 'HOCl', 'Cl', 'Cl2', 'I2O5', 'I2O', 'HIO3', 'IBr', 'ICl', 'C2H5I','C3H7I'], # I2O3 now active. 'active_I' : ['I2', 'HOI', 'IO', 'I', 'HI', 'OIO', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr'], 'surface_specs' : ['O3', 'NO', 'NO2', 'NO3' ,'N2O5', 'IO', 'IONO2' ], # Model run title dictionaries 'run_name_dict': {'run': 'Halogens (I+,Br+)', 'Br_2ppt': 'Halogens (I+,Br+) + fixed 2 pptv BrO', 'just_I': 'Just Iodine (I+,Br-)', 'no_hal': 'No Halogens', 'just_Br': 'Just Bromine (I-,Br+)', 'Br_1ppt': 'Halogens (I+,Br+) + fixed 1 pptv BrO', 'obs': 'Observations'} , 'latex_run_names': {'I2Ox_half': 'I$_{2}$Ox loss ($\\gamma$) /2', 'run': 'Iodine simulation.', 'MacDonald_iodide': 'Ocean iodide', 'Sulfate_up': 'Sulfate Uptake', 'I2Ox_phot_exp': 'I$_{2}$Ox exp. X-sections', 'het_double': 'het. cycle ($\\gamma$) x2', 'I2Ox_phot_x2': 'I$_{2}$Ox X-sections x2', 'no_het': 'no het. cycle ', 'I2Ox_double': 'I$_{2}$Ox loss ($\\gamma$) x2', 'just_I': '(I+,Br-)', 'BrO1pptv': 'MBL BrO 1 pptv', 'het_half': 'het. cycle ($\\gamma$) /2', 'Just_I_org': 'Just org. 
I', 'no_I2Ox': 'No I$_{2}$Ox Photolysis', 'BrO1pptv_ALL' : 'BrO 1 pptv in Trop.', 'BrO2pptv' : 'MBL BrO 2 pptv', # adjust from GBC to ACP names # 'no_hal': '(I-,Br-)', 'Just_Br': '(I-,Br+)', 'no_hal': 'No Halogens', 'Just_Br': 'GEOS-Chem (v9-2)', # kludge for diurnal plot 'Iodine simulation.':'Iodine simulation.', '(I+,Br+)': 'Iodine simulation.','(I+,Br-)': 'Just Iodine', '(I-,Br+)': 'GEOS-Chem (v9-2)', '(I-,Br-)': 'No Halogens'}, # tracer unit handling 'spec_2_pptv' : ['I2', 'HOI', 'IO', 'OIO', 'HI', 'IONO', 'IONO2', 'I2O2', 'CH3IT', 'CH2I2', 'IBr', 'ICl', 'I', 'HIO3', 'I2O', 'INO', 'I2O3', 'I2O4', 'I2O5', 'AERI', 'Cl2', 'Cl', 'HOCl', 'ClO', 'OClO', 'BrCl', 'CH2ICl', 'CH2IBr', 'C3H7I', 'C2H5I', 'Br2', 'Br', 'BrO', 'HOBr', 'HBr', 'BrNO2', 'BrNO3', 'CHBr3', 'CH2Br2', 'CH3Br','RCHO', 'MVK', 'MACR', 'PMN', 'PPN', 'R4N2', 'DMS', 'SO4s', 'MSA', 'NITs', 'BCPO', 'DST4', 'ISOPN', 'MOBA', 'PROPNN', 'HAC', 'GLYC', 'MMN', 'RIP', 'IEPOX', 'MAP' ,'N2O5','NO3'], # 'HNO4', 'HNO2'], 'spec_2_pptC' : ['PRPE', 'ISOP'], # global 'spec_2_ppbv': ['NO','DMS', 'RIP', 'IEPOX','BCPO', 'DST4', 'HAC', 'GLYC','MACR', 'ISOP'], 'spec_2_ppbC' : ['ALK4'], # pf dictionaries # WARNING - remove non forwards combatible dicts: # (GCFP_TRA_d ... GCFP_d2TRA ... GCFP_d2TRA_justTRA . etc) # GCFP_TRA_d is in use by CVO plotters - use what_species_am_i instead! 
'GCFP_TRA_d' : {'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_96': 'C2H5I', 'TRA_95': 'C3H7I', 'TRA_94': 'CH2IBr', 'TRA_93': 'CH2ICl', 'TRA_92': 'BrCl', 'TRA_91': 'OClO', 'TRA_90': 'ClO', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'I2', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_68': 'HOI', 'TRA_69': 'IO', 'TRA_71': 'HI', 'TRA_70': 'OIO', 'TRA_73': 'IONO2', 'TRA_72': 'IONO', 'TRA_75': 'CH3IT', 'TRA_74': 'I2O2', 'TRA_77': 'IBr', 'TRA_76': 'CH2I2', 'TRA_79': 'I', 'TRA_78': 'ICl', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_59': 'GLYC', 'TRA_58': 'HAC', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 'TRA_50': 'BrNO3', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_25': 'DMS', 'TRA_22': 'N2O5', 'TRA_23': 'HNO4', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'TRA_01': 'NO', 'TRA_02': 'O3', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'HIO3', 'TRA_81': 'I2O', 'TRA_82': 'INO', 'TRA_83': 'I2O3', 'TRA_84': 'I2O4', 'TRA_85': 'I2O5', 'TRA_86': 'AERI', 'TRA_87': 'Cl2', 'TRA_88': 'Cl', 'TRA_89': 'HOCl','O3':'O3', 'CO':'CO'} , # GCFP_d2TRA is in use by IO plotters - use what_species_am_i instead! 
'GCFP_d2TRA' : {'HIO3': 'TRA_80', 'OCPO': 'TRA_37', 'PPN': 'TRA_16', 'OCPI': 'TRA_35', 'O3': 'TRA_2', 'PAN': 'TRA_3', 'ACET': 'TRA_9', 'IEPOX': 'TRA_62', 'BrNO3': 'TRA_50', 'Br': 'TRA_45', 'HBr': 'TRA_48', 'HAC': 'TRA_58', 'ALD2': 'TRA_11', 'HNO3': 'TRA_7', 'HNO2': 'TRA_66', 'C2H5I': 'TRA_96', 'HNO4': 'TRA_23', 'OIO': 'TRA_70', 'MAP': 'TRA_63', 'PRPE': 'TRA_18', 'HI': 'TRA_71', 'CH2I2': 'TRA_76', 'IONO2': 'TRA_73', 'NIT': 'TRA_32', 'CH3Br': 'TRA_53', 'C3H7I': 'TRA_95', 'C3H8': 'TRA_19', 'DMS': 'TRA_25', 'CH2O': 'TRA_20', 'CH3IT': 'TRA_75','CH3I': 'TRA_75', 'NO2': 'TRA_64', 'NO3': 'TRA_65', 'N2O5': 'TRA_22', 'CHBr3': 'TRA_51', 'DST4': 'TRA_41', 'DST3': 'TRA_40', 'DST2': 'TRA_39', 'DST1': 'TRA_38', 'HOCl': 'TRA_89', 'NITs': 'TRA_33', 'RCHO': 'TRA_12', 'C2H6': 'TRA_21', 'MPN': 'TRA_54', 'INO': 'TRA_82', 'MP': 'TRA_24', 'CH2Br2': 'TRA_52', 'SALC': 'TRA_43', 'NH3': 'TRA_30', 'CH2ICl': 'TRA_93', 'RIP': 'TRA_61', 'ClO': 'TRA_90', 'NO': 'TRA_1', 'SALA': 'TRA_42', 'MOBA': 'TRA_56', 'R4N2': 'TRA_17', 'BrCl': 'TRA_92', 'OClO': 'TRA_91', 'PMN': 'TRA_15', 'CO': 'TRA_4', 'CH2IBr': 'TRA_94', 'ISOP': 'TRA_6', 'BCPO': 'TRA_36', 'MVK': 'TRA_13', 'BrNO2': 'TRA_49', 'IONO': 'TRA_72', 'Cl2': 'TRA_87', 'HOBr': 'TRA_47', 'PROPNN': 'TRA_57', 'Cl': 'TRA_88', 'I2O2': 'TRA_74', 'I2O3': 'TRA_83', 'I2O4': 'TRA_84', 'I2O5': 'TRA_85', 'MEK': 'TRA_10', 'MMN': 'TRA_60', 'ISOPN': 'TRA_55', 'SO4s': 'TRA_28', 'I2O': 'TRA_81', 'ALK4': 'TRA_5', 'MSA': 'TRA_29', 'I2': 'TRA_67', 'Br2': 'TRA_44', 'IBr': 'TRA_77', 'MACR': 'TRA_14', 'I': 'TRA_79', 'AERI': 'TRA_86', 'HOI': 'TRA_68', 'BrO': 'TRA_46', 'NH4': 'TRA_31', 'SO2': 'TRA_26', 'SO4': 'TRA_27', 'IO': 'TRA_69', 'H2O2': 'TRA_8', 'BCPI': 'TRA_34', 'ICl': 'TRA_78', 'GLYC': 'TRA_59','ALK4': 'ALK4', 'MSA': 'MSA', 'MO2': 'MO2', 'C3H8': 'C3H8', 'ISOP': 'ISOP', 'DMS': 'DMS', 'CH2O': 'CH2O', 'O3': 'O3', 'PAN': 'PAN', 'NO3': 'NO3', 'N2O5': 'N2O5', 'H2O2': 'H2O2', 'NO': 'NO', 'PPN': 'PPN', 'R4N2': 'R4N2', 'HO2': 'HO2', 'NO2': 'NO2', 'PMN': 'PMN', 'ACET': 'ACET', 
'CO': 'CO', 'ALD2': 'ALD2', 'RCHO': 'RCHO', 'HNO3': 'HNO3', 'HNO2': 'HNO2', 'SO2': 'SO2', 'SO4': 'SO4', 'HNO4': 'HNO4', 'C2H6': 'C2H6', 'RO2': 'RO2', 'MVK': 'MVK', 'PRPE': 'PRPE', 'OH': 'OH', 'ETO2': 'ETO2', 'MEK': 'MEK', 'MP': 'MP' , 'GMAO_TEMP':'GMAO_TEMP' }, # 'GCFP_d2TRA_justTRA' : {'HIO3': 'TRA_80', 'OCPO': 'TRA_37', 'PPN': 'TRA_16', 'OCPI': 'TRA_35', 'O3': 'TRA_2', 'PAN': 'TRA_3', 'ACET': 'TRA_9', 'IEPOX': 'TRA_62', 'BrNO3': 'TRA_50', 'Br': 'TRA_45', 'HBr': 'TRA_48', 'HAC': 'TRA_58', 'ALD2': 'TRA_11', 'HNO3': 'TRA_7', 'HNO2': 'TRA_66', 'C2H5I': 'TRA_96', 'HNO4': 'TRA_23', 'OIO': 'TRA_70', 'MAP': 'TRA_63', 'PRPE': 'TRA_18', 'HI': 'TRA_71', 'CH2I2': 'TRA_76', 'IONO2': 'TRA_73', 'NIT': 'TRA_32', 'CH3Br': 'TRA_53', 'C3H7I': 'TRA_95', 'C3H8': 'TRA_19', 'DMS': 'TRA_25', 'CH2O': 'TRA_20', 'CH3IT': 'TRA_75','CH3I': 'TRA_75', 'NO2': 'TRA_64', 'NO3': 'TRA_65', 'N2O5': 'TRA_22', 'CHBr3': 'TRA_51', 'DST4': 'TRA_41', 'DST3': 'TRA_40', 'DST2': 'TRA_39', 'DST1': 'TRA_38', 'HOCl': 'TRA_89', 'NITs': 'TRA_33', 'RCHO': 'TRA_12', 'C2H6': 'TRA_21', 'MPN': 'TRA_54', 'INO': 'TRA_82', 'MP': 'TRA_24', 'CH2Br2': 'TRA_52', 'SALC': 'TRA_43', 'NH3': 'TRA_30', 'CH2ICl': 'TRA_93', 'RIP': 'TRA_61', 'ClO': 'TRA_90', 'NO': 'TRA_1', 'SALA': 'TRA_42', 'MOBA': 'TRA_56', 'R4N2': 'TRA_17', 'BrCl': 'TRA_92', 'OClO': 'TRA_91', 'PMN': 'TRA_15', 'CO': 'TRA_4', 'CH2IBr': 'TRA_94', 'ISOP': 'TRA_6', 'BCPO': 'TRA_36', 'MVK': 'TRA_13', 'BrNO2': 'TRA_49', 'IONO': 'TRA_72', 'Cl2': 'TRA_87', 'HOBr': 'TRA_47', 'PROPNN': 'TRA_57', 'Cl': 'TRA_88', 'I2O2': 'TRA_74', 'I2O3': 'TRA_83', 'I2O4': 'TRA_84', 'I2O5': 'TRA_85', 'MEK': 'TRA_10', 'MMN': 'TRA_60', 'ISOPN': 'TRA_55', 'SO4s': 'TRA_28', 'I2O': 'TRA_81', 'ALK4': 'TRA_5', 'MSA': 'TRA_29', 'I2': 'TRA_67', 'Br2': 'TRA_44', 'IBr': 'TRA_77', 'MACR': 'TRA_14', 'I': 'TRA_79', 'AERI': 'TRA_86', 'HOI': 'TRA_68', 'BrO': 'TRA_46', 'NH4': 'TRA_31', 'SO2': 'TRA_26', 'SO4': 'TRA_27', 'IO': 'TRA_69', 'H2O2': 'TRA_8', 'BCPI': 'TRA_34', 'ICl': 'TRA_78', 'GLYC': 'TRA_59','OH': 
'OH','HO2': 'HO2',}, 'GCFP_d2TRA_all_1.6' :{'HIO3': 'TRA_80', 'TRA_17': 'TRA_17', 'TRA_16': 'TRA_16', 'TRA_15': 'TRA_15', 'TRA_14': 'TRA_14', 'TRA_13': 'TRA_13', 'TRA_12': 'TRA_12', 'TRA_11': 'TRA_11', 'TRA_19': 'TRA_19', 'ACET': 'ACET', 'RIP': 'TRA_61', 'BrNO3': 'TRA_50', 'HAC': 'TRA_58', 'ALD2': 'ALD2', 'HNO3': 'HNO3', 'HNO2': 'HNO2', 'HNO4': 'HNO4', 'OIO': 'TRA_70', 'MAP': 'TRA_63', 'PRPE': 'PRPE', 'TRA_29': 'TRA_29', 'CH2I2': 'TRA_76', 'I2O2': 'TRA_74', 'NIT': 'TRA_32', 'CH3Br': 'TRA_53', 'C3H7I': 'TRA_95', 'MO2': 'MO2', 'C3H8': 'C3H8', 'I2O5': 'TRA_85', 'TRA_71': 'TRA_71', 'TRA_70': 'TRA_70', 'TRA_73': 'TRA_73', 'DMS': 'DMS', 'TRA_75': 'TRA_75', 'TRA_74': 'TRA_74', 'TRA_77': 'TRA_77', 'TRA_76': 'TRA_76', 'CH2O': 'CH2O', 'TRA_78': 'TRA_78', 'CH3IT': 'TRA_75', 'NO2': 'NO2', 'NO3': 'NO3', 'N2O5': 'N2O5', 'H2O2': 'H2O2', 'PAN': 'PAN', 'HOCl': 'TRA_89', 'TRA_18': 'TRA_18', 'GMAO_TEMP': 'GMAO_TEMP', 'RCHO': 'RCHO', 'C2H6': 'C2H6', 'INO': 'TRA_82', 'MP': 'MP', 'CH2Br2': 'TRA_52', 'CH2ICl': 'TRA_93', 'TRA_59': 'TRA_59', 'TRA_58': 'TRA_58', 'IEPOX': 'TRA_62', 'TRA_53': 'TRA_53', 'TRA_52': 'TRA_52', 'TRA_51': 'TRA_51', 'TRA_50': 'TRA_50', 'TRA_57': 'TRA_57', 'TRA_56': 'TRA_56', 'TRA_55': 'TRA_55', 'TRA_54': 'TRA_54', 'MOBA': 'TRA_56', 'CH3I': 'TRA_75', 'BrCl': 'TRA_92', 'OClO': 'TRA_91', 'CO': 'CO', 'BCPI': 'TRA_34', 'ISOP': 'ISOP', 'BCPO': 'TRA_36', 'MVK': 'MVK', 'TRA_28': 'TRA_28', 'Cl': 'TRA_88', 'TRA_26': 'TRA_26', 'TRA_27': 'TRA_27', 'TRA_24': 'TRA_24', 'I2O3': 'TRA_83', 'I2O4': 'TRA_84', 'TRA_23': 'TRA_23', 'TRA_20': 'TRA_20', 'TRA_21': 'TRA_21', 'MMN': 'TRA_60', 'I2O': 'TRA_81', 'HBr': 'TRA_48', 'ALK4': 'ALK4', 'I2': 'TRA_67', 'PPN': 'PPN', 'IBr': 'TRA_77', 'I': 'TRA_79', 'AERI': 'TRA_86', 'NH4': 'TRA_31', 'SO2': 'SO2', 'SO4': 'SO4', 'NH3': 'TRA_30', 'TRA_08': 'TRA_08', 'TRA_09': 'TRA_09', 'TRA_01': 'TRA_01', 'TRA_02': 'TRA_02', 'TRA_03': 'TRA_03', 'TRA_04': 'TRA_04', 'TRA_05': 'TRA_05', 'TRA_06': 'TRA_06', 'TRA_07': 'TRA_07', 'OCPI': 'TRA_35', 'OCPO': 'TRA_37', 
'Br2': 'TRA_44', 'O3': 'O3', 'Br': 'TRA_45', 'TRA_96': 'TRA_96', 'TRA_95': 'TRA_95', 'TRA_94': 'TRA_94', 'TRA_93': 'TRA_93', 'TRA_92': 'TRA_92', 'TRA_91': 'TRA_91', 'TRA_90': 'TRA_90', 'TRA_62': 'TRA_62', 'TRA_63': 'TRA_63', 'TRA_60': 'TRA_60', 'TRA_61': 'TRA_61', 'TRA_66': 'TRA_66', 'TRA_67': 'TRA_67', 'C2H5I': 'TRA_96', 'TRA_65': 'TRA_65', 'TRA_68': 'TRA_68', 'TRA_69': 'TRA_69', 'OH': 'OH', 'IONO2': 'TRA_73', 'HI': 'TRA_71', 'CHBr3': 'TRA_51', 'TRA_46': 'TRA_46', 'DST4': 'TRA_41', 'DST3': 'TRA_40', 'DST2': 'TRA_39', 'DST1': 'TRA_38', 'NITs': 'TRA_33', 'TRA_48': 'TRA_48', 'TRA_49': 'TRA_49', 'TRA_44': 'TRA_44', 'TRA_45': 'TRA_45', 'RO2': 'RO2', 'TRA_47': 'TRA_47', 'TRA_40': 'TRA_40', 'TRA_41': 'TRA_41', 'TRA_42': 'TRA_42', 'TRA_43': 'TRA_43', 'MPN': 'TRA_54', 'ETO2': 'ETO2', 'IO': 'TRA_69', 'TRA_64': 'TRA_64', 'ClO': 'TRA_90', 'NO': 'NO', 'SALA': 'TRA_42', 'SALC': 'TRA_43', 'R4N2': 'R4N2', 'PMN': 'PMN', 'TRA_25': 'TRA_25', 'CH2IBr': 'TRA_94', 'TRA_22': 'TRA_22', 'BrNO2': 'TRA_49', 'IONO': 'TRA_72', 'Cl2': 'TRA_87', 'HOBr': 'TRA_47', 'PROPNN': 'TRA_57', 'MEK': 'MEK', 'TRA_72': 'TRA_72', 'ISOPN': 'TRA_55', 'SO4s': 'TRA_28', 'TRA_79': 'TRA_79', 'MSA': 'MSA', 'TRA_39': 'TRA_39', 'TRA_38': 'TRA_38', 'GLYC': 'TRA_59', 'TRA_35': 'TRA_35', 'TRA_34': 'TRA_34', 'TRA_37': 'TRA_37', 'TRA_36': 'TRA_36', 'TRA_31': 'TRA_31', 'TRA_30': 'TRA_30', 'TRA_33': 'TRA_33', 'TRA_32': 'TRA_32', 'HO2': 'HO2', 'MACR': 'TRA_14', 'HOI': 'TRA_68', 'BrO': 'TRA_46', 'ICl': 'TRA_78', 'TRA_80': 'TRA_80', 'TRA_81': 'TRA_81', 'TRA_82': 'TRA_82', 'TRA_83': 'TRA_83', 'TRA_84': 'TRA_84', 'TRA_85': 'TRA_85', 'TRA_86': 'TRA_86', 'TRA_87': 'TRA_87', 'TRA_88': 'TRA_88', 'TRA_89': 'TRA_89','GMAO_TEMP': 'GMAO_TEMP', 'GMAO_UWND': 'GMAO_UWND', 'GMAO_VWND': 'GMAO_VWND'}, # 'GCFP_d2TRA_all_1.7' : {'TRA_74': 'ICl', 'TRA_25': 'DMS', 'TRA_68': 'CH2I2', 'TRA_44': 'Br2', 'TRA_70': 'CH2IBr', 'TRA_22': 'N2O5', 'TRA_76': 'IO', 'TRA_79': 'INO', 'TRA_23': 'HNO4', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 
'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 'TRA_21': 'C2H6', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_69': 'CH2ICl', 'TRA_50': 'BrNO3', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_73': 'IBr', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'TRA_77': 'HI', 'TRA_83': 'I2O3', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'TRA_72': 'I2', 'TRA_59': 'GLYC', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_20': 'CH2O', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'TRA_75': 'I', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_01': 'NO', 'TRA_02': 'O3', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'IONO', 'TRA_81': 'IONO2', 'TRA_82': 'I2O2', 'TRA_58': 'HAC', 'TRA_84': 'I2O4', 'TRA_85': 'AERI', 'TRA_27': 'SO4', 'TRA_78': 'OIO', 'TRA_66': 'HNO2', 'TRA_71': 'HOI', 'TRA_24': 'MP', 'TRA_67': 'CH3IT', 'TRA_9': 'ACET', 'TRA_8': 'H2O2', 'TRA_7': 'HNO3', 'TRA_6': 'ISOP', 'TRA_5': 'ALK4', 'TRA_4': 'CO', 'TRA_3': 'PAN', 'TRA_2': 'O3', 'TRA_1': 'NO'}, 'GCFP_d2TRA_all_1.7' : {'TRA_25': 'DMS', 'TRA_77': 'HI', 'TRA_76': 'IO', 'TRA_23': 'HNO4', 'TRA_71': 'HOI', 'TRA_70': 'CH2IBr', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_79': 'INO', 'TRA_78': 'OIO', 'TRA_51': 'CHBr3', 'TRA_50': 'BrNO3', 'TRA_52': 'CH2Br2', 'TRA_46': 'BrO', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_47': 'HOBr', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_81': 'IONO2', 'TRA_35': 'OCPI', 'TRA_57': 'PROPNN', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 
'TRA_33': 'NITs', 'TRA_56': 'MOBA', 'TRA_83': 'I2O3', 'TRA_55': 'ISOPN', 'TRA_84': 'I2O4', 'TRA_54': 'MPN', 'TRA_5': 'ALK4', 'TRA_49': 'BrNO2', 'TRA_32': 'NIT', 'TRA_9': 'ACET', 'TRA_8': 'H2O2', 'TRA_7': 'HNO3', 'TRA_6': 'ISOP', 'TRA_59': 'GLYC', 'TRA_4': 'CO', 'TRA_3': 'PAN', 'TRA_2': 'O3', 'TRA_1': 'NO', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'CH3IT', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_68': 'CH2I2', 'TRA_69': 'CH2ICl', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_17': 'R4N2', 'TRA_28': 'SO4s', 'TRA_16': 'PPN', 'TRA_58': 'HAC', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_29': 'MSA', 'TRA_22': 'N2O5', 'TRA_73': 'IBr', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'TRA_80': 'IONO', 'TRA_26': 'SO2', 'TRA_82': 'I2O2', 'TRA_72': 'I2', 'TRA_48': 'HBr', 'TRA_85': 'AERI', 'TRA_34': 'BCPI', 'TRA_75': 'I', 'TRA_53': 'CH3Br', 'TRA_74': 'ICl'}, 'GCFP_d2TRA_all_1.7_EOH_actual_names' : {'HNO4': 'HNO4', 'PPN': 'PPN', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'O3': 'O3', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'GMAO_UWND': 'GMAO_UWND', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'CH3IT', 'TRA_65': 'NO3', 'TRA_68': 'CH2I2', 'TRA_69': 'CH2ICl', 'OH': 'OH', 'LAT': 'LAT', 'TRA_71': 'HOI', 'TRA_70': 'CH2IBr', 'TRA_73': 'IBr', 'TRA_72': 'I2', 'TRA_75': 'I', 'TRA_74': 'ICl', 'TRA_77': 'HI', 'TRA_76': 'IO', 'TRA_79': 'INO', 'TRA_78': 'OIO', 'NO2': 'NO2', 'NO3': 'NO3', 'N2O5': 'N2O5', 'H2O2': 'H2O2', 'GMAO_VWND': 'GMAO_VWND', 'PAN': 'PAN', 'GMAO_TEMP': 'GMAO_TEMP', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_59': 'GLYC', 'TRA_58': 'HAC', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 
'TRA_50': 'BrNO3', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'NO': 'NO', 'PMN': 'PMN', 'HNO3': 'HNO3', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_25': 'DMS', 'TRA_22': 'N2O5', 'TRA_23': 'HNO4', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'RO2': 'RO2', 'LON': 'LON', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'HO2': 'HO2', 'SO2': 'SO2', 'SO4': 'SO4', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'HNO2': 'HNO2', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'IONO', 'TRA_81': 'IONO2', 'TRA_82': 'I2O2', 'TRA_83': 'I2O3', 'TRA_84': 'I2O4', 'TRA_85': 'AERI', 'TRA_86': 'EOH'}, 'TRA_spec_met_all_1.7_EOH': {'MAO3': 'MAO3', 'DHMOB': 'DHMOB', 'ETP': 'ETP', 'RCO3': 'RCO3', 'MO2': 'MO2', 'EOH': 'EOH', 'MVKN': 'MVKN', 'R4P': 'R4P', 'ISNP': 'ISNP', 'RB3P': 'RB3P', 'MGLY': 'MGLY', 'MAOPO2': 'MAOPO2', 'RIO2': 'RIO2', 'PMNN': 'PMNN', 'PP': 'PP', 'VRP': 'VRP', 'RP': 'RP', 'MRO2': 'MRO2', 'HC5': 'HC5', 'ATO2': 'ATO2', 'PYAC': 'PYAC', 'R4N1': 'R4N1', 'DIBOO': 'DIBOO', 'LISOPOH': 'LISOPOH', 'HO2': 'HO2', 'ETHLN': 'ETHLN', 'ISNOOB': 'ISNOOB', 'ISNOOA': 'ISNOOA', 'ROH': 'ROH', 'MAN2': 'MAN2', 'B3O2': 'B3O2', 'INPN': 'INPN', 'MACRN': 'MACRN', 'PO2': 'PO2', 'VRO2': 'VRO2', 'MRP': 'MRP', 'PRN1': 'PRN1', 'ISNOHOO': 'ISNOHOO', 'MOBAOO': 'MOBAOO', 'MACRNO2': 'MACRNO2', 'ISOPND': 'ISOPND', 'HC5OO': 'HC5OO', 'ISOPNBO2': 'ISOPNBO2', 'RA3P': 'RA3P', 'ISOPNB': 'ISOPNB', 'ISOPNDO2': 'ISOPNDO2', 'PMNO2': 'PMNO2', 'IAP': 'IAP', 'MCO3': 'MCO3', 'IEPOXOO': 'IEPOXOO', 'MAOP': 'MAOP', 'INO2': 'INO2', 'OH': 'OH', 'PRPN': 'PRPN', 'GLYX': 'GLYX', 'A3O2': 'A3O2', 'ETO2': 'ETO2', 'R4O2': 'R4O2', 'ISN1': 'ISN1', 'KO2': 'KO2', 'ATOOH': 'ATOOH','GMAO_PSFC': 'GMAO_PSFC', 'GMAO_SURF': 'GMAO_SURF', 'GMAO_TEMP': 'GMAO_TEMP', 'GMAO_ABSH': 'GMAO_ABSH', 'GMAO_UWND': 'GMAO_UWND', 
'GMAO_VWND': 'GMAO_VWND', 'TRA_9': 'ACET', 'TRA_8': 'H2O2', 'TRA_7': 'HNO3', 'TRA_6': 'ISOP', 'TRA_5': 'ALK4', 'TRA_4': 'CO', 'TRA_3': 'PAN', 'TRA_2': 'O3', 'TRA_1': 'NO', 'TRA_74': 'ICl', 'TRA_25': 'DMS', 'TRA_68': 'CH2I2', 'TRA_44': 'Br2', 'TRA_70': 'CH2IBr', 'TRA_22': 'N2O5', 'TRA_76': 'IO', 'TRA_79': 'INO', 'TRA_23': 'HNO4', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 'TRA_21': 'C2H6', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_69': 'CH2ICl', 'TRA_50': 'BrNO3', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_73': 'IBr', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'TRA_77': 'HI', 'TRA_83': 'I2O3', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'TRA_72': 'I2', 'TRA_59': 'GLYC', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_20': 'CH2O', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'TRA_75': 'I', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_01': 'NO', 'TRA_02': 'O3', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'IONO', 'TRA_81': 'IONO2', 'TRA_82': 'I2O2', 'TRA_58': 'HAC', 'TRA_84': 'I2O4', 'TRA_85': 'AERI', 'TRA_27': 'SO4', 'TRA_78': 'OIO', 'TRA_66': 'HNO2', 'TRA_71': 'HOI', 'TRA_24': 'MP', 'TRA_67': 'CH3IT' }, 'TRA_spec_met_all_1.7_EOH_no_trailing_zeroes': {'EOH': 'EOH', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'DHMOB': 'DHMOB', 'RP': 'RP', 'GMAO_UWND': 'GMAO_UWND', 'MAN2': 'MAN2', 'B3O2': 
'B3O2', 'MRP': 'MRP', 'PRN1': 'PRN1', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'CH3IT', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_68': 'CH2I2', 'TRA_69': 'CH2ICl', 'IAP': 'IAP', 'MCO3': 'MCO3', 'GMAO_SURF': 'GMAO_SURF', 'OH': 'OH', 'PRPN': 'PRPN', 'TRA7': 'HNO3', 'TRA6': 'ISOP', 'MAO3': 'MAO3', 'RCO3': 'RCO3', 'MO2': 'MO2', 'MACRNO2': 'MACRNO2', 'TRA_23': 'HNO4', 'TRA_71': 'HOI', 'TRA_70': 'CH2IBr', 'TRA_73': 'IBr', 'TRA_72': 'I2', 'TRA_75': 'I', 'TRA_74': 'ICl', 'ISNP': 'ISNP', 'TRA_76': 'IO', 'TRA_79': 'INO', 'RB3P': 'RB3P', 'TRA_51': 'CHBr3', 'ROH': 'ROH', 'PP': 'PP', 'ISOPNDO2': 'ISOPNDO2', 'HC5': 'HC5', 'TRA9': 'ACET', 'TRA_56': 'MOBA', 'MACRN': 'MACRN', 'DIBOO': 'DIBOO', 'MRO2': 'MRO2', 'INPN': 'INPN', 'GMAO_TEMP': 'GMAO_TEMP', 'PO2': 'PO2', 'ISOPND': 'ISOPND', 'TRA_48': 'HBr', 'TRA_1': 'NO', 'RA3P': 'RA3P', 'ISOPNB': 'ISOPNB', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'IEPOXOO': 'IEPOXOO', 'MAOP': 'MAOP', 'INO2': 'INO2', 'ETO2': 'ETO2', 'ISN1': 'ISN1', 'TRA_49': 'BrNO2', 'ETP': 'ETP', 'TRA5': 'ALK4', 'TRA4': 'CO', 'TRA_59': 'GLYC', 'TRA_58': 'HAC', 'TRA1': 'NO', 'R4P': 'R4P', 'TRA_3': 'PAN', 'TRA2': 'O3', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'MAOPO2': 'MAOPO2', 'TRA_50': 'BrNO3', 'TRA_57': 'PROPNN', 'GMAO_VWND': 'GMAO_VWND', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'RIO2': 'RIO2', 'VRP': 'VRP', 'R4N1': 'R4N1', 'GMAO_PSFC': 'GMAO_PSFC', 'VRO2': 'VRO2', 'ISNOHOO': 'ISNOHOO', 'HC5OO': 'HC5OO', 'ETHLN': 'ETHLN', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_25': 'DMS', 'TRA_22': 'N2O5', 'R4O2': 'R4O2', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'TRA_77': 'HI', 'MVKN': 'MVKN', 'TRA_35': 'OCPI', 'TRA_78': 'OIO', 'MGLY': 'MGLY', 'PMNN': 'PMNN', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'ATO2': 'ATO2', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 
# --------------
# 4.05 - GEOS-Chem/ctm.bpch values
# --------------
def latex_spec_name(input_x, debug=False):
    """
    Return the LaTeX-formatted display name for a GEOS-Chem species/tag.

    Parameters
    ----------
    input_x : str, GEOS-Chem species, tag, or analysis-family name.
    debug : bool, unused here; kept for call-signature consistency.

    Returns
    -------
    str, LaTeX string (e.g. 'O$_{3}$' for 'O3').

    Raises
    ------
    KeyError if the species is not in the lookup table.
    """
    spec_dict = {
        'OIO': 'OIO', 'C3H7I': 'C$_{3}$H$_{7}$I', 'IO': 'IO', 'I': 'I',
        'I2': 'I$_{2}$', 'CH2ICl': 'CH$_{2}$ICl', 'HOI': 'HOI',
        'CH2IBr': 'CH$_{2}$IBr', 'C2H5I': 'C$_{2}$H$_{5}$I',
        'CH2I2': 'CH$_{2}$I$_{2}$', 'CH3IT': 'CH$_{3}$I', 'IONO': 'IONO',
        'HIO3': 'HIO$_{3}$', 'ICl': 'ICl', 'I2O3': 'I$_{2}$O$_{3}$',
        'I2O4': 'I$_{2}$O$_{4}$', 'I2O5': 'I$_{2}$O$_{5}$', 'INO': 'INO',
        'I2O': 'I$_{2}$O', 'IBr': 'IBr', 'I2O2': 'I$_{2}$O$_{2}$',
        'IONO2': 'IONO$_{2}$', 'HI': 'HI', 'BrO': 'BrO', 'Br': 'Br',
        'HOBr': 'HOBr', 'Br2': 'Br$_{2}$', 'CH3Br': 'CH$_{3}$Br',
        'CH2Br2': 'CH$_{2}$Br$_{2}$', 'CHBr3': 'CHBr$_{3}$', 'O3': 'O$_{3}$',
        'CO': 'CO', 'DMS': 'DMS', 'NOx': 'NOx', 'NO': 'NO',
        'NO2': 'NO$_{2}$', 'NO3': 'NO$_{3}$', 'HNO3': 'HNO$_{3}$',
        'HNO4': 'HNO$_{4}$', 'PAN': 'PAN', 'HNO2': 'HNO$_{2}$',
        'N2O5': 'N$_{2}$O$_{5}$', 'ALK4': '>= C4 alkanes', 'ISOP': 'Isoprene',
        'H2O2': 'H$_{2}$O$_{2}$', 'ACET': 'CH$_{3}$C(O)CH$_{3}$',
        'MEK': '>C3 ketones', 'ALD2': 'CH$_{3}$CHO',
        'RCHO': 'CH$_{3}$CH$_{2}$CHO', 'MVK': 'CH$_{2}$=CHC(O)CH$_{3}$',
        'MACR': 'Methacrolein', 'PMN': 'CH$_{2}$=C(CH$_{3}$)C(O)OONO$_{2}$',
        'PPN': 'CH$_{3}$CH$_{2}$C(O)OONO$_{2}$',
        'R4N2': '>= C4 alkylnitrates', 'PRPE': '>= C4 alkenes',
        'C3H8': 'C$_{3}$H$_{8}$', 'CH2O': 'CH$_{2}$O',
        'C2H6': 'C$_{2}$H$_{6}$', 'MP': 'CH$_{3}$OOH', 'SO2': 'SO$_{2}$',
        'SO4': 'SO$_{4}$', 'SO4s': 'SO$_{4}$ on SSA',
        'MSA': 'CH$_{4}$SO$_{3}$', 'NH3': 'NH$_{3}$', 'NH4': 'NH$_{4}$',
        'NIT': 'InOrg N', 'NITs': 'InOrg N on SSA',
        'BCPI': 'BCPI', 'OCPI': 'OCPI', 'BCPO': 'BCPO', 'OCPO': 'OCPO',
        'DST1': 'DST1', 'DST2': 'DST2', 'DST3': 'DST3', 'DST4': 'DST4',
        'SALA': 'SALA', 'SALC': 'SALC', 'HBr': 'HBr',
        'BrNO2': 'BrNO$_{2}$', 'BrNO3': 'BrNO$_{3}$',
        'MPN': 'CH$_{3}$ON$_{2}$', 'ISOPN': 'ISOPN', 'MOBA': 'MOBA',
        'PROPNN': 'PROPNN', 'HAC': 'HAC', 'GLYC': 'GLYC', 'MMN': 'MMN',
        'RIP': 'RIP', 'IEPOX': 'IEPOX', 'MAP': 'MAP',
        'AERI': 'Aerosol Iodine', 'Cl2': 'Cl$_{2}$', 'Cl': 'Cl',
        'HOCl': 'HOCl', 'ClO': 'ClO', 'OClO': 'OClO', 'BrCl': 'BrCl',
        'HI+OIO+IONO+INO': 'HI+OIO+IONO+INO', 'CH2IX': 'CH$_{2}$IX',
        'IxOy': 'I$_{2}$O$_{X}$ ($_{X}$=2,3,4)', 'CH3I': 'CH$_{3}$I',
        'OH': 'OH', 'HO2': 'HO$_{2}$', 'MO2': 'MO$_{2}$', 'RO2': 'RO$_{2}$',
        'RD01': r'I + O$_{3}$ $\rightarrow$ IO + O$_{2}$',
        # Analysis names
        'iodine_all': 'All Iodine', 'Iy': 'I$_{Y}$', 'IOy': 'IO$_{Y}$',
        'IyOx': 'I$_{Y}$O$_{X}$', 'IOx': 'IO$_{X}$',
        'iodine_all_A': 'All Iodine (Inc. AERI)', 'I2Ox': 'I$_{2}$O$_{X}$',
        'AERI/SO4': 'AERI/SO4', 'EOH': 'Ethanol',
        'PSURF': 'Pressure at the bottom of level',
        'GMAO_TEMP': 'Temperature', 'TSKIN': 'Temperature at 2m',
        'GMAO_UWND': 'Zonal Wind', 'GMAO_VWND': 'Meridional Wind',
        'U10M': '10m Meridional Wind', 'V10M': '10m Zonal Wind',
        'CH2OO': 'CH$_{2}$OO',
        # Family Names
        'N_specs': 'NOy', 'NOy': 'NO$_Y$',
        'N_specs_no_I': 'NOy exc. iodine',
        # typos
        'CH2BR2': 'CH$_{2}$Br$_{2}$',
    }
    return spec_dict[input_x]


# --------------
# 4.06 - converts P/L tracer mulitpler to 1
# --------------
def p_l_unity(rxn, debug=False):
    """
    Return the multiplier needed to convert a prod/loss tracer
    coefficient back to unity for the given reaction tag.

    Raises KeyError for reactions not in the table.
    """
    p_l_dict = {
        'LR24': 1.0, 'LR25': 1.0, 'LR26': 1.0, 'LR27': 1.0, 'LR20': 1.0,
        'LR21': 1.0, 'LR22': 1.0, 'LR30': 1.0, 'LR31': 1.0, 'LR23': 1.0,
        'LR28': 1.0, 'LR29': 1.0, 'RD09': 1.0, 'PO3_46': 0.25, 'LR3': 1.0,
        'LR2': 1.0, 'RD02': 1.0, 'PO3_03': 0.3, 'PO3_14': 1.0,
        'PO3_02': 0.15, 'PO3_05': 0.15,
    }
    return p_l_dict[rxn]


# --------------
# 4.07 - Returns tracers unit and scale (if requested)
# --------------
def tra_unit(x, scale=False, adjustment=False, adjust=True,
             global_unit=False, ClearFlo_unit=False, debug=False):
    """
    Return the mixing-ratio unit for tracer `x`, optionally with a
    numeric scaling factor and/or an additive adjustment.

    Parameters
    ----------
    x : str, tracer name.
    scale : bool, also return the multiplicative scaling for the unit.
    adjustment : bool, also return the additive adjustment
        (e.g. Kelvin -> Celsius offset).
    adjust : bool, apply planeflight (pf) per-species unit overrides
        (looked up via GC_var 'spec_2_pptv'/'spec_2_pptC').
    global_unit : bool, override to globally appropriate units
        (GC_var 'spec_2_ppbv'/'spec_2_ppbC').
    ClearFlo_unit : bool, apply ClearFlo campaign unit conventions.

    Returns
    -------
    units, or (units, scaleby), or (units, scaleby, adjustby)
    depending on `scale`/`adjustment`.
    """
    tra_unit = {
        'OCPI': 'ppbv', 'OCPO': 'ppbv', 'PPN': 'ppbv', 'HIO3': 'pptv',
        'O3': 'ppbv', 'PAN': 'ppbv', 'ACET': 'ppbC', 'RIP': 'ppbv',
        'BrNO3': 'pptv', 'Br': 'pptv', 'HBr': 'pptv', 'HAC': 'ppbv',
        'ALD2': 'ppbC', 'HNO3': 'ppbv', 'HNO2': 'ppbv', 'C2H5I': 'pptv',
        'HNO4': 'ppbv', 'OIO': 'pptv', 'MAP': 'ppbv', 'PRPE': 'ppbC',
        'HI': 'pptv', 'CH2I2': 'pptv', 'IONO2': 'pptv', 'NIT': 'ppbv',
        'CH3Br': 'pptv', 'C3H7I': 'pptv', 'C3H8': 'ppbC', 'DMS': 'ppbv',
        'CH2O': 'ppbv', 'CH3IT': 'pptv', 'NO2': 'ppbv', 'NO3': 'ppbv',
        'N2O5': 'ppbv', 'CHBr3': 'pptv', 'DST4': 'ppbv', 'DST3': 'ppbv',
        'DST2': 'ppbv', 'DST1': 'ppbv', 'HOCl': 'ppbv', 'NITs': 'ppbv',
        'RCHO': 'ppbv', 'C2H6': 'ppbC', 'MPN': 'ppbv', 'INO': 'pptv',
        'MP': 'ppbv', 'CH2Br2': 'pptv', 'SALC': 'ppbv', 'NH3': 'ppbv',
        'CH2ICl': 'pptv', 'IEPOX': 'ppbv', 'ClO': 'ppbv', 'NO': 'pptv',
        'SALA': 'ppbv', 'MOBA': 'ppbv', 'R4N2': 'ppbv', 'BrCl': 'pptv',
        'OClO': 'ppbv', 'PMN': 'ppbv', 'CO': 'ppbv', 'CH2IBr': 'pptv',
        'ISOP': 'ppbC', 'BCPO': 'ppbv', 'MVK': 'ppbv', 'BrNO2': 'pptv',
        'IONO': 'pptv', 'Cl2': 'ppbv', 'HOBr': 'pptv', 'PROPNN': 'ppbv',
        'Cl': 'ppbv', 'I2O2': 'pptv', 'I2O3': 'pptv', 'I2O4': 'pptv',
        'I2O5': 'pptv', 'MEK': 'ppbC', 'MMN': 'ppbv', 'ISOPN': 'ppbv',
        'SO4s': 'ppbv', 'I2O': 'pptv', 'ALK4': 'ppbC', 'MSA': 'ppbv',
        'I2': 'pptv', 'Br2': 'pptv', 'IBr': 'pptv', 'MACR': 'ppbv',
        'I': 'pptv', 'AERI': 'pptv', 'HOI': 'pptv', 'BrO': 'pptv',
        'NH4': 'ppbv', 'SO2': 'ppbv', 'SO4': 'ppbv', 'IO': 'pptv',
        'H2O2': 'ppbv', 'BCPI': 'ppbv', 'ICl': 'pptv', 'GLYC': 'ppbv',
        # Extra diagnostics to allow for simplified processing
        'CH3I': 'pptv', 'Iy': 'pptv', 'PSURF': 'hPa', 'OH': 'pptv',
        'HO2': 'pptv', 'MO2': 'pptv',
        # NOTE(review): 'pptbv' looks like a typo for 'pptv' — kept as-is
        # because get_unit_scaling() behaviour depends on the exact string.
        'NOy': 'pptbv',
        'EOH': 'ppbv', 'CH4': 'ppbv', 'TSKIN': 'K', 'GMAO_TEMP': 'K',
        'GMAO_VWND': 'm/s', 'GMAO_UWND': 'm/s', 'RO2': 'pptv',
        'U10M': 'm/s', 'V10M': 'm/s', 'PRESS': 'hPa', 'CH2OO': 'pptv',
    }
    units = tra_unit[x]
    # Adjust to appropriate scale for pf analysis
    if adjust:
        spec_2_pptv = GC_var('spec_2_pptv')
        spec_2_pptC = GC_var('spec_2_pptC')
        if x in spec_2_pptv:
            if debug:
                print('adjusting {} ({}) to {}'.format(x, units, 'pptv'))
            units = 'pptv'
        if x in spec_2_pptC:
            if debug:
                print('adjusting {} ({}) to {}'.format(x, units, 'pptC'))
            units = 'pptC'
    # Over ride adjustments for globally appro. units
    if global_unit:
        spec_2_ppbv = GC_var('spec_2_ppbv')
        spec_2_ppbC = GC_var('spec_2_ppbC')
        if x in spec_2_ppbv:
            if debug:
                print('adjusting {} ({}) to {}'.format(x, units, 'ppbv'))
            units = 'ppbv'
        if x in spec_2_ppbC:
            if debug:
                print('adjusting {} ({}) to {}'.format(x, units, 'ppbC'))
            units = 'ppbC'
    if ClearFlo_unit:
        if any([x == i for i in ['NO', 'MACR', 'MVK']]):
            units = 'ppbv'
        if any([x == i for i in ['PAN']]):
            units = 'pptv'
        if any([x == i for i in ['ISOP']]):
            units = 'ppbC'
    if scale:
        scaleby = get_unit_scaling(units)
    if adjustment:
        if units == 'K':
            units = 'Deg. Celcuis'
            adjustby = -273.15
        else:
            adjustby = 0
    if scale and (not adjustment):
        return units, scaleby
    elif (scale and adjustment):
        return units, scaleby, adjustby
    else:
        return units
# --------------
# 4.09 - Ox in species
# -------------
def Ox_in_species(in_=None, rxns=False, keys=False):
    """
    Return the odd-oxygen (Ox) equivalents carried by a species
    (default), or by a tagged reaction (rxns=True), or the list of
    known species keys (keys=True).
    """
    species_Ox = {
        'HOIdf': 1.0, 'OIOdf': 2.0, 'BrNO3df': 2.0, 'HNO3df': 1.0,
        'PPNdf': 1.0, 'IOdf': 1.0, 'N2O5df': 3.0, 'IONOdf': 1.0,
        'PMNdf': 1.0, 'BrNO2df': 1.0, 'I2O4df': 4, 'MPNdf': 1.0,
        'NO3df': 2.0, 'BrOdf': 1.0, 'HOBrdf': 1.0, 'HNO4df': 1.0,
        'O3df': 1.0, 'I2O2df': 2.0, 'NO2df': 1.0, 'IONO2df': 2.0,
        'PANdf': 1.0, 'OIO': 2.0, 'BrO': 1.0, 'HOBr': 1.0, 'N2O5': 3.0,
        'IONO': 1.0, 'MPN': 1.0, 'BrNO2': 1.0, 'I2O2': 2.0, 'I2O4': 4,
        'PPN': 1.0, 'HOI': 1.0, 'HNO3': 1.0, 'IONO2': 2.0, 'NO2': 1.0,
        'IO': 1.0, 'HNO4': 1.0, 'PMN': 1.0, 'O3': 1.0, 'BrNO3': 2.0,
        'PAN': 1.0, 'NO3': 2.0,
    }
    rxn_Ox = {
        'LO3_18': 1.0, 'LR25': 1.0, 'RD12': 2.0, 'LR21': 1.0,
        'LO3_38': 1.0, 'LO3_10': 1.0, 'LO3_34': 1.0, 'LO3_35': 1.0,
        'LO3_33': 1.0, 'LO3_30': 1.0, 'LR5': 2.0, 'LR6': 2.0, 'RD37': 2.0,
        'LO3_05': 1.0, 'RD11': 2.0, 'LO3_06': 1.0, 'LO3_49': 1.0,
        'LO3_04': 1.0, 'LO3_03': 1.0, 'LO3_02': 1.0, 'LO3_42': 1.0,
        'LO3_41': 1.0, 'LO3_40': 1.0, 'LO3_47': 1.0, 'LO3_46': 1.0,
        'LO3_09': 1.0, 'LO3_44': 1.0, 'LR30': 1.0, 'LO3_24': 1.0 / 2.0,
        'LO3_21': 1.0, 'RD23': 2.0, 'LO3_54': 2.0, 'LO3_55': 1.0,
        'LO3_08': 1.0, 'LO3_50': 1.0, 'LO3_51': 1.0, 'LO3_52': 1.0,
        'LO3_53': 1.0, 'LR10': 1.0, 'LO3_36': 1.0,
        # LO3_24 set to 1 (as 0.5*CoE) even though 2 Ox equivalents are
        # lost, this allows for contribution to bromine and iodine loss
        # to be inclued
        # LOX included for processing ease
        'LOX': 1.0, 'POX': 1.0, 'PO3_14': 1.0, 'PO3_15': 1.0,
        'RD98': 1.0, 'LO3_39': 1.0, 'RD63': 1.0,
        # for prod analysis
        'PO3_69': 1.0 / 2.0, 'PO3_35': 0.85, 'PO3_03': 0.15 / 0.3,
        'PO3_70': 0.4 / 1.4, 'PO3_77': 1.0 / 2.0, 'RD06': 1.0, 'LR9': 1.0,
    }
    if rxns:
        return rxn_Ox[in_]
    if keys:
        return species_Ox.keys()
    else:
        return species_Ox[in_]


# ----
# 4.10 - Return dictionary of gaw sites
# ----
def gaw_2_name():
    """
    Map GAW site codes to human-readable names, read from the HDF site
    list (requires project helper get_dir and pandas).
    """
    wdf = get_dir('dwd') + 'ozonesurface/' + 'gaw_site_list.h5'
    df = pd.read_hdf(wdf, 'wp', mode='r')
    names = df.values[:, 1]
    # alter long name for CVO
    ind = [n for n, i in enumerate(names)
           if (i == 'Cape Verde Atmospheric Observatory')]
    names[ind[0]] = 'Cape Verde'
    return dict(zip(df.index, names))


# ----
# 4.11 - Returns list of gaw sites in HDF file of O3 surface data
# ----
def get_global_GAW_sites(f='gaw_site_list_global.h5'):
    """
    Return sorted GAW site codes from the given HDF file, minus sites
    excluded from the "grouped" analysis.
    """
    wd = get_dir('dwd') + 'ozonesurface/'
    df = pd.read_hdf(wd + f, 'wp', mode='r')
    vars = sorted(list(df.index))
    # Kludge: remove those not in "grouped" analysis
    # ( Q: why are these sites not present? - A: data control for
    # lomb-scragle)
    [vars.pop(vars.index(i)) for i in
     ['ZUG', 'ZSF', 'ZEP', 'WLG', 'USH', 'SDK', 'PYR', 'PUY', 'PAL',
      'MKN', 'IZO', 'HPB', 'DMV', 'BKT', 'AMS', 'ALT', 'ABP']]
    return vars


# --------
# 4.14 - Convert gamap category/species name to Iris/bpch name
# --------
def diagnosticname_gamap2iris(x):
    """Translate a GAMAP diagnostic category name to its Iris/bpch form."""
    d = {
        "IJ-AVG-$": 'IJ_AVG_S', "BXHGHT-$": 'BXHEIGHT',
        "PORL-L=$": 'PORL_L_S__', 'DAO-3D-$': 'DAO_3D_S__',
        'DAO-FLDS': 'DAO_FLDS__', 'DRYD-FLX': 'DRYD_FLX__',
        'DRYD-VEL': 'DRYD_VEL__', 'CHEM-L=$': 'CHEM_L_S__',
        'WETDCV-$': 'WETDCV_S__', 'WETDLS-$': 'WETDLS_S__',
        'WD-LSW-$': 'WD_LSW_S__', 'WD-LSR-$': 'WD_LSR_S__',
        'UP-FLX-$': 'UP_FLX_S__', 'NS-FLX-$': 'NS_FLX_S__',
        'EW-FLX-$': 'EW_FLX_S__', 'TURBMC-$': 'TURBMC_S__',
        'OD-MAP-$': 'OD_MAP_S__',
        # 'WD-FRC-$'
        'MC-FRC-$': 'MC_FRC_S__',
    }
    return d[x]


# --------
# 4.14 - Get scaling for a given unit
# --------
def get_unit_scaling(units):
    """
    Return the multiplicative factor converting a mixing ratio to the
    given unit (1E12 for ppt*, 1E9 for ppb*, 1 for physical units).

    Raises ValueError for unknown units (the original fell through to an
    unbound-variable NameError here).
    """
    misc = ('K', 'm/s', 'unitless', 'kg', 'm', 'm2', 'kg/m2/s',
            'molec/cm2/s', 'mol/cm3/s', 'kg/s', 'hPa')
    if any([(units == i) for i in ('pptv', 'pptC')]):
        scaleby = 1E12
    elif any([(units == i) for i in ('ppbv', 'ppbC')]):
        scaleby = 1E9
    elif any([units == i for i in misc]):
        scaleby = 1
    else:
        print('WARNING: This unit is not in unit lists: ', units)
        raise ValueError('unknown unit: {}'.format(units))
    return scaleby
# -------------- 5.01 - Return MUTD runs
# --------------
def MUTD_runs(standard=True, sensitivity=False, titles=False,
              IO_obs=False, no_I2Ox=False, respun=True,
              preindustrial=False, skip3=False, v10v92comp=False,
              nested_EU=False, just_bcase_no_hal=False, just_std=False,
              just_bcase_std=False, ver='1.6', res='4x5',
              debug=False):
    """
    Dictionary storage for iGEOSChem most up to date (MUTD) runs.
    Returns a list of run directories and (optionally) LaTeX titles.

    Relies on project helpers get_dir() and GC_var(); the selection
    flags are mutually arranged as in the original call sites.
    """
    if debug:
        print(standard, sensitivity, titles, IO_obs, preindustrial, skip3,
              nested_EU, just_bcase_std, ver)
    pwd = get_dir('rwd')
    l_dict = GC_var('latex_run_names')
    # Get version directories for versions / resolution
    d = {
        '4x5': {
            '1.6': 'iGEOSChem_1.6_G5/',
            # '1.6':'iGEOSChem_1.6.1_G5/',
            '1.6.1': 'iGEOSChem_1.6.1_G5/',
            '1.5': 'iGEOSChem_1.5_G5/',
            '1.7': 'iGEOSChem_1.7_v10/',
        },
        '2x2.5': {'1.6': 'iGEOSChem_1.6_G5_2x2.5/'},
    }[res]
    d = d[ver]
    if sensitivity:
        l = ['no_hal', 'Just_Br', 'just_I', 'run', 'Just_I_org',
             'I2Ox_double', 'I2Ox_half', 'het_double', 'het_half',
             'no_het', 'Sulfate_up', 'MacDonald_iodide', 'I2Ox_phot_x2',
             'I2Ox_phot_exp', 'no_I2Ox', 'BrO2pptv']
        r = [d + i for i in l]
        # adjust names to inc. repsin
        # if respun:
        #     r = [ i+'.respun' for i in r ]
    if standard and (not any([preindustrial, sensitivity, v10v92comp])):
        if just_bcase_std:
            l = ['Just_Br', 'run']
        elif just_bcase_no_hal:
            l = ['no_hal', 'run']
        elif just_std:
            l = ['Just_Br']
        else:
            if ver == '1.7':
                l = ['Just_Br', 'run']
            else:
                l = ['no_hal', 'Just_Br', 'just_I', 'run']
        r = [d + i for i in l]
    if nested_EU:
        d = 'iGEOS_Chem_1.6_G5_NPOINTS/'
        if just_bcase_std:
            l = ['Just_Br', 'run']
        else:
            l = ['no_hal', 'run']
        r = [d + i for i in l]
    # Setup latex titles list
    if titles and (not any([preindustrial, v10v92comp])) and \
            (standard or sensitivity or nested_EU):
        l = [l_dict[i] for i in l]
    if v10v92comp:
        if just_bcase_std:
            l = ['Just_Br', 'run']
        else:
            l = ['no_hal', 'run']
        r = ['iGEOSChem_1.7_v10/' + i for i in l] + \
            ['iGEOS_Chem_1.6_G5/' + i for i in l]
        l = ['GEOS-Chem v10 (no hal)', 'Iodine Sim. (v10)'] + \
            ['GEOS-Chem v9-2 (no hal)', 'Iodine Sim.']
    if IO_obs:
        l = ('run_CVO', 'run_GRO', 'run_MAL', 'run_HAL',
             'run_TOR_updated.respun')
        r = [d + i for i in l]
    if no_I2Ox:
        r = [i + '_no_I2Ox' for i in r]
    if preindustrial:
        r = ['iGEOS_Chem_1.5_G5/no_hal', 'iGEOS_Chem_1.5_G5/run',
             'iGEOS_Chem_1.5_G5_preindustrial_no_hal/no_hal',
             'iGEOS_Chem_1.5_G5_preindustrial/run']
        l = ['(I-,Br-)', '(I+,Br+)', '(I-,Br-) - 1750', '(I+,Br+) - 1750']
    if debug:
        print([pwd + i for i in r], l)
    if skip3:
        # drop the first 3 entries of both lists in lock-step
        [[i.pop(0) for i in (l, r)] for ii in range(3)]
    if titles:
        return [pwd + i for i in r], l
    else:
        return [pwd + i for i in r]


# --------------
# 5.02 - Store of constants for use by funcs/progs
# --------------
def constants(input_x, debug=False):
    """Dictionary storing commonly used physical constants."""
    con_dict = {
        # relative molecular mass of (dry) air
        'RMM_air': (.78 * (2. * 14.) + .22 * (2. * 16.)),
        # Avogadro's number
        'AVG': 6.0221413E23,
        # molecules/cm2 per Dobson Unit
        'mol2DU': 2.69E20,
    }
    return con_dict[input_x]


# -------------
# 6.01 - Extract reactions to form a dictionary of active reactions
# -------------
def rxn_dict_from_smvlog(wd, PHOTOPROCESS=None, ver='1.7', LaTeX=False,
                         debug=False):
    """
    Build a dictionary of reaction details from smv2.log.

    This can be used as an external call to analyse other prod/loss
    reactions through smvgear.
    """
    if isinstance(PHOTOPROCESS, type(None)):
        # first reaction index treated as photolysis, per model version
        PHOTOPROCESS = {'1.6': 457, '1.7': 467}[ver]
    fn = 'smv2.log'
    if debug:
        print(wd + '/' + fn)
    rxns = []
    readrxn = False
    with open(wd + '/' + fn, 'r') as file_:
        for row in file_:
            row = row.split()
            # rows between a 'NMBR' header and the next blank line hold rxns
            if 'NMBR' in row:
                readrxn = True
            if len(row) < 1:
                readrxn = False
            if readrxn:
                rxns.append(row)
    # -- remove 'NMBR' header rows
    rxns = [i for i in rxns if ('NMBR' not in i)]
    n = [int(rxn[0]) for rxn in rxns]
    rxns = [rxn[1:] for rxn in rxns]
    rdict = dict(zip(n, rxns))
    # --- Process to LaTeX-formatted reaction strings
    if LaTeX:
        rxn_strs, tagged = [], []
        for rxn in sorted(rdict.keys()):
            # photolysis reactions get the hv arrow
            if rxn > PHOTOPROCESS - 1:
                xarstr = r' + hv $\rightarrow$ '
            else:
                xarstr = r' + M $\rightarrow$ '
            try:
                rxn_str = ''.join(rdict[rxn][4:])
                rxn_str = rxn_str.replace('++=', xarstr)
                rxn_str = rxn_str.replace('+', ' + ')
                rxn_str = rxn_str.replace('+ =', r' $\rightarrow$ ')
                if debug:
                    print(rxn_str)
                rxn_strs += [rxn_str]
                tagged += [rxn]
            except Exception:
                print('!' * 100,
                      'ERROR HERE: {} {}'.format(rxn, rdict[rxn]))
        rdict = dict(zip(tagged, rxn_strs))
    return rdict


# -------------
# 6.02 - Extract reactions tracked by prod loss diag for a given family
# -------------
def rxns_in_pl(wd, spec='LOX', debug=False):
    """Extract reactions tracked by a p/l family in smvgear (smv2.log)."""
    fn = 'smv2.log'
    rxns = []
    readrxn = False
    with open(wd + '/' + fn, 'r') as file_:
        for row in file_:
            row = row.split()
            if all([i in row
                    for i in ('Family', 'coefficient', 'rxns', spec)]):
                readrxn = True
            if len(row) < 1:
                readrxn = False
            if readrxn:
                rxns.append(row)
    # -- Check that rxns have been found
    if len(rxns) < 1:
        print('ERROR: No rxns. found for >{}<, correct family?'.format(
            spec))
        sys.exit(0)
    # -- remove 'Family' header rows
    rxns = [i for i in rxns if ('Family' not in i)]
    n = [int(rxn[1]) for rxn in rxns]
    rxns = [rxn[2:] for rxn in rxns]
    return dict(zip(n, rxns))
# -------------
# 6.03 - Extract reaction infomation for given tracer tags
# -------------
def rxn4pl(pls, wd='example/example', rdict=None, reduce_size=True,
           debug=False):
    """
    Get information on reactions in smvgear from provided reaction tags.

    Parameters
    ----------
    pls : list of str, p/l reaction tags to look up.
    wd : str, run directory containing smv2.log.
    rdict : dict or None, pre-built reaction dict (built if None).
    reduce_size : bool, prune rdict to only matching reactions first.

    Returns
    -------
    dict mapping each tag to its reaction detail list.
    """
    if debug:
        print('rxn4pl called')
    if isinstance(rdict, type(None)):
        # Get dict of all reactions, keyed by reaction number
        rdict = rxn_dict_from_smvlog(wd)
    if debug:
        for i in rdict.keys():
            if any([(s_ in ''.join(rdict[i])) for s_ in pls]):
                print(i, 'yes')
    # reduce dict size to only reactions containing a requested tag
    if reduce_size:
        keys = [i for i in rdict.keys()
                if any([(s_ in ''.join(rdict[i])) for s_ in pls])]
        rdict = dict(zip(keys, [rdict[i] for i in keys]))
    # pair each tag with the first reaction number containing it
    keys = np.array([[pl, [i for i in rdict.keys()
                           if any([(pl in ''.join(rdict[i]))])][0]]
                     for pl in pls])
    # --- Return as reactions referenced by tag
    return dict(zip(keys[:, 0], [rdict[int(i)] for i in keys[:, 1]]))


# -------------
# 6.04 - Construct a list of indicies for each fam from given tags
# -------------
def get_indicies_4_fam(tags, fam=False, IO_BrOx2=False, rtnspecs=False,
                       NOy_as_HOx=True, debug=False):
    """
    Return indices (in list form) grouping the given tags by family
    (Photolysis / HOx / Bromine / Iodine, via get_tag_fam).
    """
    famsn = ['Photolysis', 'HOx', 'Bromine', 'Iodine']
    fams = [get_tag_fam(tag) for tag in tags]
    # if rm NOy family (treat as NOx)
    if NOy_as_HOx:
        fams = [x if (x != 'NOy') else 'HOx' for x in fams]
    fd = dict(zip(tags, fams))
    ll = [[] for i in famsn]
    for n, tag in enumerate(tags):
        for fn in range(len(famsn)):
            if fd[tag] == famsn[fn]:
                ll[fn].append(n)
    if fam:
        # Kludge - to allow for counting Ox loss via IO + BrO 50/50
        # ( add extra loss tag. )
        if IO_BrOx2:
            ll[famsn.index('Bromine')].append(
                max([max(i) for i in ll]) + 1)
            fams = fams + ['Bromine']
            tags = tags + ['LO3_24']
        if rtnspecs:
            return ll, fams, tags
        else:
            return ll, fams
    else:
        return ll


# -------------
# 6.05 - Get tags for reactions
# -------------
def get_p_l_tags(rxns, debug=False):
    """
    Get p/l tags for given smvgear reactions.

    Tags are recognised by the prefixes PD??, RD??, LO3_??, PO3_??, LR??.
    Returns one list of tags per reaction.
    """
    tag_prefixes = ('PD', 'RD', 'PO3', 'LO3', 'LR')
    tagsl = []
    for rxn in rxns:
        tags = [i for i in rxn
                if any([x in i for x in tag_prefixes])]
        if debug:
            print(tags)
        tagsl.append(tags)
    return tagsl


# -------------
# 6.06 - extract reactions tracked by prod loss diag in input.geos
# -------------
def p_l_species_input_geos(wd, ver='1.7', rm_multiple_tagged_rxs=False):
    """
    Extract prod/loss species (input.geos) and reaction tags
    (globchem.dat) for a run directory.
    """
    # find and open input.geos file
    fn = glob.glob(wd + '/*input.geos*')[0]
    # Read in just the prod loss section
    strs_in_1st_line = ('Number', 'of', 'P/L', 'families')
    section_line_divider = ('------------------------+----------'
                            '--------------------------------------------')
    rxns = []
    readrxn = False
    with open(fn, 'r') as file_:
        for row in file_:
            row = row.split()
            # once at prod/loss section, start adding to list
            if all([i in row for i in strs_in_1st_line]):
                readrxn = True
            # at end of prod/loss section, stop adding to list
            if section_line_divider in row:
                readrxn = False
            if readrxn:
                rxns.append(row)
    # -- Only consider 'Family' rows ( no headers e.g. 'families' )
    rxns = [i for i in rxns if ('families' not in i)]
    rxns = [[i.replace(':', '') for i in r] for r in rxns]
    # Kludge, adjust for extra space 12-99
    # ( This is no longer required for 1.7 + )
    if ver == '1.6':
        [i.pop(0) for i in rxns if ('th' not in i[0])]
    # Extract just PD (input.geos) and vars (globchem.dat vars )
    PD = [rxn[4] for rxn in rxns]
    vars = [rxn[5:] for rxn in rxns]
    # remove p/l with multiple values ( start from 12th input) - Kludge?
    if rm_multiple_tagged_rxs:
        PD, vars = [i[11:] for i in (PD, vars)]
        vars = [i[0] for i in vars]
    return PD, vars
#   'families' )
    rxns = [ i for i in rxns if ( 'families' not in i ) ]
    rxns = [ [ i.replace(':','') for i in r ] for r in rxns ]
    # Kludge, adjust for extra space 12-99
    # ( This is no longer required for 1.7 + )
    if ver == '1.6':
        [i.pop(0) for i in rxns if ('th' not in i[0]) ]
    # Extract just PD (input.geos) and vars (globchem.dat vars )
    # NOTE(review): 'vars' shadows the builtin of the same name
    PD = [rxn[4] for rxn in rxns ]
    vars = [rxn[5:] for rxn in rxns ]
    # remove p/l with muliple values ( start from 12th input) - Kludge?
    if rm_multiple_tagged_rxs:
        PD, vars = [ i[11:] for i in PD, vars ]
        vars = [ i[0] for i in vars ]
    return PD, vars

# -------------
# 6.07 - extract all active tags from smv.log
# -------------
def tags_from_smvlog( wd ): #, spec='LOX' ):
    """ Get all active p/l tags in smvgear ( from smv2.log ).

        wd: run directory containing smv2.log.
        Returns list of species-name tokens containing PD/RD/PO3/LO3/LR.
    """
    fn = 'smv2.log'
    # NOTE(review): file handle is never closed
    file_ = open( wd+'/'+fn, 'rb' )
    readrxn = False
    for row in file_:
        row = row.split()
        # species table starts at the 'NBR NAME MW BKGAS(VMRAT)' header
        if all( [(i in row) for i in ['NBR', 'NAME', 'MW', 'BKGAS(VMRAT)'] ]):
            readrxn=True
        # ...and ends at the first blank line
        if len(row) < 1 :
            readrxn=False
        if readrxn:
            # rxns created on first append via NameError + bare except
            try:
                rxns.append( row )
            except:
                rxns = [ row ]
    # -- remove 'NMBR'
    rxns = [ i for i in rxns if ( 'NBR' not in i ) ]
    rxns = [rxn[1] for rxn in rxns ]
    # --- only consider tags
    return [i for i in rxns if any( [x in i \
        for x in 'PD', 'RD', 'PO3','LO3' , 'LR' ]) ]

# -------------
# 6.08 - extract all active PDs from smv.log
# -------------
def PDs_from_smvlog( wd, spec='LOX' ):
    """ Get all active PDs tags in smvgear ( from smv2.log ).

        wd: run directory containing smv2.log.
        Returns flat list of PD family tokens listed under the
        'Families for prod or loss output:' section.
    """
    fn = 'smv2.log'
    # NOTE(review): file handle is never closed
    file_ = open( wd+'/'+fn, 'rb' )
    readrxn = False
    # leniency allows one blank line inside the section before stopping
    leniency = 0
    entries_in_title = ['Families', 'for', 'prod', 'or', 'loss', 'output:']
    for row in file_:
        row = row.split()
        if all( [(i in row) for i in entries_in_title ]):
            readrxn=True
            leniency = 1
        if len(row) < 1 :
            if leniency < 0:
                readrxn=False
            leniency -= 1
        if readrxn:
            # rxns created on first append via NameError + bare except
            try:
                rxns.append( row )
            except:
                rxns = [ row ]
    # -- remove 'NMBR'
    exceptions = [ 'SPECIES', '===============================================================================','Families']
    rxns = [ i for i in rxns if all( [ (ii not in i)
        for ii in exceptions ]) ]
    # flatten rows of tokens into one list
    rxns = [j for k in rxns for j in k ]
    return rxns

# -------------
# 6.09 - Give all reactions tag is active within
# -------------
def rxns4tag( tag, rdict=None, ver='1.7', wd=None ):
    """ get a list of all reactions with a given p/l tag.

        Returns [[reaction number] + reaction detail, ...] for every
        reaction whose tokens end with the tag.
    """
    # --- get reaction dictionary
    if rdict == None:
        rdict = rxn_dict_from_smvlog( wd, ver=ver )
    # --- Caveats - to adapt for long line errors in fortran written output
    errs = ['LO3_36']
    cerrs = ['RD95']
    if any([ (tag == i) for i in errs ] ):
        tag = cerrs[ errs.index( tag) ]
    # -- loop reactions, if tag in reaction return reaction
    # NOTE(review): rdict.keys()[n] relies on keys()/values() sharing order
    # (and on py2 list-returning keys()) - confirm before any py3 port
    rxns = []
    for n, rxn in enumerate( rdict.values() ):
#        if any( [tag in i for i in rxn]):
        if any( [(i.endswith(tag) ) for i in rxn]):
            rxns.append( [rdict.keys()[n] ]+ rxn )
    return rxns

# -------------
# 6.10 - get details for a given tag
# -------------
def get_tag_details( wd, tag=None, PDs=None, rdict=None, \
        PHOTOPROCESS=None, ver='1.7', LaTeX=False, print_details=False, \
        debug=False ):
    """ Retriveve prod/loss tag details from smv.log
        ( rxn number + reaction description)

        Returns [tag, reaction number, reaction string] unless
        print_details is set (then prints and returns None).
    """
    # what is the number of the first photolysis reaction?
    if isinstance( PHOTOPROCESS, type(None) ):
        PHOTOPROCESS = { '1.6' : 457, '1.7' : 467 }[ver]
    # --- get all reactions tags are active in smv.log
    if rdict == None:
        rdict = rxn_dict_from_smvlog( wd, ver=ver )
    trxns = rxns4tag( tag, wd=wd, rdict=rdict )
    # --- get all print on a per tag basis the coe, rxn str
    try:
        rxn_str = ''.join( trxns[0][5:9])
        # -- Use Latex formatting?
# if LaTeX: # # setup latex arrows and replace existing arrows # if trxns[0][0] > PHOTOPROCESS-1: # xarstr = r' $\xrightarrow{hv}$ ' # else: # xarstr = r' $\xrightarrow{M}$ ' # rxn_str = rxn_str.replace('++=', xarstr).replace('+', ' + ') # rxn_str = rxn_str.replace('+ =', r' $\rightarrow$ ' ) # else: # pass if debug: print rxn_str dets = [ tag, trxns[0][0], rxn_str ] except: print '!'*100, 'ERROR HERE: {} {}'.format( tag, trxns ) if print_details: print dets # --- return a dictionary of all active tagged reactions details by tag : PD, number, rxn str, coeeffiecn else: return dets # ------------- # 6.11 - Takes a reaction number add gives the Ox Coe # ------------- def get_rxn_Coe(wd, num, tag, nums=None, rxns=None, tags=None, \ Coe=None, spec='LOX', debug=False): """ Retrieve given reaction coefficient for smvgear (from smv2.log) """ # --- get dictionaries for reactions within if all( [ (i == None) for i in nums, rxns, tags, Coe ] ): nums, rxns, tags, Coe = prod_loss_4_spec( wd, spec, all_clean=True ) # Pull reaction coefficient Coe_dict = dict(zip(nums, Coe) ) Coe = float(Coe_dict[ num ]) if ('P' not in tag ): # consider all change positive - Kludge due to the assignment approach. Coe = Coe*-1.0 # --- over write Ox in species with prod_mod_tms values try: Coe = Ox_in_species(tag, rxns=True) if debug: print 'using values from Ox_in_species' except: if debug: print 'using values from smv.log @: {}'.format(wd) return Coe # -------------- Section 7 ------------------------------------------- # -------------- Observational Variables # # 5.02 - obs sites (e.g. deposition locs, lons, alts ) # 5.04 - Store site locations ( LAT, LON, ALT) # -------------- # 7.01 - open ocean IO data # -------------- def IO_obs_data(just_IO=False): """ Dictionary of open ocean IO observations for automated comparisons key = ref (e.g. 
        name_year ), values = alt, lon, lat, times, full_name , IO (avg),
        BrO (avg), CAM-Chem IO, CAM-Chem BrO, group """
    # value ordering per docstring above; string entries ('-', '<0.5', ...)
    # are as-published averages - units not stated here, confirm upstream
    IO_obs_dict={
        'Read_2008': [0.0, -24.87, 16.85, 6.0, 'Read, 2008', '1', '2', '1', '2', 'Leeds'],
        'Weddel_Sea': [0.03, -50.0, 75.0, 10.0, 'Weddel Sea', '-', '-', '-', '-', '-'],
        'Oetjen_2009': [0.0, 73.5, 5.0, 2.0, 'Oetjen, 2009', '2.4', '-', '1', '-', 'Leeds'],
        'Jones_2010_II': [0.0, -19.0, 30.0, 7.0, 'Jones, 2010, RHaMBLe II (26-36N)', '-', '-', '-', '-', '-'],
        'Leser_2003': [0.0, -24.87, 16.85, 10.0, 'Leser, 2003', '-', '3.6', '-', '0.8', 'Heidelberg'],
        'Jones_2010_I': [0.0, -19.0, 20.0, 7.0, 'Jones, 2010, RHaMBLe I (15-25N)', '-', '-', '-', '-', '-'],
        'Halley_BAS': [0.03, -26.34, 75.35, 10.0, 'Halley, BAS', '-', '-', '-', '-', '-'],
        'Theys_2007': [0.0, -20.9, -55.5, 8.0, 'Theys, 2007', '-', '<0.5', '-', '0.8', 'Belgium'],
        'Dix_2013': [9.0, -160.0, 10.0, 1.0, 'Dix, 2013', '0.1', '-', '', '-', 'NCAR'],
        'Allan_2000': [0.162, -16.6, 28.4, 6.0, 'Allan, 2000', '1.2', '-', '0.4', '-', 'Leeds'],
        'Jones_2010_MAP': [0.0, -10.0, 55.0, 6.0, 'Jones, 2010, MAP', '-', '-', '-', '-', '-'],
        'Grobmann_2013': [0.0, 150.0, 15.0, 1.0, 'Grobmann, 2013', '0.72-1.8', '-', '-', '-', 'Heidelberg'],
        'Dix_2013_j_comp': [9.0, -160.0, 10.0, 5.0, 'Dix, 2013 (wrong month to allow with jones runs comparisons)', '-', '-', '-', '-', '-'],
        'schonart_2009_II': [0.0, -80.0, -20.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
        'Martin_2009': [0.0, -24.87, 16.85, 2.0, 'Martin, 2009', '-', '<3.0', '-', '1.2', 'Heidelberg'],
        'schonart_2009_I': [0.0, -90.0, -5.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
        'Yokouchi_2013': [0.0, 120.0, 15.0, 2.0, 'Yokouchi, 2013', '-', '-', '-', '-', '-'],
        'Butz_2009': [0.045, -44.4, -2.4, 12.0, 'Butz, 2009', '0.1', '~1.0', '0.02', '0.5', 'Leeds']}
    # reduced set returned when just_IO is set (IO-only references)
    IO_obs_dict_just = {
        'Read_2008': [0.0, -24.87, 16.85, 6.0, 'Read, 2008', '1', '2', '1', '2', 'Leeds'],
        'schonart_2009_II': [0.0, -80.0, -20.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
        'Oetjen_2009': [0.0, 73.5, 5.0, 2.0, 'Oetjen, 2009', '2.4', '-', '1', '-', 'Leeds'],
        'Allan_2000': [0.162, -16.6, 28.4, 6.0, 'Allan, 2000', '1.2', '-', '0.4', '-', 'Leeds'],
        'Leser_2003': [0.0, -24.87, 16.85, 10.0, 'Leser, 2003', '-', '3.6', '-', '0.8', 'Heidelberg'],
        'Theys_2007': [0.0, -20.9, -55.5, 8.0, 'Theys, 2007', '-', '<0.5', '-', '0.8', 'Belgium'],
        'Dix_2013': [9.0, -160.0, 10.0, 1.0, 'Dix, 2013', '0.1', '-', '', '-', 'NCAR'],
        'Grobmann_2013': [0.0, 135.0, 15.0, 1.0, 'Grobmann, 2013', '1.095', '-', '-', '-', 'Heidelberg'],
        'schonart_2009_I': [0.0, -90.0, -5.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
        'Martin_2009': [0.0, -24.87, 16.85, 2.0, 'Martin, 2009', '-', '<3.0', '-', '1.2', 'Heidelberg'],
        'Butz_2009': [0.045, -44.4, -2.4, 12.0, 'Butz, 2009', '0.1', '~1.0', '0.02', '0.5', 'Leeds'],
        'Mahajan_2010': [0., -100, -10, 4.0, 'Mahajan, 2010', '0.58', '-', '-', '-', 'Leeds'] }
    if (just_IO):
        return IO_obs_dict_just
    else:
        return IO_obs_dict

# --------------
# 7.02 - BAE flight ID to date
# -------------
def bae_flight_ID_2_date( f_ID, debug=False):
    """ BAE flight flight ID to date for CAST campaign.

        f_ID: flight number string (e.g. '847'); raises KeyError if unknown.
    """
    if (debug):
        print 'bae_flight_ID_2_date called'
    f_ID_dict = {'847':'2014-02-18','846':'2014-02-17','845':'2014-02-17', '844':'2014-02-16','843':'2014-02-15','842':'2014-02-17','840':'2014-02-13','839':'2014-02-12','838':'2014-02-05','837':'2014-02-04','836':'2014-02-04','835':'2014-02-03','834':'2014-02-01','833':'2014-02-01','832':'2014-01-30','831':'2014-01-30','830':'2014-01-30','829':'2014-01-28','828':'2014-01-26','827':'2014-01-26','826':'2014-01-25','825':'2014-01-24','824':'2014-01-21','823':'2014-01-18'}
    return f_ID_dict[f_ID]

# --------------
# 7.03 - details on flights from FAAM's NETCDF core data files,
# --------------
def CAST_flight(all=True, CIMS=False, CIMSII=False):
    """ Callable dictionary of CAST flight details
        removed 'CAST_flight, 'b822': '20140108''

        Values per flight: [start date, start time, end date, end time,
        start epoch, end epoch].
        NOTE(review): 'all' shadows the builtin; if all=False the function
        returns None implicitly; CIMSII=True overrides a CIMS=True dict.
    """
    flight_dict={
        'b828': ['20140126', '0559', '20140126', '0930', 1390715993, 1390728623],
        'b829': ['20140128', '1932', '20140129', '0224', 1390937523, 1390962280],
        'b824': ['20140121', '1956', '20140122', '0413', 1390334165, 1390363990],
        'b825': ['20140124', '1940', '20140125', '0140', 1390592404, 1390614058],
        'b826': ['20140125', '0128', '20140125', '0637', 1390613301, 1390631839],
        'b827': ['20140126', '0041', '20140126', '0445', 1390696903, 1390711511],
        'b823': ['20140118', '1713', '20140119', '0147', 1390065184, 1390096069],
        'b839': ['20140212', '0320', '20140212', '0824', 1392175209, 1392193471],
        'b838': ['20140205', '2246', '20140206', '0545', 1391640363, 1391665549],
        'b837': ['20140204', '1901', '20140205', '0318', 1391540463, 1391570305],
        'b836': ['20140204', '0134', '20140204', '0627', 1391477699, 1391495264],
        'b835': ['20140203', '2101', '20140204', '0104', 1391461263, 1391475894],
        'b834': ['20140201', '0610', '20140201', '1217', 1391235033, 1391257044],
        'b833': ['20140201', '0045', '20140201', '0606', 1391215547, 1391234764],
        'b832': ['20140130', '0622', '20140130', '1131', 1391062943, 1391081485],
        'b831': ['20140130', '0101', '20140130', '0614', 1391043666, 1391062492],
        'b830': ['20140129', '0239', '20140129', '0830', 1390963178, 1390984251],
        'b846': ['20140217', '2058', '20140218', '0510', 1392670683, 1392700201],
        'b847': ['20140218', '0503', '20140218', '0857', 1392699809, 1392713860],
        'b844': ['20140216', '1857', '20140217', '0324', 1392577026, 1392607467],
        'b845': ['20140217', '0323', '20140217', '0753', 1392607430, 1392623590],
        'b842': ['20140214', '0508', '20140214', '1035', 1392354530, 1392374130],
        'b843': ['20140215', '1903', '20140216', '0400', 1392490986, 1392523256],
        'b840': ['20140213', '0005', '20140213', '0658', 1392249904, 1392274686],
        'b841': ['20140214', '0004', '20140214', '0458', 1392336265, 1392353901]}
    # CIMS instrument coverage windows (start/end times differ from core)
    if (CIMS):
        flight_dict={
            'b828': ['20140126', '0559', '20140126', '0930', 1390715993, 1390728623],
            'b829': ['20140128', '2322', '20140129', '0224', 1390937523, 1390962280],
            'b824': ['20140122', '0155', '20140122', '0411', 1390334165, 1390363990],
            'b825': ['20140124', '2230', '20140125', '0034', 1390592404, 1390614058],
            'b826': ['20140125', '0128', '20140125', '0637', 1390613301, 1390631839],
            'b827': ['20140126', '0041', '20140126', '0445', 1390696903, 1390711511],
            'b823': ['20140118', '2006', '20140119', '0055', 1390065184, 1390096069],
            'b839': ['20140212', '0320', '20140212', '0824', 1392175209, 1392193471],
            'b838': ['20140205', '2246', '20140206', '0545', 1391640363, 1391665549],
            'b837': ['20140204', '2333', '20140205', '0315', 1391540463, 1391570305],
            'b836': ['20140204', '0134', '20140204', '0608', 1391477699, 1391495264],
            'b835': ['20140203', '2316', '20140204', '0104', 1391461263, 1391475894],
            'b834': ['20140201', '0610', '20140201', '1220', 1391235033, 1391257044],
            'b833': ['20140201', '0145', '20140201', '0606', 1391215547, 1391234764],
            'b832': ['20140130', '0622', '20140130', '1123', 1391062943, 1391081485],
            'b831': ['20140130', '0201', '20140130', '0614', 1391043666, 1391062492],
            'b830': ['20140129', '0249', '20140129', '0825', 1390963178, 1390984251],
            'b846': ['20140217', '2058', '20140218', '0510', 1392670683, 1392700201],
            'b847': ['20140218', '0503', '20140218', '0857', 1392699809, 1392713860],
            'b844': ['20140216', '1857', '20140217', '0324', 1392577026, 1392607467],
            'b845': ['20140217', '0323', '20140217', '0750', 1392607430, 1392623590],
            'b842': ['20140214', '0611', '20140214', '1011', 1392354530, 1392374130],
            'b843': ['20140215', '2227', '20140216', '0324', 1392490986, 1392523256],
            'b840': ['20140213', '0304', '20140213', '0648', 1392249904, 1392274686],
            'b841': ['20140214', '0004', '20140214', '0456', 1392336265, 1392353901]}
    # CIMS II coverage windows
    if (CIMSII):
        flight_dict={
            'b828': ['20140126', '0559', '20140126', '0930', 1390715993, 1390728623],
            'b829': ['20140128', '2322', '20140129', '0224', 1390937523, 1390962280],
            'b824': ['20140122', '0155', '20140122', '0411', 1390334165, 1390363990],
            'b825': ['20140124', '2230', '20140125', '0034', 1390592404, 1390614058],
            'b826': ['20140125', '0141', '20140125', '0619', 1390613301, 1390631839],
            'b827': ['20140126', '0100', '20140126', '0442', 1390696903, 1390711511],
            'b823': ['20140118', '2006', '20140119', '0055', 1390065184, 1390096069],
            'b839': ['20140212', '0320', '20140212', '0824', 1392175209, 1392193471],
            'b838': ['20140205', '2246', '20140206', '0545', 1391640363, 1391665549],
            'b837': ['20140204', '2333', '20140205', '0315', 1391540463, 1391570305],
            'b836': ['20140204', '0134', '20140204', '0608', 1391477699, 1391495264],
            'b835': ['20140203', '2338', '20140204', '0104', 1391461263, 1391475894],
            'b834': ['20140201', '0619', '20140201', '1210', 1391235033, 1391257044],
            'b833': ['20140201', '0223', '20140201', '0558', 1391215547, 1391234764],
            'b832': ['20140130', '0622', '20140130', '1123', 1391062943, 1391081485],
            'b831': ['20140130', '0201', '20140130', '0614', 1391043666, 1391062492],
            'b830': ['20140129', '0249', '20140129', '0825', 1390963178, 1390984251],
            'b846': ['20140218', '0035', '20140218', '0507', 1392670683, 1392700201],
            'b847': ['20140218', '0503', '20140218', '0857', 1392699809, 1392713860],
            'b844': ['20140216', '2224', '20140217', '0324', 1392577026, 1392607467],
            'b845': ['20140217', '0323', '20140217', '0750', 1392607430, 1392623590],
            'b842': ['20140214', '0611', '20140214', '1011', 1392354530, 1392374130],
            'b843': ['20140215', '2227', '20140216', '0324', 1392490986, 1392523256],
            'b840': ['20140213', '0342', '20140213', '0648', 1392249904, 1392274686],
            'b841': ['20140214', '0050', '20140214', '0456', 1392336265, 1392353901]}
    if (all) :
        return flight_dict

# --------------
# 7.04 - Iodocarbon obs.
#        meta data
# --------------
def iodocarbon_obs():
    """ dictionary of Iodocarbon observations for automated analysis/comparions
        with observations

        Per species: list of [reference, lats, lons, values] campaigns.
        NOTE(review): units of the value lists are not stated here - confirm
        against the cited papers before plotting.
    """
    Org_obs= { 'CH3IT' : [ \
        ['Chuck et al (2005)' , list(np.linspace( -36, -49, 3) ) + \
        list( np.linspace( -20, 28, 3) ), [-20] *6, [0.71]*3 + [ 1.94]*3 ] ] ,
        'CH2ICl' : [ \
        ['Chuck et al (2005)' , list( np.linspace( -36, -49, 3 ) )+ \
        list(np.linspace( -20, 28, 3) ), [-20] *6, [0.23]*3+ [0.32]*3 ],\
        ['Jones et al (2010)' , np.linspace( 60, 15, 5), [-15] *5, [0.12]*5 ] ],
        'CH2I2' : [ \
        ['Jones et al (2010)' , np.linspace( 60,15, 5), [-15] *5, [0.01]*5 ] ],
        'CH2IBr' : [ \
        ['Jones et al (2010)' , np.linspace( 60, 15, 5), [-15] *5, [0.01]*5 ] ],
        'C2H5I' : [ \
        ['Jones et al (2010)' , np.linspace( 26, 36, 3), [120] *3, [0.09]*3 ] ],
        'I2' : [ ['Lawler et al (2014)' , [16.51] , [-24], [0.2] ] ],
        }
    return Org_obs

# --------------
# 7.05 - Stores locations for use by funcs/progs -
# --------------
def get_loc(loc=None, rtn_dict=False, debug=False):
    """ Dictionary to store locations for automated analysis

        Data arranged: LON, LAT, ALT
        - double up? ( with 5.02 ?? )
    """
    loc_dict ={
        'GUAM' : ( 144.800, 13.500, 0 ),
        'CHUUK' : ( 151.7833, 7.4167, 0 ),
        'PILAU' : ( 134.4667,7.3500, 0 ),
        'London': ( -0.1275, 51.5072, 0 ),
        'Weyborne' : (1.1380, 52.9420, 0 ),
#        'Cape Verde': (16.848, -24.871, 0 ),
#        'CVO': (16.848, -24.871, 0 ),
        'Cape Verde': ( -24.871, 16.848, 0 ),
        'CVO': (-24.871,16.848, 0 ),
        'North Ken' : (-0.214174, 51.520718, 0),
        'KEN' : (-0.214174, 51.520718, 0),
        'BT tower': (-0.139055, 51.521556, 190),
        'BTT': (-0.139055, 51.521556, 190)
        }
    if rtn_dict:
        return loc_dict
    else:
        return loc_dict[loc]

# --------------
# 7.06 - Get Locations of observations (lats, lons, alts ) for given sites
# --------------
def get_obs_loc(loc, debug=False):
    """ Dictionary to store groups of locations for automated analysis

        Returns [lats, lons] lists for the named site group.
    """
    d = {
        'Denmark' :[ [ 68.35, 59.85, 56.13, 55.69 ], [ 18.81, 17.63, 13.05, 12.10 ] ],
        # split by obs site...
        'Denmark1': [[68.35], [18.81]],
        'Denmark2': [[59.85], [17.63]],
        'Denmark3': [[56.13], [13.05]],
        'Denmark4': [[55.69], [12.1]] ,
        # Berlin, bonn, hamburg, Westerland, Munich, Brotjacklriegel, Deuselbach, Schauinsland
        'Germany' :[ [52.5167, 50.7340, 53.5653,54.9100, 52.4231, 48.1333, \
        48.491, 49.4508, 47.91111 ] ,
        [ 13.3833 , 7.0998, 10.0014, 8.3075, 10.7872, 11.5667, 13.133, 7.302, \
        7.8894] ],
        'Weyborne' :[ [ 52.9420], [1.1380 ] ]
        }
    return d[loc]

# --------------
# 7.07 - sonde station variables (list of 432 sondes)
# -------------
def sonde_STNs():
    """ Dictionary of WOUDC sonde location variables

        key = station id; values = [name, id, lat, lon, alt (m),
        country code, WMO region] - ordering inferred from entries; confirm
        against the WOUDC station list.
    """
    sonde_dict = {
        101: ['SYOWA', 101.0, -69.0, 39.58, 22.0, 'JPN', 'ANTARCTICA'],
        104: ['BEDFORD', 104.0, 42.45, -71.267, 80.0, 'USA', 'IV'],
        105: ['FAIRBANKS (COLLEGE)', 105.0, 64.817, -147.867, 138.0, 'USA', 'IV'],
        107: ['WALLOPS ISLAND', 107.0, 37.898, -75.483, 13.0, 'USA', 'IV'],
        108: ['CANTON ISLAND', 108.0, -2.76, -171.7, 3.0, 'USA', 'V'],
        109: ['HILO', 109.0, 19.5735, -155.0485, 11.0, 'USA', 'V'],
        111: ['AMUNDSEN-SCOTT (SOUTH POLE)', 111.0, -89.983, 0.0, 2820.0, 'ATA', 'ANTARCTICA'],
        131: ['PUERTO MONTT', 131.0, -41.45, -72.833, 5.0, 'CHL', 'III'],
        132: ['SOFIA', 132.0, 42.817, 23.383, 588.0, 'BGR', 'VI'],
        137: ['TOPEKA', 137.0, 39.067, -95.633, 270.0, 'USA', 'IV'],
        138: ['CHRISTCHURCH', 138.0, -43.483, 172.55, 34.0, 'NZL', 'V'],
        149: ['OVEJUYO (LA PAZ)', 149.0, -16.517, -68.033, 3420.0, 'BOL', 'III'],
        156: ['PAYERNE', 156.0, 46.49, 6.57, 491.0, 'CHE', 'VI'],
        157: ['THALWIL', 157.0, 46.817, 8.455, 515.0, 'CHE', 'VI'],
        163: ['WILKES', 163.0, -66.25, 110.517, 12.0, 'USA', 'ANTARCTICA'],
        174: ['LINDENBERG', 174.0, 52.21, 14.12, 112.0, 'DEU', 'VI'],
        175: ['NAIROBI', 175.0, -1.267, 36.8, 1745.0, 'KEN', 'I'],
        181: ['BERLIN/TEMPLEHOF', 181.0, 52.467, 13.433, 50.0, 'DEU', 'VI'],
        187: ['PUNE', 187.0, 18.553, 73.86, 559.0, 'IND', 'II'],
        190: ['NAHA', 190.0, 26.2, 127.683, 27.0, 'JPN', 'II'],
        191: ['SAMOA', 191.0, -14.25, -170.56, 82.0, 'ASM', 'V'],
        194: ['YORKTON', 194.0, 51.263, -102.467, 504.0, 'CAN', 'IV'],
        197: ['BISCARROSSE/SMS', 197.0, 44.367, -1.233, 18.0, 'FRA', 'VI'],
        198: ['COLD LAKE', 198.0, 54.783, -110.05, 702.0, 'CAN', 'IV'],
        199: ['BARROW', 199.0, 71.317, -156.635, 11.0, 'USA', 'IV'],
        203: ['FT. SHERMAN', 203.0, 9.33, -79.983, 57.0, 'PAN', 'IV'],
        205: ['THIRUVANANTHAPURAM', 205.0, 8.483, 76.97, 60.0, 'IND', 'II'],
        206: ['BOMBAY', 206.0, 19.117, 72.85, 145.0, 'IND', 'II'],
        210: ['PALESTINE', 210.0, 31.8, -95.717, 121.0, 'USA', 'IV'],
        213: ['EL ARENOSILLO', 213.0, 37.1, -6.733, 41.0, 'ESP', 'VI'],
        217: ['POKER FLAT', 217.0, 65.133, -147.45, 357.5, 'USA', 'IV'],
        219: ['NATAL', 219.0, -5.71, -35.21, 30.5, 'BRA', 'III'],
        221: ['LEGIONOWO', 221.0, 52.4, 20.967, 96.0, 'POL', 'VI'],
        224: ['CHILCA', 224.0, -12.5, -76.8, -1.0, 'PER', 'III'],
        225: ['KOUROU', 225.0, 5.333, -52.65, 4.0, 'GUF', 'III'],
        227: ['MCDONALD OBSERVATORY', 227.0, 30.666, -90.933, 2081.0, 'USA', 'IV'],
        228: ['GIMLI', 228.0, 50.633, -97.05, 228.0, 'CAN', 'IV'],
        229: ['ALBROOK', 229.0, 8.983, -79.55, 66.0, 'PAN', 'IV'],
        231: ['SPOKANE', 231.0, 47.667, -117.417, 576.0, 'USA', 'IV'],
        233: ['MARAMBIO', 233.0, -64.233, -56.623, 196.0, 'ATA', 'ANTARCTICA'],
        234: ['SAN JUAN', 234.0, 18.483, -66.133, 17.0, 'PRI', 'IV'],
        235: ['LONG VIEW', 235.0, 32.5, -94.75, 103.0, 'USA', 'IV'],
        236: ['COOLIDGE FIELD', 236.0, 17.283, -61.783, 10.0, 'ATG', 'IV'],
        237: ['GREAT FALLS', 237.0, 47.483, -111.35, 1118.0, 'USA', 'IV'],
        238: ['DENVER', 238.0, 39.767, -104.883, 1611.0, 'USA', 'IV'],
        239: ['SAN DIEGO', 239.0, 32.76, -117.19, 72.5, 'USA', 'IV'],
        242: ['PRAHA', 242.0, 50.02, 14.45, 304.0, 'CZE', 'VI'],
        254: ['LAVERTON', 254.0, -37.867, 144.75, 21.0, 'AUS', 'V'],
        255: ['AINSWORTH (AIRPORT)', 255.0, 42.583, -100.0, 789.0, 'USA', 'IV'],
        256: ['LAUDER', 256.0, -45.03, 169.683, 370.0, 'NZL', 'V'],
        257: ['VANSCOY', 257.0, 52.115, -107.165, 510.0, 'CAN', 'IV'],
        260: ['TABLE MOUNTAIN (CA)', 260.0, 34.4, -117.7, 2286.0, 'USA', 'IV'],
        262: ['SODANKYLA', 262.0, 67.335, 26.505, 179.0, 'FIN', 'VI'],
        265: ['IRENE', 265.0, -25.91, 28.211, 1524.0, 'ZAF', 'I'],
        280: ['NOVOLASAREVSKAYA / FORSTER', 280.0, -70.767, 11.867, 110.0, 'ATA', 'ANTARCTICA'],
        297: ['S.PIETRO CAPOFIUME', 297.0, 44.65, 11.617, 11.0, 'ITA', 'VI'],
        303: ['IQALUIT', 303.0, 63.75, -68.55, 20.0, 'CAN', 'IV'],
        308: ['MADRID / BARAJAS', 308.0, 40.46, -3.65, 650.0, 'ESP', 'VI'],
        315: ['EUREKA / EUREKA LAB', 315.0, 80.04, -86.175, 310.0, 'CAN', 'IV'],
        316: ['DE BILT', 316.0, 52.1, 5.18, 4.0, 'NLD', 'VI'],
        318: ['VALENTIA OBSERVATORY', 318.0, 51.93, -10.25, 14.0, 'IRL', 'VI'],
        323: ['NEUMAYER', 323.0, -70.65, -8.25, 42.0, 'ATA', 'ANTARCTICA'],
        328: ['ASCENSION ISLAND', 328.0, -7.98, -14.42, 91.0, 'SHN', 'I'],
        329: ['BRAZZAVILLE', 329.0, -4.28, 15.25, 314.0, 'COG', 'I'],
        330: ['HANOI', 330.0, 21.033, 105.84, 5.0, 'VNM', 'II'],
        333: ['PORTO NACIONAL', 333.0, -10.8, -48.4, 240.0, 'BRA', 'III'],
        334: ['CUIABA', 334.0, -15.6, -56.1, 990.0, 'BRA', 'III'],
        335: ['ETOSHA PAN', 335.0, -19.2, 15.9, 1100.0, 'NAM', 'I'],
        336: ['ISFAHAN', 336.0, 32.477, 51.425, 1550.0, 'IRN', 'II'],
        338: ['BRATTS LAKE (REGINA)', 338.0, 50.205, -104.705, 592.0, 'CAN', 'IV'],
        339: ['USHUAIA', 339.0, -54.85, -68.308, 15.0, 'ARG', 'III'],
        344: ['HONG KONG OBSERVATORY', 344.0, 22.31, 114.17, 66.0, 'HKG', 'II'],
        348: ['ANKARA', 348.0, 39.95, 32.883, 896.0, 'TUR', 'VI'],
        360: ['PELLSTON (MI)', 360.0, 45.56, -84.67, 238.0, 'USA', 'IV'],
        361: ['HOLTVILLE (CA)', 361.0, 32.81, -115.42, -18.0, 'USA', 'IV'],
        394: ['BROADMEADOWS', 394.0, -37.6914, 144.9467, 108.0, 'AUS', 'V'],
        400: ['MAITRI', 400.0, -70.46, 11.45, 223.5, 'ATA', 'ANTARCTICA'],
        401: ['SANTA CRUZ', 401.0, 28.42, -16.26, 36.0, 'ESP', 'I'],
        404: ['JOKIOINEN', 404.0, 60.81, 23.5, 103.0, 'FIN', 'VI'],
        406: ['SCORESBYSUND', 406.0, 70.49, -21.98, 50.0, 'GRL', 'VI'],
        418: ['HUNTSVILLE', 418.0, 34.72, -86.64, 196.0, 'USA', 'IV'],
        420: ['BELTSVILLE (MD)', 420.0, 39.02, -76.74, 64.0, 'USA', 'IV'],
        432: ['PAPEETE (TAHITI)', 432.0, -18.0, -149.0, 2.0, 'PYF', 'V'],
        434: ['SAN CRISTOBAL', 434.0, -0.92, -89.6, 8.0, 'ECU', 'III'],
        435: ['PARAMARIBO', 435.0, 5.81, -55.21, 22.5, 'SUR', 'III'],
        436: ['LA REUNION ISLAND', 436.0, -20.99, 55.48, 61.5, 'REU', 'I'],
        437: ['WATUKOSEK (JAVA)', 437.0, -7.57, 112.65, 50.0, 'IDN', 'V'],
        438: ['SUVA (FIJI)', 438.0, -18.13, 178.315, 6.0, 'FJI', 'V'],
        439: ['KAASHIDHOO', 439.0, 5.0, 73.5, 1.0, 'MDV', 'V'],
        441: ['EASTER ISLAND', 441.0, -27.17, -109.42, 62.0, 'CHL', 'III'],
        443: ['SEPANG AIRPORT', 443.0, 2.73, 101.7, 17.0, 'MYS', 'V'],
        444: ['CHEJU', 444.0, 33.5, 126.5, 300.0, 'KOR', 'II'],
        445: ['TRINIDAD HEAD', 445.0, 40.8, -124.16, 55.0, 'USA', 'IV'],
        448: ['MALINDI', 448.0, -2.99, 40.19, -6.0, 'KEN', 'I'],
        450: ['DAVIS', 450.0, -68.577, 77.973, 16.0, 'ATA', 'ANTARCTICA'],
        456: ['EGBERT', 456.0, 44.23, -79.78, 253.0, 'CAN', 'IV'],
        457: ['KELOWNA', 457.0, 49.93, -119.4, 456.0, 'CAN', 'IV'],
        458: ['YARMOUTH', 458.0, 43.87, -66.1, 9.0, 'CAN', 'IV'],
        459: ['TBD', 459.0, 0.0, 0.0, 0.0, '', 'VI'],
        460: ['THULE', 460.0, 76.53, -68.74, 57.0, 'GRL', 'VI'],
        466: ['MAXARANGUAPE (SHADOZ-NATAL)', 466.0, -5.445, -35.33, 32.0, 'BRA', 'III'],
        472: ['COTONOU', 472.0, 6.21, 2.23, 10.0, 'BEN', 'I'],
        477: ['HEREDIA', 477.0, 10.0, -84.11, 1176.0, 'CRI', 'IV'],
        480: ['SABLE ISLAND', 480.0, 43.93, -60.02, 4.0, 'CAN', 'IV'],
        482: ['WALSINGHAM', 482.0, 42.6, -80.6, 200.0, 'CAN', 'IV'],
        483: ['BARBADOS', 483.0, 13.16, -59.43, 32.0, 'BRB', 'III'],
        484: ['HOUSTON (TX)', 484.0, 29.72, -95.4, 19.0, 'USA', 'IV'],
        485: ['TECAMEC (UNAM)', 485.0, 19.33, -99.18, 2272.0, 'MEX', 'IV'],
        487: ['NARRAGANSETT', 487.0, 41.49, -71.42, 21.0, 'USA', 'IV'],
        488: ['PARADOX', 488.0, 43.92, -73.64, 284.0, 'USA', 'IV'],
        489: ['RICHLAND', 489.0, 46.2, -119.16, 123.0, 'USA', 'IV'],
        490: ['VALPARAISO (IN)', 490.0, 41.5, -87.0, 240.0, 'USA', 'IV'],
        494: ['ALAJUELA', 494.0, 9.98, -84.21, 899.0, 'CRI', 'IV'] }
    return sonde_dict

# ----
#  7.08 - returns (lat, lon, alt (press), timezone (UTC) ) for a given site
# ----
def gaw_2_loc(site, f =
'GLOBAL_SURFACE_O3_2006_2012.nc'):#, f """ Extract GAW site locations for a given site Another file is availible with just GAW sites: 'GAW_SURFACE_O3_2006_2012.nc' """ from AC_tools.funcs4generic import hPa_to_Km # Use simple dictionary if site listed try: gaw_sites= { 'SMO': (-14.247, -170.565,1002.7885270480558, -11), 'MNM':(24.285, 153.981, 1011.9342452324959, 9), 'BMW':(32.27, -64.88, 1008.6109830510485, -4 ) ,'CVO': (16.848, -24.871, 1011.6679817831093, -1), 'RPB':(13.17000, -59.43000, 1007.0196960034474, -4 ), 'ogasawara': (26.38, 142.10,996.08181619552602, 9 ), 'OGA': (26.38, 142.10,996.08181619552602, 9 ) , # Add extras for ease of analysis (e.g. Roscoff ... ) 'ROS': (48.433, -3.5904, 1011.6679817831093, +1) } return gaw_sites[ site ] # If not in list then extract details from NetCDF except: wd= get_dir('dwd') +'ozonesurface/' with Dataset(wd+f, 'r', format='NETCDF4') as f: lon= f.groups[site].longitude alt = f.groups[site].altitude /1E3 lat = f.groups[site].latitude print [ (i, type(i) ) for i in lat, lon, alt ] return (lat, lon, float( hPa_to_Km([alt], reverse=True)[0] ), -9999 ) rm platform import where not needed # ================================================= # --------- tms - module of Variables for re-use---------------- # -------------- # Section 0 - Required modules # Section 1 - Planeflight variables # Section 2 - Drivers functions # Section 3 - GeosChem (bpch) prod loss variables # Section 4 - GeosChem (bpch) general variables # Section 5 - Misc # Section 6 - Dynamic p/l processing # Section 7 - Obervational variables # ---------------------------- ------------- ------------- ------------- # -------------- Contents # --------------- ------------- ------------- ------------- # ---- Section 0 ----- Required modules # --------------- ------------- ------------- ------------- # ---- Section 1 ----- Planeflight variables # 1.01 - PF variable dictionary *** # 1.02 - TRA_?? 
to Geos-Chem species name *** # --------------- ------------- ------------- ------------- # ---- Section 2 ----- Drivers functions # 2.01 - P/L tag to PD tag *** # 2.02 - Get P/L dictionary for a given family *** # 2.03 - Get P/L dictionary for a given species *** # --------------- ------------- ------------- ------------- # ---- Section 3 ----- GeosChem (bpch) prod loss variables # 3.01 - Spec to photolysis reaction p/l tag # 3.02 - Ox family for tag # --------------- ------------- ------------- ------------- # ---- Section 4 ----- GeosChem (bpch) general variables # 4.01 - v9-2 species in input.geos from num # 4.02 - Get Species Mass # 4.03 - Get Species stoichiometry # 4.04 - GEOS-Chem/ctm.bpch values (current main dict ) *** # 4.05 - latex species name # 4.06 - converts P/L tracer mulitpler to 1 # 4.07 - Returns tracers unit and scale (if requested) # 4.08 - Store of dirs for earth0, atmosviz1, and tms mac # 4.09 - Ox in species (redundant now? should adapt species stoich ) # 4.10 - Get Gaw site name from GAW ID # 4.11 - returns dict of gaw sites # 4.12 - Return lat, lon, alt for a given resolution # 4.13 - Get model array dimension for a given resolution # 4.14 - Convert gamap category/species name to Iris/bpch name # 4.99 - Reference data, (inc. grid data) from gchem # --------------- ------------- ------------- ------------- # ---- Section 5 ----- Misc # 5.01 - dir store (standard directories on different servers ) # 5.02 - Store of constants for use by funcs/progs # --------------- ------------- ------------- ------------- # ---- Section 6 ----- Dynamic prod/loss dictionary processing ( For GEOS-Chem) # 6.01 - Make rxn dict of all active reactions *** # 6.02 - Return all reactions for a given p/l family *** # 6.03 - Return reaction infomaiton for given tags (e.g. PD... ) # 6.04 - Create an indices list to split reaction by family (e.g. 
for Ox loss) # 6.05 - Return tags for a given reaction # 6.06 - Extract all p/l speacies in a given input.geos # 6.07 - Extract all active tags from a given smv.log # 6.08 - Extract all active PDs from a given smv.log # 6.09 - get all active reaction for a given tag # 6.10 - get all details for a given tag # 6.11 - get reaction coeffifecent # --------------- ------------- ------------- ------------- # ---- Section 7 ----- Observational variables # 7.01 - IO observation dictionary # 7.02 - BAE flight ID dictionary # 7.03 - CAST flight dictionary for CIMS/CIMSII # 7.04 - Iodocarbon obs. meta data # 7.05 - Stores locations for use by funcs/progs - LON, LAT, ALT - double up? ( with 5.02 ?? ) # 7.06 - Get Locations of observations (lats, lons, alts ) for given sites # 7.07 - sonde station variables (list of 432 sondes) # 7.08 - returns (lat, lon, alt (press), timezone (UTC) ) for a given site # ------------------ Section 0 ----------------------------------- # -------------- Required modules: # #!/usr/bin/python # # -- I/O / Low level import re #import platform import pandas as pd from netCDF4 import Dataset #import Scientific.IO.NetCDF as S import sys import glob # - Math/Analysis import numpy as np # - tms from AC_tools.funcs4core import * # ------------------------------------------- Section 1 ------------------------------------------- # -------------- Planeflight variables # # -------------- # 1.01 - dictionary of variables used for planeflight_mod.F output # ------------- def pf_var( input, ver='1.7', ntracers=85, JREAs=[] ): # planeflight variable lists metvars = [ 'GMAO_TEMP', 'GMAO_ABSH', 'GMAO_SURF', 'GMAO_PSFC', 'GMAO_UWND', 'GMAO_VWND' ] species = [ 'O3', 'NO2', 'NO', 'NO3', 'N2O5', 'HNO4', 'HNO3', 'HNO2', 'PAN', 'PPN', 'PMN', 'R4N2', 'H2O2', 'MP', 'CH2O', 'HO2', 'OH', 'RO2', 'MO2', 'ETO2', 'CO', 'C2H6', 'C3H8', 'PRPE', 'ALK4', 'ACET', 'ALD2', 'MEK', 'RCHO', 'MVK', 'SO2', 'DMS', 'MSA', 'SO4', 'ISOP' ] all_species_not_TRA = [ 'A3O2', 'ATO2', 'B3O2', 'EOH', 
'ETO2', 'ETP', 'GLYX', 'HO2', 'IAP', 'INO2', 'INPN', 'ISN1', 'ISNOOA', 'ISNOOB', 'ISNOHOO', 'ISNP', 'KO2', 'MAN2', 'MAO3', 'MAOP', 'MAOPO2', 'MCO3', 'MGLY', 'MO2', 'MRO2', 'MRP', 'OH', 'PO2', 'PP', 'PRN1', 'PRPN', 'R4N1', 'R4O2', 'R4P', 'RA3P', 'RB3P', 'RCO3', 'RIO2', 'ROH', 'RP', 'VRO2', 'VRP', 'LISOPOH', 'ISOPND', 'ISOPNB', 'HC5', 'DIBOO', 'HC5OO', 'DHMOB', 'MOBAOO', 'ISOPNBO2', 'ISOPNDO2', 'ETHLN', 'MACRN', 'MVKN', 'PYAC', 'IEPOXOO', 'ATOOH', 'PMNN', 'MACRNO2', 'PMNO2' ] OH_reactivity=[ 'NO', 'ISOP', 'GMAO_TEMP', 'GMAO_PSFC', 'CO', 'ACET', 'ALD2', 'MEK', 'MVK', 'MACR', 'C3H8', 'CH2O', 'C2H6', 'SO2', 'NO2', 'ISOPNB', 'ISOPND', 'NO3', 'HNO2', 'HNO3', 'OH', 'HO2', 'H2O2', 'MP', 'ATOOH', 'HNO4', 'ALK4', 'ISN1', 'R4N2', 'RCHO', 'ROH', 'PRPE', 'PMN', 'GLYC', 'GLYX', 'MGLY', 'HAC', 'INPN', 'PRPN', 'ETP', 'RA3P', 'RB3P', 'R4P', 'RP', 'PP', 'RIP', 'IEPOX', 'IAP', 'VRP', 'MRP', 'MAOP', 'MAP', 'DMS', 'HBr', 'Br2', 'BrO', 'CHBr3', 'CH2Br2', 'CH3Br', 'HC5', 'ISOPND', 'ISOPNB', 'ISNP', 'MVKN', 'MACRN', 'DHMOB', 'MOBA', 'ETHLN', 'PROPNN' ] OH_Extras4nic = [ 'OH', 'MCO3', 'A3O2', 'PO2', 'R4O2', 'R4O2', 'R4N1', 'ATO2', 'KO2', 'RIO2', 'VRO2', 'MRO2', 'MAN2', 'B3O2', 'INO2', 'ISNOOA', 'ISNOOB', 'ISNOHOO', 'PRN1', 'RCO3', 'MAO3', 'IEPOXOO', 'MAOPO2', 'MAOPO2', 'HC5OO', 'HC5OO', 'ISOPNDO2', 'ISOPNDO2', 'ISOPNBO2', 'ISOPNBO2', 'DIBOO', 'DIBOO', 'MOBAOO', 'MOBAOO', 'H2', 'CH4', 'HCOOH', 'MOH', 'ACTA', 'EOH', 'VRP' ] # remove inactive species inactive_spec = ['ACTA', 'CH4', 'EOH', 'H2', 'HCOOH', 'MOH'] [ OH_Extras4nic.pop(ii) for ii in sorted([ OH_Extras4nic.index(i) \ for i in inactive_spec ])[::-1] ] # Setup list of tracers if ver == '1.7': ntracers=85 if ver == '2.0': ntracers=101 if ver == 'johan_br.v92': ntracers=87 TRAs = ['TRA_'+ str(i) for i in range(1, ntracers+1) ] # TRAs = ['TRA_{:0>2}'.format(i) for i in range(1, ntracers+1) ] # Setup list of reactions ( photolysis and general ) if ver == '1.5': PHOT_1st, PHOT_last = 455, 533 if ver == '1.6': PHOT_1st, PHOT_last = 453, 531 
if ver == '1.6.1': PHOT_1st, PHOT_last = 453, 530 if ver == '1.7': PHOT_1st, PHOT_last = 453, 529 if ver == '2.0': PHOT_1st, PHOT_last = 413, 614 JREAs = ['REA_'+ str(i) for i in range(PHOT_1st, PHOT_last) ] REAs_all = ['REA_'+ str(i) for i in range(0, 533) ] # reduced list for high time and spatial resolution if any( [ input ==i for i in 'slist_v9_2_NREA_red', 'slist_v9_2_NREA_red_NOy'] ): TRAs = GC_var('active_I') + ['AERI'] TRAs= [ num2spec( i, ver=ver, invert=True) for i in TRAs ] TRAs = [ 'TRA_{:0>2}'.format( i) for i in TRAs ] metvars = [ i for i in metvars if not any( [ (i==ii) \ for ii in 'GMAO_ABSH', 'GMAO_SURF', 'GMAO_PSFC' ] ) ] species = [ i for i in species if not any( [ (i==ii) for ii in 'R4N2', 'MP', 'CH2O', 'MO2', 'ETO2', 'CO', 'C2H6', 'C3H8', 'PRPE', 'ALK4', 'ACET', 'ALD2', 'MEK', 'RCHO', 'MVK', 'DMS', 'MSA', 'ISOP' ]) ] if input =='slist_ClearFlo': TRAs = 'CO', 'ACET', 'ALD2', 'ISOP', 'C2H6', 'C3H8', 'CH2O', \ 'MACR', 'HNO2', 'HNO3', 'MVK', 'NO', 'NO2', 'PAN', 'O3', TRAs= [ num2spec( i, ver=ver, invert=True) for i in TRAs ] TRAs = [ 'TRA_{:0>2}'.format( i) for i in TRAs ] # mannually add ethanol TRAs += [ 'TRA_86'] species = [ 'OH', 'MO2','HO2' ] if input =='slist_v9_2_NREA_red_NOy': # THIS IS NOT A GOOD APPROACH, use actual names an tranlate based on verison. 
# missing = [ 'TRA_17', 'TRA_60', 'TRA_30', 'TRA_31', 'TRA_50', \ # 'TRA_54', 'TRA_55', 'TRA_57' ] # use tracer TRA_60, TRA_30, TRA_31, for: 'MMN' , 'NH3' , 'NH4', 'R4N2', 'BrNO2', 'BrNO3','MPN', 'PROPNN', missing = 'MMN' , 'NH3' , 'NH4', 'R4N2', 'BrNO2', 'BrNO3','MPN', 'PROPNN' missing = [ num2spec( i, ver=ver, invert=True) for i in missing ] missing = [ 'TRA_{:0>2}'.format( i) for i in missing ] species = species + missing # Construct dictionary d= { 'species' : species, 'metvars' : metvars, 'REAs_all' : REAs_all, 'JREAs': JREAs, 'TRAs' : TRAs, 'slist' : species +TRAs +JREAs+ metvars , 'slist_v9_2_NH' : species + TRAs[:66] + metvars , 'slist_v9_2_NREA' : species + TRAs + metvars , 'slist_v9_2_NREA_red': species + TRAs + metvars, 'slist_REAs_all' : species + TRAs + REAs_all + metvars, 'slist_REAs_all_OH' : species + TRAs + metvars+OH_reactivity, 'slist_REAs_all_OH_extras' : all_species_not_TRA + TRAs + metvars, 'slist_v9_2_NREA_red_NOy' : species + TRAs + metvars, 'slist_v10_1.7_allspecs': all_species_not_TRA +TRAs+ JREAs +metvars, 'slist_ClearFlo': species + TRAs + metvars } # retrieve variable list from dictionary vars = d[input] # return unique list vars = sorted( list( set( vars) ) ) print vars return vars # -------------- # 1.02 - Translator for planeflight species to GEOS-Chem species # ------------- def what_species_am_i(input=None, V_9_2=True, V_9_2_C=False, \ ver='1.7', special_case=None, invert=False, rtn_dict=False, \ debug=False ) : """ What GEOS-Chem (GC) Species am i? 
takes TRA_## & returns GC ID or other wayround """ # select correct naming dictionary var ={ \ '1.7': 'GCFP_d2TRA_all_1.7', '1.6': 'GCFP_d2TRA_all_1.6' }[ver] # special_case = 'EOH' # special_case = 'EOH + actual names' # if all_TRA: # var ={ \ # '1.7': 'all_TRA_spec_met_1.7_EOH' # '1.6': 'GCFP_d2TRA_1.6' # }[ver] if not isinstance( special_case, type(None) ): var = { # 'EOH':'GCFP_d2TRA_all_1.7_EOH', # 'EOH + actual names':'GCFP_d2TRA_all_1.7_EOH_actual_names' # 'all_TRA_spec_met_1.7_EOH' : 'TRA_spec_met_all_1.7_EOH' #' 'all_TRA_spec_met_1.7_EOH':'TRA_spec_met_all_1.7_EOH_no_trailing_zeroes' # TRA_spec_met_all_1' }[special_case] # Get dictionary from variable store d = GC_var( var ) if debug: print d if invert: d = {v: k for k, v in d.items()} # return dictionary if rtn_dict: return d else: return d[input] # ---------------- Section 2 ------------------------------------------- # -------------- Drivers # # -------------- # 2.01 - Convert Production/Loss RD IDs for O3 to PD## for input.geos/tracer.dat linked files # ------------- def PLO3_to_PD(PL, fp=True, wd=None, ver='1.6', res='4x5',debug=False): """ Converts """ if any( [(ver ==i) for i in '1.3' ,'1.4' ,'1.5' , '1.6', '1.7' ]): if wd==None: if debug: print 'WARNING: Using MUTD wd' wd = MUTD_runs(ver=ver, res=res, debug=debug)[0] PDs, vars = p_l_species_input_geos( wd, ver=ver, rm_multiple_tagged_rxs=True) # Add other vars for ease of processing vars += ['PIOx', 'iLOX', 'LIOx', 'iPOX', 'POX', 'LOX', 'LOx', 'L_Iy'] PDs += ['PIOx', 'iLOX', 'LIOx', 'iPOX', 'POX', 'LOX', 'LOx', 'L_Iy'] return dict( zip(vars, PDs))[PL ] else: print 'update programme - manual PLdict now obsolete. ' # ------------- # 2.02 - DRIVER - uses functions to build a dictionary for a given family of loss # ------------- def get_pl_dict( wd, spec='LOX' , rmx2=False, debug=False): # Get reaction IDs for each rxn. in spec (p/l, e.g. 
LOX)
    # Retrieve reaction numbers, reaction strings, tags and coefficients
    # for every reaction tracked for this p/l family.
    nums, rxns, tags, Coe = prod_loss_4_spec( wd, spec, all_clean=True,
        debug=debug )
    # Make a dictionary of coeffiecnts of reaction
    Coe_dict = dict(zip(nums, Coe) )
    # unpack for mulutple tags of same reactions, then get details
    unpacked_tags = [j for k in tags for j in k ]
    # details entries are (tag, rxn number, rxn string) per get_tag_details
    details = [ get_tag_details( wd, tag ) for tag in unpacked_tags ]
    # Kludge - 1 rxn missing from POx tracking? - 999
    # + "'ISOPND+OH', '+', '=1.0ISOPND'"
    # NOTE(review): popping from 'details' while enumerating it skips the
    # element following each removal - presumably only a single rxn #364
    # entry can occur, so this works; confirm before relying on it.
    [ details.pop(n) for n, i in enumerate( details ) if i[1]==364]
    # NOTE(review): this 'ind' is never read before being rebound in the
    # rmx2 branch below - apparently vestigial.
    ind = [n for n, i in enumerate( nums ) if i ==354 ]
    # Get Coes and overwrite where prog_mod_tms has values
    Coes = [ get_rxn_Coe( wd, d[1], unpacked_tags[n], nums=nums,
        rxns=rxns, tags=tags, Coe=Coe, spec=spec, debug=debug ) \
        for n, d in enumerate( details ) ]
    # Remove double ups, which are present due to Loss (LO3_??) and
    # rate tagging (RD??) originally performed separately
    if rmx2:
        # pairs of (RD tag, equivalent LO3 tag); only the RD member of
        # each pair is removed below
        d = [ ['RD62', 'LO3_38'], ['RD59', 'LO3_30'], ['RD65', 'LO3_34'], \
            ['RD93', 'LO3_55'], ['RD92', 'LO3_39'], [ 'RD95', 'LO3_36'], \
            ['RD67', 'LO3_35'] ]
        d = [i[0] for i in d ]
        ind = [ n for n, i in enumerate(details) if any( [ i[0] == ii \
            for ii in d ] ) ]
        if debug: print d, ind, [len(i) for i in details, Coes ] , [ [i[0] \
            for i in details][ii] for ii in ind ][::-1]
        # pop in reverse index order so earlier indices stay valid
        [ l.pop(i) for i in ind[::-1] for l in details, Coes ]
        if debug: print [len(i) for i in details, Coes ]
    # return a dictionary indexed by p/l tracer, with rxn #,
    # reaction str and Coe of rxn.
    return dict( zip( [i[0] for i in details], [ i[1:] + [ Coes[n] ] \
        for n, i in enumerate( details) ] ) )

# -------------
# 2.03 - Get prod loss reactions for a given family.
# -------------
def prod_loss_4_spec( wd, fam, all_clean=True, debug=False ):
    # --- Get Dict of all reactions, Keys = #s
    rdict = rxn_dict_from_smvlog(wd)
    # --- Get reaction # tracked by p/l diag for spec and coefficient.
rxns = rxns_in_pl(wd, fam)
    nums = rxns.keys()
    Coe = [ rxn[-1] for rxn in rxns.values() ]
    # --- get all details from full reaction dictionary
    rxns = [ rdict[i] for i in nums ]
    # --- get tags for tracked reactions, state where reactions are un tracked
    tags = get_p_l_tags( rxns )
    # --- cleaned tags
    if all_clean:
        # strip embedded "+N.N" / "=N.N" coefficient fragments from tags
        tags = [ [re.sub('\+\d.\d', '', i) for i in u ] for u in tags ]
        tags = [ [re.sub('\=\d.\d', '', i) for i in u ] for u in tags ]
    # -- remove erroneous read/ Kludge on val
    # --- Fortran write error leads to combination of species at the
    # end of long line
    # NOTE(review): original indentation is ambiguous here; this repair
    # pass is assumed to run regardless of all_clean - confirm.
    if (debug):
        print [ i[:3] for i in nums, rxns, tags, Coe]
        print [ len(i) for i in nums, rxns, tags, Coe]
    # known fused tag strings, and the separated tags they should become
    errs = ['LO3_36RD95' , 'ISOPNDPO3_50']
    cerrs = [['LO3_36', 'RD95'], ['PO3_50'] ]
    for n, e in enumerate( errs ):
        try:
            # index of the first reaction whose tag list contains the
            # fused string
            ind = [ nn for nn, i in enumerate( tags) if \
                any([ ( e in ii) for ii in i ]) ] [0]
            # save the fused entry, remove it from every list, then
            # re-append it with the corrected tag list
            vars = [ i[ind] for i in nums, rxns, tags, Coe]
            if (debug): print 3, [ i[-1] for i in nums, rxns, tags, Coe], \
                vars, [ len(i) for i in nums, rxns, tags, Coe]
            [i.pop(ind) for i in nums, rxns, tags, Coe ]
            # add the cerrs values on the end
            if (debug): print 4, [ i[-1] for i in nums, rxns, tags, Coe], \
                [ len(i) for i in nums, rxns, tags, Coe]
            nums += [ vars[0] ]
            rxns += [vars[1] ]
            tags += [cerrs[n]]
            Coe += [vars[-1] ]
            if (debug): print 6, [ i[-1] for i in nums, rxns, tags, Coe], \
                [ len(i) for i in nums, rxns, tags, Coe]
            print '->'*30, 'SUCCESS', n, e
        except:
            # NOTE(review): bare except deliberately makes this repair
            # best-effort (a miss just prints FAIL); consider narrowing
            # to IndexError.
            print '>'*100, 'FAIL' , n, e
    return nums, rxns, tags, Coe

# ------------------------------------------- Section 3 -------------------------------------------
# -------------- GeosChem (bpch) prod loss variables
#
# --------------
# 3.01 - Spec to photolysis reaction p/l tag
# -------------
def spec_phot_2_RD(spec):
    # Map an iodine species to its photolysis-reaction p/l tag ('RD##');
    # raises KeyError for species without a tag.
    d = { 'OIO': 'RD67', 'ICl': 'RD74', 'I2O2': 'RD70', 'I2': 'RD64', \
        'CH2ICl': 'RD88', 'HOI': 'RD65', 'CH2IBr': 'RD89', 'INO': 'RD75', \
        'IO': 'RD66', 'CH2I2': 'RD72','CH3IT': 'RD71', 'IONO2': 'RD69', \
        'IONO': 'RD68', 'IBr': 'RD73' \
        }
    return d[spec]

#
------------- # 3.02 - Get families for reactions # ------------- def get_tag_fam( tag ): """ dictionary of manually constructed assignment list - Ox loss familes - addition for paranox Kludge (in v9-2 (patched), but removed in v10? ) """ # Ox family dictionary fam_d = { 'LO3_18': 'Photolysis', 'LR25': 'Bromine', 'LR21': 'Bromine', 'LO3_38': 'Iodine', 'LO3_63': 'NOy', 'LO3_10': 'HOx', 'LO3_34': 'Iodine', 'LO3_35': 'Iodine', 'LO3_30': 'Iodine', 'LR5': 'Bromine', 'LR6': 'Bromine', 'LO3_61': 'NOy', 'LO3_60': 'NOy', 'LO3_39': 'Iodine', 'LO3_05': 'HOx', 'LO3_07': 'NOy', 'LO3_06': 'HOx', 'LO3_49': 'NOy', 'LO3_62': 'NOy', 'LO3_03': 'HOx', 'LO3_02': 'HOx', 'LO3_67': 'NOy', 'LO3_66': 'NOy', 'LO3_69': 'NOy', 'LO3_42': 'NOy', 'LO3_41': 'NOy', 'LO3_40': 'NOy', 'LO3_47': 'HOx', 'LO3_46': 'NOy', 'LO3_09': 'HOx', 'LO3_44': 'NOy', 'LR37': 'HOx', 'LR36': 'NOy', 'LO3_65': 'NOy', 'LR30': 'Bromine', 'LO3_24': 'Iodine', 'LR10': 'Bromine', 'LR38': 'NOy', 'LO3_68': 'NOy', 'LO3_64': 'NOy', 'LO3_36': 'Iodine', 'LO3_57': 'NOy', 'LO3_72': 'NOy', 'RD98': 'Photolysis', 'LO3_71': 'NOy', 'LO3_58': 'NOy', 'LO3_54': 'Photolysis', 'LO3_55': 'Iodine', 'LO3_56': 'HOx', 'LO3_08': 'HOx', 'LO3_50': 'NOy', 'LO3_51': 'NOy', 'LO3_52': 'NOy', 'LO3_53': 'HOx' # added , 'RD63': 'Iodine', 'RD62':'Iodine', 'LO3_38': 'Iodine', 'RD59': 'Iodine', 'LO3_30' : 'Iodine', 'RD65': 'Iodine', 'LO3_34': 'Iodine', 'RD93': 'Iodine', 'LO3_55': 'Iodine', 'RD92': 'Iodine', 'LO3_39': 'Iodine' , 'LO3_36': 'Iodine','RD95': 'Iodine' , 'RD67': 'Iodine', 'LO3_35': 'Iodine' # , 'RD36': 'Bromine' # Kludge to allow combination reactions # Kludge - from Chris Holmes (paranox deposition, goes through p/l as Ox losss ) ,'LO3_70' : 'Photolysis' # Extra tags not in list? # (these reactions are appearing due to lack of inclusion of iodine # species in Ox family... 
) # ,'RD19': 'iodine', 'RD37': 'iodine', 'RD01': 'iodine' } # Creigee reaction class/"family" dictionary # if cregiee: # fam_d ={ # } return fam_d[tag] # ----------------- Section 4 ------------------------------------------- # -------------- GeosChem (bpch) general variables # # 4.01 - v9-2 species in input.geos from num # 4.02 - Get Species Mass # 4.03 - Get Species stioch # 4.04 - GEOS-Chem/ctm.bpch values (current main dict ) # 4.05 - latex species name # 4.06 - converts P/L tracer mulitpler to 1 # 4.07 - Returns tracers unit and scale (if requested) # 4.08 - Store of dirs for earth0, atmosviz1, and tms mac # 4.09 - Ox in species (redundant now? should adapt species stoich ) # 4.10 - Get GAW site info (lat, lon, alt (press), timezone (UTC) ) # 4.99 - Reference data, (inc. grid data) from gchem - credit: GK (Gerrit Kuhlmann ) # -------------- # 4.01 - v9-2 species in input.geos from num # ------------- def num2spec( num=69, rtn_dict=False, invert=False, ver = '1.7' ): # get dictionary of tracer numbers d= what_species_am_i( ver=ver, rtn_dict=True, special_case=None ) # slice off just numbers nums =[ int(i[4:]) for i in d.keys()] # re-make dictionary d = dict( zip(nums, d.values() ) ) # inver to give spec for num if invert: d = { v: k for k, v in d.items() } if rtn_dict: return d else: return d[num] # -------------- # 4.02 - RMM (Mass) (g /mol) for species # ------------- # C3H5I == C2H5I (this is a vestigle typo, left in to allow for use of older model runs def species_mass(spec): d = { 'HIO3': 176.0, 'OCPO': 12.0, 'Br2': 160.0, 'OCPI': 12.0, 'O3': 48.0, 'PAN': 121.0, 'ACET': 12.0, 'RIP': 118.0, 'BrNO3': 142.0, 'Br': 80.0, 'HBr': 81.0, 'HAC': 74.0, 'ALD2': 12.0, 'HNO3': 63.0, 'HNO2': 47.0, 'C2H5I': 168.0, 'HNO4': 79.0, 'OIO': 159.0, 'MAP': 76.0, 'PRPE': 12.0, 'CH2I2': 268.0, 'IONO2': 189.0, 'NIT': 62.0, 'CH3Br': 95.0, 'C3H7I': 170.0, 'C3H8': 12.0, 'DMS': 62.0, 'CH2O': 30.0, 'CH3IT': 142.0, 'NO2': 46.0, 'NO3': 62.0, 'N2O5': 105.0, 'H2O2': 34.0, 'DST4': 29.0, 
'DST3': 29.0, 'DST2': 29.0, 'DST1': 29.0, 'MMN': 149.0, 'HOCl': 52.0, 'NITs': 62.0, 'RCHO': 58.0, 'C2H6': 12.0, 'MPN': 93.0, 'INO': 157.0, 'MP': 48.0, 'CH2Br2': 174.0, 'SALC': 31.4, 'NH3': 17.0, 'CH2ICl': 167.0, 'IEPOX': 118.0, 'ClO': 51.0, 'NO': 30.0, 'SALA': 31.4, 'MOBA': 114.0, 'R4N2': 119.0, 'BrCl': 115.0, 'OClO': 67.0, 'PMN': 147.0, 'CO': 28.0, 'BCPI': 12.0, 'ISOP': 12.0, 'BCPO': 12.0, 'MVK': 70.0, 'BrNO2': 126.0, 'IONO': 173.0, 'Cl2': 71.0, 'HOBr': 97.0, 'PROPNN': 109.0, 'Cl': 35.0, 'I2O2': 286.0, 'I2O3': 302.0, 'I2O4': 318.0, 'I2O5': 338.0, 'MEK': 12.0, 'HI': 128.0, 'ISOPN': 147.0, 'SO4s': 96.0, 'I2O': 270.0, 'ALK4': 12.0, 'MSA': 96.0, 'I2': 254.0, 'PPN': 135.0, 'IBr': 207.0, 'MACR': 70.0, 'I': 127.0, 'AERI': 127.0, 'HOI': 144.0, 'BrO': 96.0, 'NH4': 18.0, 'SO2': 64.0, 'SO4': 96.0, 'IO': 143.0, 'CHBr3': 253.0, 'CH2IBr': 221.0, 'ICl': 162.0, 'GLYC': 60.0 # species, not in tracer list , 'HO2': 33.0, 'OH': 17.0,'CH4':16.0 , 'N':14.0, 'CH3I':142.0, 'CH2OO':46.0, 'S': 32.0, } return d[spec] # -------------- # 4.03 - return the stiochometry of Iodine in species # -------------- def spec_stoich( spec, IO=False, I=False, NO=False, OH=False, N=False, C=False ): # if I: # note - re-write to take stioch species (e.g. OH, I instead of booleans ) - asssume I == True as default # C3H5I == C2H5I (this is a vestigle typo, left in to allow for use of older model runs # aerosol cycling specs # 'LO3_36' : (2.0/3.0) , 'LO3_37' : (2.0/4.0), # aersol loss rxns... 'LO3_37' isn't true loss, as I2O4 is regen. 
temp # aerosol loss rxns - corrected stochio for Ox, adjsutment need for I d = { 'RD11': 1.0, 'RD10': 1.0, 'HIO3': 1.0, 'RD15': 1.0, 'RD62': 2.0, 'RD17': 1.0, 'RD16': 1.0, 'RD19': 1.0, 'LO3_37': 0.5, 'CH2I2': 2.0, 'AERII': 1.0, 'CH2ICl': 1.0, 'PIOx': 1.0, 'C3H7I': 1.0, 'RD73': 1.0, 'RD72': 2.0, 'RD71': 1.0, 'RD70': 1.0, 'C3H5I': 1.0, 'RD57': 1.0, 'CH3IT': 1.0, 'IO': 1.0, 'LO3_38': 1.0, 'RD61': 1.0, 'RD68': 1.0, 'I2': 2.0, 'IONO': 1.0, 'LO3_36': 0.6666666666666666, 'INO': 1.0, 'RD88': 1.0, 'RD89': 1.0, 'LOx': 1.0, 'RD06': 1.0, 'RD07': 1.0, 'RD02': 1.0, 'RD01': 1.0, 'I': 1.0, 'LO3_24': 0.5, 'AERI': 1.0, 'HOI': 1.0, 'RD64': 2.0, 'RD65': 1.0, 'RD66': 1.0, 'RD67': 1.0, 'RD60': 1.0, 'RD47': 1.0, 'C2H5I': 1.0, 'RD63': 1.0, 'RD20': 1.0, 'RD22': 1.0, 'RD24': 1.0, 'RD69': 1.0, 'RD27': 1.0, 'OIO': 1.0, 'CH2IBr': 1.0, 'LIOx': 1.0, 'L_Iy': 1.0, 'ICl': 1.0, 'IBr': 1.0, 'RD95': 2.0, 'I2O2': 2.0, 'I2O3': 2.0, 'I2O4': 2.0, 'I2O5': 2.0, 'HI': 1.0, 'I2O': 2.0, 'RD59': 1.0, 'RD93': 2.0, 'RD92': 1.0, 'IONO2': 1.0, 'RD58': 1.0, # p/l for: IO, I 'RD15': 1.0, 'RD17': 1.0, 'RD75': 1.0, 'RD72': 2.0, 'RD71': 1.0, 'RD70': 1.0, 'RD56': 1.0, 'RD69': 1.0, 'RD88': 1.0, 'RD89': 1.0, 'RD06': 1.0, 'RD07': 1.0, 'RD08': 1.0, 'RD64': 2.0, 'RD65': 1.0, 'RD67': 1.0, 'RD46': 2.0, 'RD47': 1.0, 'RD20': 1.0, 'RD22': 1.0, 'RD68': 1.0, 'RD25': 1.0, 'RD96': 1.0 , 'RD11': 1.0, 'RD12': 2.0, 'RD02': 1.0, 'RD16': 1.0, 'RD19': 1.0, 'RD24': 1.0, 'RD09': 1.0, 'RD23': 1.0, 'RD37': 1.0, 'RD97': 1.0, # kludge for test analysis (HEMCO emissions ) 'ACET' : 1.0, 'ISOP': 1.0, 'CH2Br2': 1.0, 'CHBr3':1.0, 'CH3Br':1.0 } if IO: d = { 'RD11': 2.0, 'RD10': 1.0, 'RD12': 2.0, 'LO3_36': 1./3., 'RD09': 1.0, 'RD66': 1.0, 'RD23': 1.0, 'RD37': 1.0, 'LO3_24': 1.0/2.0, 'RD56': 1.0, 'RD01': 1.0, 'RD08': 1.0, 'RD46': 2.0, 'RD30': 1.0, 'RD25': 1.0, 'RD27': 1.0, 'RD97':1.0 } if NO: d ={ 'NO2': 1.0, 'NO3': 1.0, 'N2O5': 2.0, 'NO': 1.0, 'PPN': 1.0, 'R4N2': 1.0, 'BrNO3': 1.0, 'INO': 1.0, 'PAN': 1.0, 'PMN': 1.0, 'HNO3': 1.0, 'HNO2': 1.0, 'NH3': 
1.0, 'HNO4': 1.0, 'BrNO2': 1.0, 'IONO': 1.0, 'PROPNN': 1.0, 'NH4': 1.0, 'MPN': 1.0, 'MMN': 1.0, 'ISOPN': 1.0, 'IONO2': 1.0 } if OH: d = { 'LO3_18': 2.0, 'LO3_03': 1.0, 'RD95': 1.0, 'PO3_14': 1.0 } if N: d= { 'RD10': 1.0, 'LR26': 1.0, 'LR27': 1.0, 'LR20': 1.0, 'RD17': 1.0, 'RD16': 1.0, 'RD19': 1.0, 'RD18': 2.0, 'LR28': 1.0, 'LO3_30': 1.0, 'RD75': 1.0, 'LR7': 1.0, 'LR8': 1.0, 'RD56': 1.0, 'RD24': 1.0, 'LO3_39': 1.0, 'RD25': 1.0, 'RD81': 1.0, 'LR35': 1.0, 'LR18': 1.0, 'LR17': 1.0, 'LR11': 1.0, 'LR39': 1.0, 'RD20': 1.0, 'RD21': 2.0, 'RD22': 1.0, 'RD23': 1.0, 'RD68': 1.0, 'RD69': 1.0 # NOy ( N in 'NOy') , 'NO2': 1.0, 'NO3': 1.0, 'N2O5': 2.0, 'NO': 1.0, 'PPN': 1.0, 'R4N2': 2.0, 'BrNO3': 1.0, 'INO': 1.0, 'PAN': 1.0, 'PMN': 1.0, 'HNO3': 1.0, 'HNO2': 1.0, 'NH3': 1.0, 'HNO4': 1.0, 'BrNO2': 1.0, 'IONO': 1.0, 'PROPNN': 1.0, 'NH4': 1.0, 'MPN': 1.0, 'MMN': 1.0, 'ISOPN': 1.0, 'IONO2': 1.0 } if C: d = { 'ACET': 3.0, 'ALD2': 2.0, 'C2H6': 2.0, 'C3H8': 3.0, 'ISOP': 5.0 } return d[spec] # -------------- # 4.04 - GEOS-Chem/ctm.bpch values # -------------- def GC_var(input_x=None, rtn_dict=False, debug=False): """ Note: Most of this dictionary is vestigial. <= ACTION NEEDED ( remove redundant variables ) f_var = GC flux (EW, NS , UP) variables Ox = 'Ox', 'POX', 'LOX' + list of drydep species # not inc. 
'NO3df', 'HNO4df', 'BrOdf' , 'BrNO2', 'IO', 'IONO', 'OIO', Ox_p = Ox prod list Ox-l = Ox loss list d_dep = dry dep (category, name = species) w_dep = wet dep ( 3x categories (WETDCV = rain out loss in convective updrafts (kg/s), WETDLS = rainout in large scale precip (kg/s), CV-FLX = Mass change due to cloud convection (kg/s); name = species) BL_m = UPWARD MASS FLUX FROM BOUNDARY-LAYER MIXING, (category, name = species) f_strat = strat flux (to tropsosphere) (category, name = species) """ if (debug): print 'GC_var called' GC_var_dict = { # Ox budget analysis 'f_var' : ['EW-FLX-$', 'NS-FLX-$', 'UP-FLX-$' ], # 'r_t' : [ 'Photolysis','HOx','NOy' ,'Bromine', 'Iodine' ], # 'r_tn' : ['NOy' ,'Photolysis','HOx' ,'Bromine', 'Iodine' ], 'r_t' : [ 'Photolysis','HOx','Bromine', 'Iodine' ], 'r_tn' : ['Photolysis','HOx' ,'Bromine', 'Iodine' ], 'r_tn_lc' : ['photolysis','HOx' ,'bromine', 'iodine' ], # 'Ox' : ['Ox','POX','LOX','O3df','NO2df', 'PANdf', 'PMNdf', 'PPNdf', 'N2O5df','HNO3df', 'HOBrdf','BrNO3df','HOIdf','IONO2df', 'I2O2df', 'I2O4df','I2O3df'], # 'Ox1.1' : ['Ox','POX','LOX','O3df','NO2df', 'PANdf', 'PMNdf', 'PPNdf', 'N2O5df','HNO3df', 'HOBrdf','BrNO3df','HOIdf','IONO2df', 'I2O2df'], # 'Ox_spec' : ['O3', 'NO2', 'NO3', 'PAN', 'PMN', 'PPN', 'HNO4', 'N2O5', 'HNO3', 'BrO', 'HOBr', 'BrNO2', 'BrNO3', 'MPN', 'IO', 'HOI', 'IONO', 'IONO2', 'OIO', 'I2O2', 'I2O4', 'I2O3'], # 'Ox_spec1.1' : ['O3', 'NO2', 'NO3', 'PAN', 'PMN', 'PPN', 'HNO4', 'N2O5', 'HNO3', 'BrO', 'HOBr', 'BrNO2', 'BrNO3', 'MPN', 'IO', 'HOI', 'IONO', 'IONO2', 'OIO', 'I2O2', 'I2O4'], # 'Ox_p_1.3' : ['PO3_85', 'PO3_76', 'PO3_62', 'PO3_63', 'RD06', 'PO3_01', 'PO3_77', 'PO3_86', 'PO3_79', 'PO3_14', 'PO3_87', 'PO3_66', 'PO3_15', 'PO3_67', 'PO3_51', 'PO3_88', 'PO3_56', 'PO3_89', 'PO3_90', 'PO3_72', 'PO3_91', 'PO3_02', 'PO3_03', 'PO3_92', 'PO3_64', 'PO3_68', 'LR9', 'PO3_58', 'PO3_57', 'PO3_70', 'PO3_60', 'PO3_65', 'PO3_18', 'PO3_73', 'PO3_19', 'PO3_20', 'PO3_21', 'PO3_22', 'PO3_24', 'PO3_25', 'PO3_26', 'PO3_27', 'PO3_34', 
'PO3_35', 'PO3_37', 'PO3_38', 'PO3_39', 'PO3_05', 'PO3_45', 'PO3_69', 'PO3_46', 'PO3_97', 'PO3_47', 'PO3_48', 'PO3_52', 'PO3_53', 'PO3_80', 'PO3_40', 'PO3_54', 'PO3_81', 'PO3_55', 'PO3_74', 'PO3_41', 'PO3_43', 'PO3_93', 'PO3_94', 'PO3_95', 'PO3_96', 'PO3_84', 'PO3_61', 'PO3_83', 'PO3_59', 'PO3_71', 'PO3_98', 'PO3_99', 'PO3100', 'PO3101', 'PO3_75', 'PO3_50'], # 'ISOPND', # 'Ox_l_fp' : ['LO3_18','RD98','LO3_54', # hv # 'LO3_03', 'LO3_02', 'LO3_08', 'LO3_09', 'LO3_06', 'LO3_05', 'LO3_10', 'LO3_53', 'LO3_47', # HOx (ROx route) # HOx # 'LO3_52', 'LO3_51','LO3_50', 'LO3_49', 'LO3_46', 'LO3_44', 'LO3_42', 'LO3_41', 'LO3_40',#'LO3_48','LO3_04', # NOy route # 'LR25','LR21','LR5','LR6' ,'LR30', 'LR10', 'LO3_24', # Bry route # 'LO3_34','LO3_24','LO3_35','LO3_30', 'LO3_39', 'LO3_38','LO3_55' , 'LO3_36', 'RD63'], # Iy route # 'Ox_l_fp1.1' : ['LO3_18','LO3_54', # hv # 'LO3_03', 'LO3_02', 'LO3_08', 'LO3_09', 'LO3_06', 'LO3_05', 'LO3_10', 'LO3_53', 'LO3_47', # HOx (ROx route) # HOx # 'LO3_52', 'LO3_51','LO3_50', 'LO3_49', 'LO3_46', 'LO3_44', 'LO3_42', 'LO3_41', 'LO3_40',#'LO3_48','LO3_04', # NOy route # 'LR25','LR21','LR5','LR6' ,'LR30', 'LR10','LO3_24', # Bry route # 'LO3_34','LO3_24','LO3_35','LO3_30','LO3_39', 'LO3_38','LO3_55' , 'LO3_36', 'RD63'], # Iy route # 'Ox_l_fp1.3' : ['LO3_18','RD98','LO3_54', # hv # 'LO3_03', 'LO3_02', 'LO3_08', 'LO3_09', 'LO3_06', 'LO3_05', 'LO3_10', 'LO3_53', 'LO3_47', 'LO3_56','LR37', 'LR38', # HOx (ROx route) # HOx # 'LO3_52', 'LO3_51','LO3_50', 'LO3_49', 'LO3_46', 'LO3_44', 'LO3_42', 'LO3_41', 'LO3_40','LR36', 'LO3_58', #'LO3_48','LO3_04',,# NOy route # 'LO3_57', 'LO3_07', 'LO3_60', 'LO3_61', 'LO3_62', 'LO3_63', 'LO3_64', 'LO3_65', 'LO3_66', 'LO3_67', 'LO3_68', 'LO3_69', 'LO3_72','LO3_71', # NOy route # 'LR25','LR21','LR5','LR6' ,'LR30', 'LR10', 'LO3_24', # Bry route # 'LO3_34','LO3_24','LO3_35','LO3_30', 'LO3_39', 'LO3_38','LO3_55' , 'LO3_36'], # Iy route # 'Ox_l_fp_r_' : [(0, 3), (3, 11), (11, 21), (21, 28), (28, None)], #, (-2, -1)] # 
'Ox_l_fp_r_1.1' : [(0, 2), (2, 11), (11, 21), (21, 28), (28, None)], #, (-2, -1)] # 'Ox_l_fp_r_1.3' : [(0, 3), (3, 14), (14, 40), (40, 47), (47, None)], #, (-2, -1)] 'fams' : ['I2','HOI','IO', 'I', 'HI+OIO+IONO+INO', 'IONO2','IxOy', 'CH3I', 'CH2IX'], # Iy families 'fams_A' : ['I2','HOI','IO', 'I', 'HI+OIO+IONO+INO', 'IONO2','IxOy', 'CH3I', 'CH2IX', 'AERI'], # Iy families 'fam_slice' : [(0, 1), (1, 2), (2, 3), (3,4 ),(4, 8), (8, 9), (9, 12), (12, 13), (13, None)], # slice 'fam_slice_A' : [(0, 1), (1, 2), (2, 3), (3,4 ),(4, 8), (8, 9), (9, 12), (12, 13), (13, 16),(16, None)], # slice # 'POx_l_fp' : ['PO3_01', 'PO3_03', 'PO3_02', 'PO3_05','PO3_14', 'PO3_15', 'PO3_18', 'PO3_19', 'PO3_20', 'PO3_21', 'PO3_22', 'PO3_24', 'PO3_25', 'PO3_26', 'PO3_27', 'PO3_30', 'PO3_31', 'PO3_32', 'PO3_33', 'PO3_34', 'PO3_35', 'PO3_37', 'PO3_38', 'PO3_39', 'PO3_40', 'PO3_41', 'PO3_43'], 'Ox_key' : ['POX', 'PO3_14', 'PO3_15', 'LOX'],#, 'LO3_18', 'LO3_03', 'LO3_02','LR25', 'LR21', 'LR5','LR6','LO3_34', 'LO3_33','LO3_24', 'LO3_35' ], 'POxLOx' : ['POX', 'LOX'], 'iPOxiLOx' : ['POX', 'LOX', 'iPOX', 'iLOX'], # Iy/ Iodine budget analysis 'BL_FT_UT' : [(0, 6), (6, 26), (26, 38)] , 'n_order' :['CH2IX','CH3I', 'I2', 'HOI','IO', 'I', 'IONO2','HI+OIO+IONO+INO','IxOy' ] , 'n_order_A' :['CH2IX','CH3I', 'I2', 'HOI','IO', 'I', 'IONO2','HI+OIO+IONO+INO','IxOy', 'AERI' ] , 'I_l' : ['RD01', 'RD02', 'RD16', 'RD19', 'RD24', 'RD27'], 'IO_l' : ['RD09', 'RD10', 'RD11', 'RD12', 'RD23', 'LO3_24', 'RD37', 'RD97', 'RD66'], # LO37 swaped for RD97 as LO37 assigned to loss point of I2O3 uptake 'I_p' : ['RD06','RD07','RD10','RD11','RD47','RD15','RD17','RD20','RD22', 'LO3_24', 'RD64', 'RD65', 'RD66', 'RD67','RD68','RD69', 'RD70', 'RD71','RD72', 'RD73', 'RD88', 'RD89'], 'IO_p' : [ 'RD01', 'RD08', 'RD46', 'RD25', 'RD27','RD56'], 'sOH' : ['LO3_18'], 'd_dep' : ['DRYD-FLX'], 'w_dep' : ['WETDCV-$','WETDLS-$'], 'BL_m' : ['TURBMC-$'], 'f_strat' : ['STRT-FL'], 'p_l' : ['PORL-L=$'], 'Cld_flx' : ['CV-FLX-$'], 'I_Br_O3' : ['IO', 
'OIO','HOI','I2','I','CH3IT','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C2H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO'], 'IOrg_RIS' : ['CH3IT','CH2ICl','CH2I2', 'CH2IBr', 'I2','HOI','I','IO', 'OIO', 'HI','IONO','IONO2'], 'I_specs' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2', 'I2O3','I2O4''CH3IT','CH2I2','I','INO'] , 'Iy' : ['I2','HOI','IO', 'OIO', 'HI','INO','IONO', 'IONO2','I2O2', 'I2O3','I2O4','I'], 'Iy1.1' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2','I2O4','I','INO'], 'IOy' : ['HOI','IO', 'OIO','IONO','IONO2','INO','I2O2','I2O4', 'I2O3'], 'IOy1.1' : ['HOI','IO', 'OIO','IONO','IONO2','INO','I2O2','I2O4'], 'I2Ox' : ['I2O2','I2O4','I2O3'], 'I2Ox' : ['I2O2','I2O4','I2O3'], 'IyOx1.1' : ['I2O2','I2O4'], 'Iy_no_i2o4' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2','I','INO', 'I2O3'], 'Iy_no_i2o41.1' : ['I2','HOI','IO', 'OIO', 'HI','IONO', 'IONO2','I2O2','I','INO'], 'Phot_s_Iy' : ['CH3IT','CH2ICl','CH2I2', 'CH2IBr'],#['RD89', 'RD88', 'RD71', 'RD72'], 'HOI' : ['HOI'], 'IOx' : ['IO','I',], 'IO' : ['IO'], 'I' : ['I',], 'OIO' : ['OIO'], 'LIOx' : ['LIOx'], # LOx is p/l tracer name, for Loss of IOx 'PIOx' : ['PIOx'], # LOx is p/l tracer name, for Loss of IOx 'iodine_all' : ['I2','HOI','IO', 'I', 'HI', 'OIO', 'INO', 'IONO','IONO2','I2O2', 'I2O4', 'I2O3', 'I2O5', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'C3H7I','C2H5I','ICl', 'I2O', 'IBr', 'HIO3', ], 'iodine_all_A': ['I2','HOI','IO', 'I', 'HI', 'OIO', 'INO', 'IONO','IONO2','I2O2', 'I2O4', 'I2O3', 'I2O5', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'C3H7I','C2H5I','ICl', 'I2O', 'IBr', 'HIO3','AERI' ], # Misc analysis 'LHOI' : ['RD65', 'RD63', 'RD08'], 'LHOBr' : ['LR25', 'LR30','LR21'], 'LI2' : ['RD64', 'RD06', 'RD22'], 'LCH2I2' : ['RD72' ], 'LCH2Cl' : ['RD88' ] , 'LCH2Br' : ['RD89' ], 'LCH3IT' : ['RD15' , 'RD71'], 'sHOX' : ['HOI', 'HOBr'], 'HO2_loss' : ['PO3_14','RD09','RD02', 'LR2', 'LR3', 'PO3_46','PO3_02', 'PO3_03', 'PO3_05'], 'CAST_int' : ['IO', 
'OIO','HOI','I2','I','HOI','CH3I','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C3H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO', 'OH', 'HO2','NO','NO2'], 'CAST_intn' : ['IO', 'OIO','HOI','I2','I','HOI','CH3IT','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C2H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO', 'DMS', 'NO','HNO3','HNO4', 'NO2','NO3' , 'PAN' , 'HNO2', 'N2O5'], 'CAST_int_n' : ['IO', 'OIO','HOI','I2','I','HOI','CH3I','CH2I2','CH2ICl', 'CH2IBr', 'C3H7I','C2H5I', 'BrO', 'Br', 'HOBr','Br2','CH3Br', 'CH2Br2', 'CHBr3', 'O3', 'CO', 'OH', 'HO2','NO','NO2'], 'diurnal_sp' : ['IO','I2', 'CH2I2', 'BrO' ] , 'obs_comp' : ['CH3IT','CH2I2','CH2ICl','CH2IBr','C2H5I','C3H7I','I2','IO'] , 'emiss_specs': ['CH3IT','CH2I2','CH2ICl','CH2IBr','I2','HOI'] , 'w_dep_specs': ['I2' ,'HI' ,'HOI' ,'IONO', 'IONO2','I2O2', 'I2O4', 'I2O3' ,'AERI'], #, 'IBr', 'ICl'] # 'd_dep_specsl' : ['I2', 'HI', 'HOI', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3','AERI'], #, 'IO', 'OIO'] , 'd_dep_specsl1.1' : ['I2', 'HI', 'HOI', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'AERI'], #, 'IO', 'OIO'] , 'd_dep_specs': ['I2df', 'HIdf', 'HOIdf', 'IONOdf', 'IONO2df', 'I2O2df', 'I2O4df', 'I2O3df', 'AERIdf',], #, 'IOdf', 'OIOdf'], # # 'd_dep_specs1.1': ['I2df', 'HIdf', 'HOIdf', 'IONOdf', 'IONO2df', 'I2O2df', 'I2O4df','AERIdf',], #, 'IOdf', 'OIOdf'], # 'I2_het_cyc' : ['RD59','RD92','RD63'], # IONO2, IONO, HOI 'I_het_loss' : [ 'RD58', 'RD62', 'RD93' ,'RD95'], # HI, I2O2, I2O4, I2O3 uptake (prev: 2OIO excuded as I2Ox formaed, IO+OIO included as I2O3 not treated ) #['RD60','RD61','RD62','RD52','RD53','RD54','RD55','RD13'], # RD13 = OIO + OH => HIO3 86 => AERI loss 'NOx' : ['NO', 'NO2' ], 'N_specs' : ['NO', 'NO2', 'PAN', 'HNO3', 'PMN', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'NH3', 'NH4', 'BrNO2', 'BrNO3', 'MPN', 'ISOPN', 'PROPNN', 'MMN', 'NO3', 'HNO2', 'IONO', 'IONO2', 'INO'], 'N_specs_no_I' : ['NO', 'NO2', 'PAN', 'HNO3', 'PMN', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'NH3', 'NH4', 'BrNO2', 'BrNO3', 'MPN', 'ISOPN', 
'PROPNN', 'MMN', 'NO3', 'HNO2'], 'I_N_tags' : ['RD10', 'RD23', 'RD19', 'RD16', 'RD22', 'RD56', 'RD24', 'LO3_30', 'RD69', 'RD68', 'RD20', 'RD21', 'RD25', 'LO3_39', 'RD17', 'RD18', 'RD75'], 'Br_N_tags' : ['LR7', 'LR18', 'LR17', 'LR11', 'LR8', 'LR20', 'LR26', 'LR28', 'LR27'], 'inactive_I' : ['BrCl', 'OClO', 'ClO', 'HOCl', 'Cl', 'Cl2', 'I2O5', 'I2O', 'HIO3', 'IBr', 'ICl', 'C2H5I','C3H7I'], # I2O3 now active. 'active_I' : ['I2', 'HOI', 'IO', 'I', 'HI', 'OIO', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr'], 'surface_specs' : ['O3', 'NO', 'NO2', 'NO3' ,'N2O5', 'IO', 'IONO2' ], # Model run title dictionaries 'run_name_dict': {'run': 'Halogens (I+,Br+)', 'Br_2ppt': 'Halogens (I+,Br+) + fixed 2 pptv BrO', 'just_I': 'Just Iodine (I+,Br-)', 'no_hal': 'No Halogens', 'just_Br': 'Just Bromine (I-,Br+)', 'Br_1ppt': 'Halogens (I+,Br+) + fixed 1 pptv BrO', 'obs': 'Observations'} , 'latex_run_names': {'I2Ox_half': 'I$_{2}$Ox loss ($\\gamma$) /2', 'run': 'Iodine simulation.', 'MacDonald_iodide': 'Ocean iodide', 'Sulfate_up': 'Sulfate Uptake', 'I2Ox_phot_exp': 'I$_{2}$Ox exp. X-sections', 'het_double': 'het. cycle ($\\gamma$) x2', 'I2Ox_phot_x2': 'I$_{2}$Ox X-sections x2', 'no_het': 'no het. cycle ', 'I2Ox_double': 'I$_{2}$Ox loss ($\\gamma$) x2', 'just_I': '(I+,Br-)', 'BrO1pptv': 'MBL BrO 1 pptv', 'het_half': 'het. cycle ($\\gamma$) /2', 'Just_I_org': 'Just org. 
I', 'no_I2Ox': 'No I$_{2}$Ox Photolysis', 'BrO1pptv_ALL' : 'BrO 1 pptv in Trop.', 'BrO2pptv' : 'MBL BrO 2 pptv', # adjust from GBC to ACP names # 'no_hal': '(I-,Br-)', 'Just_Br': '(I-,Br+)', 'no_hal': 'No Halogens', 'Just_Br': 'GEOS-Chem (v9-2)', # kludge for diurnal plot 'Iodine simulation.':'Iodine simulation.', '(I+,Br+)': 'Iodine simulation.','(I+,Br-)': 'Just Iodine', '(I-,Br+)': 'GEOS-Chem (v9-2)', '(I-,Br-)': 'No Halogens'}, # tracer unit handling 'spec_2_pptv' : ['I2', 'HOI', 'IO', 'OIO', 'HI', 'IONO', 'IONO2', 'I2O2', 'CH3IT', 'CH2I2', 'IBr', 'ICl', 'I', 'HIO3', 'I2O', 'INO', 'I2O3', 'I2O4', 'I2O5', 'AERI', 'Cl2', 'Cl', 'HOCl', 'ClO', 'OClO', 'BrCl', 'CH2ICl', 'CH2IBr', 'C3H7I', 'C2H5I', 'Br2', 'Br', 'BrO', 'HOBr', 'HBr', 'BrNO2', 'BrNO3', 'CHBr3', 'CH2Br2', 'CH3Br','RCHO', 'MVK', 'MACR', 'PMN', 'PPN', 'R4N2', 'DMS', 'SO4s', 'MSA', 'NITs', 'BCPO', 'DST4', 'ISOPN', 'MOBA', 'PROPNN', 'HAC', 'GLYC', 'MMN', 'RIP', 'IEPOX', 'MAP' ,'N2O5','NO3'], # 'HNO4', 'HNO2'], 'spec_2_pptC' : ['PRPE', 'ISOP'], # global 'spec_2_ppbv': ['NO','DMS', 'RIP', 'IEPOX','BCPO', 'DST4', 'HAC', 'GLYC','MACR', 'ISOP'], 'spec_2_ppbC' : ['ALK4'], # pf dictionaries # WARNING - remove non forwards combatible dicts: # (GCFP_TRA_d ... GCFP_d2TRA ... GCFP_d2TRA_justTRA . etc) # GCFP_TRA_d is in use by CVO plotters - use what_species_am_i instead! 
'GCFP_TRA_d' : {'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_96': 'C2H5I', 'TRA_95': 'C3H7I', 'TRA_94': 'CH2IBr', 'TRA_93': 'CH2ICl', 'TRA_92': 'BrCl', 'TRA_91': 'OClO', 'TRA_90': 'ClO', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'I2', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_68': 'HOI', 'TRA_69': 'IO', 'TRA_71': 'HI', 'TRA_70': 'OIO', 'TRA_73': 'IONO2', 'TRA_72': 'IONO', 'TRA_75': 'CH3IT', 'TRA_74': 'I2O2', 'TRA_77': 'IBr', 'TRA_76': 'CH2I2', 'TRA_79': 'I', 'TRA_78': 'ICl', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_59': 'GLYC', 'TRA_58': 'HAC', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 'TRA_50': 'BrNO3', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_25': 'DMS', 'TRA_22': 'N2O5', 'TRA_23': 'HNO4', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'TRA_01': 'NO', 'TRA_02': 'O3', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'HIO3', 'TRA_81': 'I2O', 'TRA_82': 'INO', 'TRA_83': 'I2O3', 'TRA_84': 'I2O4', 'TRA_85': 'I2O5', 'TRA_86': 'AERI', 'TRA_87': 'Cl2', 'TRA_88': 'Cl', 'TRA_89': 'HOCl','O3':'O3', 'CO':'CO'} , # GCFP_d2TRA is in use by IO plotters - use what_species_am_i instead! 
'GCFP_d2TRA' : {'HIO3': 'TRA_80', 'OCPO': 'TRA_37', 'PPN': 'TRA_16', 'OCPI': 'TRA_35', 'O3': 'TRA_2', 'PAN': 'TRA_3', 'ACET': 'TRA_9', 'IEPOX': 'TRA_62', 'BrNO3': 'TRA_50', 'Br': 'TRA_45', 'HBr': 'TRA_48', 'HAC': 'TRA_58', 'ALD2': 'TRA_11', 'HNO3': 'TRA_7', 'HNO2': 'TRA_66', 'C2H5I': 'TRA_96', 'HNO4': 'TRA_23', 'OIO': 'TRA_70', 'MAP': 'TRA_63', 'PRPE': 'TRA_18', 'HI': 'TRA_71', 'CH2I2': 'TRA_76', 'IONO2': 'TRA_73', 'NIT': 'TRA_32', 'CH3Br': 'TRA_53', 'C3H7I': 'TRA_95', 'C3H8': 'TRA_19', 'DMS': 'TRA_25', 'CH2O': 'TRA_20', 'CH3IT': 'TRA_75','CH3I': 'TRA_75', 'NO2': 'TRA_64', 'NO3': 'TRA_65', 'N2O5': 'TRA_22', 'CHBr3': 'TRA_51', 'DST4': 'TRA_41', 'DST3': 'TRA_40', 'DST2': 'TRA_39', 'DST1': 'TRA_38', 'HOCl': 'TRA_89', 'NITs': 'TRA_33', 'RCHO': 'TRA_12', 'C2H6': 'TRA_21', 'MPN': 'TRA_54', 'INO': 'TRA_82', 'MP': 'TRA_24', 'CH2Br2': 'TRA_52', 'SALC': 'TRA_43', 'NH3': 'TRA_30', 'CH2ICl': 'TRA_93', 'RIP': 'TRA_61', 'ClO': 'TRA_90', 'NO': 'TRA_1', 'SALA': 'TRA_42', 'MOBA': 'TRA_56', 'R4N2': 'TRA_17', 'BrCl': 'TRA_92', 'OClO': 'TRA_91', 'PMN': 'TRA_15', 'CO': 'TRA_4', 'CH2IBr': 'TRA_94', 'ISOP': 'TRA_6', 'BCPO': 'TRA_36', 'MVK': 'TRA_13', 'BrNO2': 'TRA_49', 'IONO': 'TRA_72', 'Cl2': 'TRA_87', 'HOBr': 'TRA_47', 'PROPNN': 'TRA_57', 'Cl': 'TRA_88', 'I2O2': 'TRA_74', 'I2O3': 'TRA_83', 'I2O4': 'TRA_84', 'I2O5': 'TRA_85', 'MEK': 'TRA_10', 'MMN': 'TRA_60', 'ISOPN': 'TRA_55', 'SO4s': 'TRA_28', 'I2O': 'TRA_81', 'ALK4': 'TRA_5', 'MSA': 'TRA_29', 'I2': 'TRA_67', 'Br2': 'TRA_44', 'IBr': 'TRA_77', 'MACR': 'TRA_14', 'I': 'TRA_79', 'AERI': 'TRA_86', 'HOI': 'TRA_68', 'BrO': 'TRA_46', 'NH4': 'TRA_31', 'SO2': 'TRA_26', 'SO4': 'TRA_27', 'IO': 'TRA_69', 'H2O2': 'TRA_8', 'BCPI': 'TRA_34', 'ICl': 'TRA_78', 'GLYC': 'TRA_59','ALK4': 'ALK4', 'MSA': 'MSA', 'MO2': 'MO2', 'C3H8': 'C3H8', 'ISOP': 'ISOP', 'DMS': 'DMS', 'CH2O': 'CH2O', 'O3': 'O3', 'PAN': 'PAN', 'NO3': 'NO3', 'N2O5': 'N2O5', 'H2O2': 'H2O2', 'NO': 'NO', 'PPN': 'PPN', 'R4N2': 'R4N2', 'HO2': 'HO2', 'NO2': 'NO2', 'PMN': 'PMN', 'ACET': 'ACET', 
'CO': 'CO', 'ALD2': 'ALD2', 'RCHO': 'RCHO', 'HNO3': 'HNO3', 'HNO2': 'HNO2', 'SO2': 'SO2', 'SO4': 'SO4', 'HNO4': 'HNO4', 'C2H6': 'C2H6', 'RO2': 'RO2', 'MVK': 'MVK', 'PRPE': 'PRPE', 'OH': 'OH', 'ETO2': 'ETO2', 'MEK': 'MEK', 'MP': 'MP' , 'GMAO_TEMP':'GMAO_TEMP' }, # 'GCFP_d2TRA_justTRA' : {'HIO3': 'TRA_80', 'OCPO': 'TRA_37', 'PPN': 'TRA_16', 'OCPI': 'TRA_35', 'O3': 'TRA_2', 'PAN': 'TRA_3', 'ACET': 'TRA_9', 'IEPOX': 'TRA_62', 'BrNO3': 'TRA_50', 'Br': 'TRA_45', 'HBr': 'TRA_48', 'HAC': 'TRA_58', 'ALD2': 'TRA_11', 'HNO3': 'TRA_7', 'HNO2': 'TRA_66', 'C2H5I': 'TRA_96', 'HNO4': 'TRA_23', 'OIO': 'TRA_70', 'MAP': 'TRA_63', 'PRPE': 'TRA_18', 'HI': 'TRA_71', 'CH2I2': 'TRA_76', 'IONO2': 'TRA_73', 'NIT': 'TRA_32', 'CH3Br': 'TRA_53', 'C3H7I': 'TRA_95', 'C3H8': 'TRA_19', 'DMS': 'TRA_25', 'CH2O': 'TRA_20', 'CH3IT': 'TRA_75','CH3I': 'TRA_75', 'NO2': 'TRA_64', 'NO3': 'TRA_65', 'N2O5': 'TRA_22', 'CHBr3': 'TRA_51', 'DST4': 'TRA_41', 'DST3': 'TRA_40', 'DST2': 'TRA_39', 'DST1': 'TRA_38', 'HOCl': 'TRA_89', 'NITs': 'TRA_33', 'RCHO': 'TRA_12', 'C2H6': 'TRA_21', 'MPN': 'TRA_54', 'INO': 'TRA_82', 'MP': 'TRA_24', 'CH2Br2': 'TRA_52', 'SALC': 'TRA_43', 'NH3': 'TRA_30', 'CH2ICl': 'TRA_93', 'RIP': 'TRA_61', 'ClO': 'TRA_90', 'NO': 'TRA_1', 'SALA': 'TRA_42', 'MOBA': 'TRA_56', 'R4N2': 'TRA_17', 'BrCl': 'TRA_92', 'OClO': 'TRA_91', 'PMN': 'TRA_15', 'CO': 'TRA_4', 'CH2IBr': 'TRA_94', 'ISOP': 'TRA_6', 'BCPO': 'TRA_36', 'MVK': 'TRA_13', 'BrNO2': 'TRA_49', 'IONO': 'TRA_72', 'Cl2': 'TRA_87', 'HOBr': 'TRA_47', 'PROPNN': 'TRA_57', 'Cl': 'TRA_88', 'I2O2': 'TRA_74', 'I2O3': 'TRA_83', 'I2O4': 'TRA_84', 'I2O5': 'TRA_85', 'MEK': 'TRA_10', 'MMN': 'TRA_60', 'ISOPN': 'TRA_55', 'SO4s': 'TRA_28', 'I2O': 'TRA_81', 'ALK4': 'TRA_5', 'MSA': 'TRA_29', 'I2': 'TRA_67', 'Br2': 'TRA_44', 'IBr': 'TRA_77', 'MACR': 'TRA_14', 'I': 'TRA_79', 'AERI': 'TRA_86', 'HOI': 'TRA_68', 'BrO': 'TRA_46', 'NH4': 'TRA_31', 'SO2': 'TRA_26', 'SO4': 'TRA_27', 'IO': 'TRA_69', 'H2O2': 'TRA_8', 'BCPI': 'TRA_34', 'ICl': 'TRA_78', 'GLYC': 'TRA_59','OH': 
'OH','HO2': 'HO2',}, 'GCFP_d2TRA_all_1.6' :{'HIO3': 'TRA_80', 'TRA_17': 'TRA_17', 'TRA_16': 'TRA_16', 'TRA_15': 'TRA_15', 'TRA_14': 'TRA_14', 'TRA_13': 'TRA_13', 'TRA_12': 'TRA_12', 'TRA_11': 'TRA_11', 'TRA_19': 'TRA_19', 'ACET': 'ACET', 'RIP': 'TRA_61', 'BrNO3': 'TRA_50', 'HAC': 'TRA_58', 'ALD2': 'ALD2', 'HNO3': 'HNO3', 'HNO2': 'HNO2', 'HNO4': 'HNO4', 'OIO': 'TRA_70', 'MAP': 'TRA_63', 'PRPE': 'PRPE', 'TRA_29': 'TRA_29', 'CH2I2': 'TRA_76', 'I2O2': 'TRA_74', 'NIT': 'TRA_32', 'CH3Br': 'TRA_53', 'C3H7I': 'TRA_95', 'MO2': 'MO2', 'C3H8': 'C3H8', 'I2O5': 'TRA_85', 'TRA_71': 'TRA_71', 'TRA_70': 'TRA_70', 'TRA_73': 'TRA_73', 'DMS': 'DMS', 'TRA_75': 'TRA_75', 'TRA_74': 'TRA_74', 'TRA_77': 'TRA_77', 'TRA_76': 'TRA_76', 'CH2O': 'CH2O', 'TRA_78': 'TRA_78', 'CH3IT': 'TRA_75', 'NO2': 'NO2', 'NO3': 'NO3', 'N2O5': 'N2O5', 'H2O2': 'H2O2', 'PAN': 'PAN', 'HOCl': 'TRA_89', 'TRA_18': 'TRA_18', 'GMAO_TEMP': 'GMAO_TEMP', 'RCHO': 'RCHO', 'C2H6': 'C2H6', 'INO': 'TRA_82', 'MP': 'MP', 'CH2Br2': 'TRA_52', 'CH2ICl': 'TRA_93', 'TRA_59': 'TRA_59', 'TRA_58': 'TRA_58', 'IEPOX': 'TRA_62', 'TRA_53': 'TRA_53', 'TRA_52': 'TRA_52', 'TRA_51': 'TRA_51', 'TRA_50': 'TRA_50', 'TRA_57': 'TRA_57', 'TRA_56': 'TRA_56', 'TRA_55': 'TRA_55', 'TRA_54': 'TRA_54', 'MOBA': 'TRA_56', 'CH3I': 'TRA_75', 'BrCl': 'TRA_92', 'OClO': 'TRA_91', 'CO': 'CO', 'BCPI': 'TRA_34', 'ISOP': 'ISOP', 'BCPO': 'TRA_36', 'MVK': 'MVK', 'TRA_28': 'TRA_28', 'Cl': 'TRA_88', 'TRA_26': 'TRA_26', 'TRA_27': 'TRA_27', 'TRA_24': 'TRA_24', 'I2O3': 'TRA_83', 'I2O4': 'TRA_84', 'TRA_23': 'TRA_23', 'TRA_20': 'TRA_20', 'TRA_21': 'TRA_21', 'MMN': 'TRA_60', 'I2O': 'TRA_81', 'HBr': 'TRA_48', 'ALK4': 'ALK4', 'I2': 'TRA_67', 'PPN': 'PPN', 'IBr': 'TRA_77', 'I': 'TRA_79', 'AERI': 'TRA_86', 'NH4': 'TRA_31', 'SO2': 'SO2', 'SO4': 'SO4', 'NH3': 'TRA_30', 'TRA_08': 'TRA_08', 'TRA_09': 'TRA_09', 'TRA_01': 'TRA_01', 'TRA_02': 'TRA_02', 'TRA_03': 'TRA_03', 'TRA_04': 'TRA_04', 'TRA_05': 'TRA_05', 'TRA_06': 'TRA_06', 'TRA_07': 'TRA_07', 'OCPI': 'TRA_35', 'OCPO': 'TRA_37', 
'Br2': 'TRA_44', 'O3': 'O3', 'Br': 'TRA_45', 'TRA_96': 'TRA_96', 'TRA_95': 'TRA_95', 'TRA_94': 'TRA_94', 'TRA_93': 'TRA_93', 'TRA_92': 'TRA_92', 'TRA_91': 'TRA_91', 'TRA_90': 'TRA_90', 'TRA_62': 'TRA_62', 'TRA_63': 'TRA_63', 'TRA_60': 'TRA_60', 'TRA_61': 'TRA_61', 'TRA_66': 'TRA_66', 'TRA_67': 'TRA_67', 'C2H5I': 'TRA_96', 'TRA_65': 'TRA_65', 'TRA_68': 'TRA_68', 'TRA_69': 'TRA_69', 'OH': 'OH', 'IONO2': 'TRA_73', 'HI': 'TRA_71', 'CHBr3': 'TRA_51', 'TRA_46': 'TRA_46', 'DST4': 'TRA_41', 'DST3': 'TRA_40', 'DST2': 'TRA_39', 'DST1': 'TRA_38', 'NITs': 'TRA_33', 'TRA_48': 'TRA_48', 'TRA_49': 'TRA_49', 'TRA_44': 'TRA_44', 'TRA_45': 'TRA_45', 'RO2': 'RO2', 'TRA_47': 'TRA_47', 'TRA_40': 'TRA_40', 'TRA_41': 'TRA_41', 'TRA_42': 'TRA_42', 'TRA_43': 'TRA_43', 'MPN': 'TRA_54', 'ETO2': 'ETO2', 'IO': 'TRA_69', 'TRA_64': 'TRA_64', 'ClO': 'TRA_90', 'NO': 'NO', 'SALA': 'TRA_42', 'SALC': 'TRA_43', 'R4N2': 'R4N2', 'PMN': 'PMN', 'TRA_25': 'TRA_25', 'CH2IBr': 'TRA_94', 'TRA_22': 'TRA_22', 'BrNO2': 'TRA_49', 'IONO': 'TRA_72', 'Cl2': 'TRA_87', 'HOBr': 'TRA_47', 'PROPNN': 'TRA_57', 'MEK': 'MEK', 'TRA_72': 'TRA_72', 'ISOPN': 'TRA_55', 'SO4s': 'TRA_28', 'TRA_79': 'TRA_79', 'MSA': 'MSA', 'TRA_39': 'TRA_39', 'TRA_38': 'TRA_38', 'GLYC': 'TRA_59', 'TRA_35': 'TRA_35', 'TRA_34': 'TRA_34', 'TRA_37': 'TRA_37', 'TRA_36': 'TRA_36', 'TRA_31': 'TRA_31', 'TRA_30': 'TRA_30', 'TRA_33': 'TRA_33', 'TRA_32': 'TRA_32', 'HO2': 'HO2', 'MACR': 'TRA_14', 'HOI': 'TRA_68', 'BrO': 'TRA_46', 'ICl': 'TRA_78', 'TRA_80': 'TRA_80', 'TRA_81': 'TRA_81', 'TRA_82': 'TRA_82', 'TRA_83': 'TRA_83', 'TRA_84': 'TRA_84', 'TRA_85': 'TRA_85', 'TRA_86': 'TRA_86', 'TRA_87': 'TRA_87', 'TRA_88': 'TRA_88', 'TRA_89': 'TRA_89','GMAO_TEMP': 'GMAO_TEMP', 'GMAO_UWND': 'GMAO_UWND', 'GMAO_VWND': 'GMAO_VWND'}, # 'GCFP_d2TRA_all_1.7' : {'TRA_74': 'ICl', 'TRA_25': 'DMS', 'TRA_68': 'CH2I2', 'TRA_44': 'Br2', 'TRA_70': 'CH2IBr', 'TRA_22': 'N2O5', 'TRA_76': 'IO', 'TRA_79': 'INO', 'TRA_23': 'HNO4', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 
'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 'TRA_21': 'C2H6', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_69': 'CH2ICl', 'TRA_50': 'BrNO3', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_73': 'IBr', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'TRA_77': 'HI', 'TRA_83': 'I2O3', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'TRA_72': 'I2', 'TRA_59': 'GLYC', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_20': 'CH2O', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'TRA_75': 'I', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_01': 'NO', 'TRA_02': 'O3', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'IONO', 'TRA_81': 'IONO2', 'TRA_82': 'I2O2', 'TRA_58': 'HAC', 'TRA_84': 'I2O4', 'TRA_85': 'AERI', 'TRA_27': 'SO4', 'TRA_78': 'OIO', 'TRA_66': 'HNO2', 'TRA_71': 'HOI', 'TRA_24': 'MP', 'TRA_67': 'CH3IT', 'TRA_9': 'ACET', 'TRA_8': 'H2O2', 'TRA_7': 'HNO3', 'TRA_6': 'ISOP', 'TRA_5': 'ALK4', 'TRA_4': 'CO', 'TRA_3': 'PAN', 'TRA_2': 'O3', 'TRA_1': 'NO'}, 'GCFP_d2TRA_all_1.7' : {'TRA_25': 'DMS', 'TRA_77': 'HI', 'TRA_76': 'IO', 'TRA_23': 'HNO4', 'TRA_71': 'HOI', 'TRA_70': 'CH2IBr', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_79': 'INO', 'TRA_78': 'OIO', 'TRA_51': 'CHBr3', 'TRA_50': 'BrNO3', 'TRA_52': 'CH2Br2', 'TRA_46': 'BrO', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_47': 'HOBr', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_81': 'IONO2', 'TRA_35': 'OCPI', 'TRA_57': 'PROPNN', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 
'TRA_33': 'NITs', 'TRA_56': 'MOBA', 'TRA_83': 'I2O3', 'TRA_55': 'ISOPN', 'TRA_84': 'I2O4', 'TRA_54': 'MPN', 'TRA_5': 'ALK4', 'TRA_49': 'BrNO2', 'TRA_32': 'NIT', 'TRA_9': 'ACET', 'TRA_8': 'H2O2', 'TRA_7': 'HNO3', 'TRA_6': 'ISOP', 'TRA_59': 'GLYC', 'TRA_4': 'CO', 'TRA_3': 'PAN', 'TRA_2': 'O3', 'TRA_1': 'NO', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'CH3IT', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_68': 'CH2I2', 'TRA_69': 'CH2ICl', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_17': 'R4N2', 'TRA_28': 'SO4s', 'TRA_16': 'PPN', 'TRA_58': 'HAC', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_29': 'MSA', 'TRA_22': 'N2O5', 'TRA_73': 'IBr', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'TRA_80': 'IONO', 'TRA_26': 'SO2', 'TRA_82': 'I2O2', 'TRA_72': 'I2', 'TRA_48': 'HBr', 'TRA_85': 'AERI', 'TRA_34': 'BCPI', 'TRA_75': 'I', 'TRA_53': 'CH3Br', 'TRA_74': 'ICl'}, 'GCFP_d2TRA_all_1.7_EOH_actual_names' : {'HNO4': 'HNO4', 'PPN': 'PPN', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'O3': 'O3', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'GMAO_UWND': 'GMAO_UWND', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'CH3IT', 'TRA_65': 'NO3', 'TRA_68': 'CH2I2', 'TRA_69': 'CH2ICl', 'OH': 'OH', 'LAT': 'LAT', 'TRA_71': 'HOI', 'TRA_70': 'CH2IBr', 'TRA_73': 'IBr', 'TRA_72': 'I2', 'TRA_75': 'I', 'TRA_74': 'ICl', 'TRA_77': 'HI', 'TRA_76': 'IO', 'TRA_79': 'INO', 'TRA_78': 'OIO', 'NO2': 'NO2', 'NO3': 'NO3', 'N2O5': 'N2O5', 'H2O2': 'H2O2', 'GMAO_VWND': 'GMAO_VWND', 'PAN': 'PAN', 'GMAO_TEMP': 'GMAO_TEMP', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_59': 'GLYC', 'TRA_58': 'HAC', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 
'TRA_50': 'BrNO3', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'NO': 'NO', 'PMN': 'PMN', 'HNO3': 'HNO3', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_25': 'DMS', 'TRA_22': 'N2O5', 'TRA_23': 'HNO4', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'RO2': 'RO2', 'LON': 'LON', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'HO2': 'HO2', 'SO2': 'SO2', 'SO4': 'SO4', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'HNO2': 'HNO2', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'IONO', 'TRA_81': 'IONO2', 'TRA_82': 'I2O2', 'TRA_83': 'I2O3', 'TRA_84': 'I2O4', 'TRA_85': 'AERI', 'TRA_86': 'EOH'}, 'TRA_spec_met_all_1.7_EOH': {'MAO3': 'MAO3', 'DHMOB': 'DHMOB', 'ETP': 'ETP', 'RCO3': 'RCO3', 'MO2': 'MO2', 'EOH': 'EOH', 'MVKN': 'MVKN', 'R4P': 'R4P', 'ISNP': 'ISNP', 'RB3P': 'RB3P', 'MGLY': 'MGLY', 'MAOPO2': 'MAOPO2', 'RIO2': 'RIO2', 'PMNN': 'PMNN', 'PP': 'PP', 'VRP': 'VRP', 'RP': 'RP', 'MRO2': 'MRO2', 'HC5': 'HC5', 'ATO2': 'ATO2', 'PYAC': 'PYAC', 'R4N1': 'R4N1', 'DIBOO': 'DIBOO', 'LISOPOH': 'LISOPOH', 'HO2': 'HO2', 'ETHLN': 'ETHLN', 'ISNOOB': 'ISNOOB', 'ISNOOA': 'ISNOOA', 'ROH': 'ROH', 'MAN2': 'MAN2', 'B3O2': 'B3O2', 'INPN': 'INPN', 'MACRN': 'MACRN', 'PO2': 'PO2', 'VRO2': 'VRO2', 'MRP': 'MRP', 'PRN1': 'PRN1', 'ISNOHOO': 'ISNOHOO', 'MOBAOO': 'MOBAOO', 'MACRNO2': 'MACRNO2', 'ISOPND': 'ISOPND', 'HC5OO': 'HC5OO', 'ISOPNBO2': 'ISOPNBO2', 'RA3P': 'RA3P', 'ISOPNB': 'ISOPNB', 'ISOPNDO2': 'ISOPNDO2', 'PMNO2': 'PMNO2', 'IAP': 'IAP', 'MCO3': 'MCO3', 'IEPOXOO': 'IEPOXOO', 'MAOP': 'MAOP', 'INO2': 'INO2', 'OH': 'OH', 'PRPN': 'PRPN', 'GLYX': 'GLYX', 'A3O2': 'A3O2', 'ETO2': 'ETO2', 'R4O2': 'R4O2', 'ISN1': 'ISN1', 'KO2': 'KO2', 'ATOOH': 'ATOOH','GMAO_PSFC': 'GMAO_PSFC', 'GMAO_SURF': 'GMAO_SURF', 'GMAO_TEMP': 'GMAO_TEMP', 'GMAO_ABSH': 'GMAO_ABSH', 'GMAO_UWND': 'GMAO_UWND', 
'GMAO_VWND': 'GMAO_VWND', 'TRA_9': 'ACET', 'TRA_8': 'H2O2', 'TRA_7': 'HNO3', 'TRA_6': 'ISOP', 'TRA_5': 'ALK4', 'TRA_4': 'CO', 'TRA_3': 'PAN', 'TRA_2': 'O3', 'TRA_1': 'NO', 'TRA_74': 'ICl', 'TRA_25': 'DMS', 'TRA_68': 'CH2I2', 'TRA_44': 'Br2', 'TRA_70': 'CH2IBr', 'TRA_22': 'N2O5', 'TRA_76': 'IO', 'TRA_79': 'INO', 'TRA_23': 'HNO4', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'TRA_51': 'CHBr3', 'TRA_21': 'C2H6', 'TRA_57': 'PROPNN', 'TRA_56': 'MOBA', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'TRA_69': 'CH2ICl', 'TRA_50': 'BrNO3', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'TRA_73': 'IBr', 'TRA_35': 'OCPI', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 'TRA_33': 'NITs', 'TRA_32': 'NIT', 'TRA_77': 'HI', 'TRA_83': 'I2O3', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'TRA_72': 'I2', 'TRA_59': 'GLYC', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_48': 'HBr', 'TRA_49': 'BrNO2', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_20': 'CH2O', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'TRA_08': 'H2O2', 'TRA_09': 'ACET', 'TRA_75': 'I', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_01': 'NO', 'TRA_02': 'O3', 'TRA_03': 'PAN', 'TRA_04': 'CO', 'TRA_05': 'ALK4', 'TRA_06': 'ISOP', 'TRA_07': 'HNO3', 'TRA_80': 'IONO', 'TRA_81': 'IONO2', 'TRA_82': 'I2O2', 'TRA_58': 'HAC', 'TRA_84': 'I2O4', 'TRA_85': 'AERI', 'TRA_27': 'SO4', 'TRA_78': 'OIO', 'TRA_66': 'HNO2', 'TRA_71': 'HOI', 'TRA_24': 'MP', 'TRA_67': 'CH3IT' }, 'TRA_spec_met_all_1.7_EOH_no_trailing_zeroes': {'EOH': 'EOH', 'TRA_17': 'R4N2', 'TRA_16': 'PPN', 'TRA_15': 'PMN', 'TRA_14': 'MACR', 'TRA_13': 'MVK', 'TRA_12': 'RCHO', 'TRA_11': 'ALD2', 'TRA_10': 'MEK', 'TRA_19': 'C3H8', 'TRA_18': 'PRPE', 'DHMOB': 'DHMOB', 'RP': 'RP', 'GMAO_UWND': 'GMAO_UWND', 'MAN2': 'MAN2', 'B3O2': 
'B3O2', 'MRP': 'MRP', 'PRN1': 'PRN1', 'TRA_62': 'IEPOX', 'TRA_63': 'MAP', 'TRA_60': 'MMN', 'TRA_61': 'RIP', 'TRA_66': 'HNO2', 'TRA_67': 'CH3IT', 'TRA_64': 'NO2', 'TRA_65': 'NO3', 'TRA_68': 'CH2I2', 'TRA_69': 'CH2ICl', 'IAP': 'IAP', 'MCO3': 'MCO3', 'GMAO_SURF': 'GMAO_SURF', 'OH': 'OH', 'PRPN': 'PRPN', 'TRA7': 'HNO3', 'TRA6': 'ISOP', 'MAO3': 'MAO3', 'RCO3': 'RCO3', 'MO2': 'MO2', 'MACRNO2': 'MACRNO2', 'TRA_23': 'HNO4', 'TRA_71': 'HOI', 'TRA_70': 'CH2IBr', 'TRA_73': 'IBr', 'TRA_72': 'I2', 'TRA_75': 'I', 'TRA_74': 'ICl', 'ISNP': 'ISNP', 'TRA_76': 'IO', 'TRA_79': 'INO', 'RB3P': 'RB3P', 'TRA_51': 'CHBr3', 'ROH': 'ROH', 'PP': 'PP', 'ISOPNDO2': 'ISOPNDO2', 'HC5': 'HC5', 'TRA9': 'ACET', 'TRA_56': 'MOBA', 'MACRN': 'MACRN', 'DIBOO': 'DIBOO', 'MRO2': 'MRO2', 'INPN': 'INPN', 'GMAO_TEMP': 'GMAO_TEMP', 'PO2': 'PO2', 'ISOPND': 'ISOPND', 'TRA_48': 'HBr', 'TRA_1': 'NO', 'RA3P': 'RA3P', 'ISOPNB': 'ISOPNB', 'TRA_44': 'Br2', 'TRA_45': 'Br', 'TRA_46': 'BrO', 'TRA_47': 'HOBr', 'TRA_40': 'DST3', 'TRA_41': 'DST4', 'TRA_42': 'SALA', 'TRA_43': 'SALC', 'IEPOXOO': 'IEPOXOO', 'MAOP': 'MAOP', 'INO2': 'INO2', 'ETO2': 'ETO2', 'ISN1': 'ISN1', 'TRA_49': 'BrNO2', 'ETP': 'ETP', 'TRA5': 'ALK4', 'TRA4': 'CO', 'TRA_59': 'GLYC', 'TRA_58': 'HAC', 'TRA1': 'NO', 'R4P': 'R4P', 'TRA_3': 'PAN', 'TRA2': 'O3', 'TRA_53': 'CH3Br', 'TRA_52': 'CH2Br2', 'MAOPO2': 'MAOPO2', 'TRA_50': 'BrNO3', 'TRA_57': 'PROPNN', 'GMAO_VWND': 'GMAO_VWND', 'TRA_55': 'ISOPN', 'TRA_54': 'MPN', 'RIO2': 'RIO2', 'VRP': 'VRP', 'R4N1': 'R4N1', 'GMAO_PSFC': 'GMAO_PSFC', 'VRO2': 'VRO2', 'ISNOHOO': 'ISNOHOO', 'HC5OO': 'HC5OO', 'ETHLN': 'ETHLN', 'TRA_28': 'SO4s', 'TRA_29': 'MSA', 'TRA_26': 'SO2', 'TRA_27': 'SO4', 'TRA_24': 'MP', 'TRA_25': 'DMS', 'TRA_22': 'N2O5', 'R4O2': 'R4O2', 'TRA_20': 'CH2O', 'TRA_21': 'C2H6', 'TRA_77': 'HI', 'MVKN': 'MVKN', 'TRA_35': 'OCPI', 'TRA_78': 'OIO', 'MGLY': 'MGLY', 'PMNN': 'PMNN', 'TRA_39': 'DST2', 'TRA_38': 'DST1', 'ATO2': 'ATO2', 'TRA_34': 'BCPI', 'TRA_37': 'OCPO', 'TRA_36': 'BCPO', 'TRA_31': 'NH4', 'TRA_30': 'NH3', 
def latex_spec_name(input_x, debug=False):
    """
    Map a GEOS-Chem species/tracer (or analysis family) name to a
    LaTeX-formatted label for plotting.

    Parameters
    ----------
    input_x (str): species, tracer, family, or diagnostic name
    debug (bool): unused; retained for interface compatibility

    Returns
    -------
    (str): LaTeX-formatted name (e.g. 'O3' -> 'O$_{3}$')

    Raises
    ------
    KeyError: if input_x has no entry in the lookup table
    """
    spec_dict = {
        # Iodine species
        'OIO': 'OIO', 'C3H7I': 'C$_{3}$H$_{7}$I', 'IO': 'IO', 'I': 'I',
        'I2': 'I$_{2}$', 'CH2ICl': 'CH$_{2}$ICl', 'HOI': 'HOI',
        'CH2IBr': 'CH$_{2}$IBr', 'C2H5I': 'C$_{2}$H$_{5}$I',
        'CH2I2': 'CH$_{2}$I$_{2}$', 'CH3IT': 'CH$_{3}$I', 'IONO': 'IONO',
        'HIO3': 'HIO$_{3}$', 'ICl': 'ICl', 'I2O3': 'I$_{2}$O$_{3}$',
        'I2O4': 'I$_{2}$O$_{4}$', 'I2O5': 'I$_{2}$O$_{5}$', 'INO': 'INO',
        'I2O': 'I$_{2}$O', 'IBr': 'IBr', 'I2O2': 'I$_{2}$O$_{2}$',
        'IONO2': 'IONO$_{2}$', 'HI': 'HI',
        # Bromine species
        'BrO': 'BrO', 'Br': 'Br', 'HOBr': 'HOBr', 'Br2': 'Br$_{2}$',
        'CH3Br': 'CH$_{3}$Br', 'CH2Br2': 'CH$_{2}$Br$_{2}$',
        'CHBr3': 'CHBr$_{3}$',
        # Core/standard species
        'O3': 'O$_{3}$', 'CO': 'CO', 'DMS': 'DMS', 'NOx': 'NOx',
        'NO': 'NO', 'NO2': 'NO$_{2}$', 'NO3': 'NO$_{3}$',
        'HNO3': 'HNO$_{3}$', 'HNO4': 'HNO$_{4}$', 'PAN': 'PAN',
        'HNO2': 'HNO$_{2}$', 'N2O5': 'N$_{2}$O$_{5}$',
        'ALK4': '>= C4 alkanes', 'ISOP': 'Isoprene',
        'H2O2': 'H$_{2}$O$_{2}$', 'ACET': 'CH$_{3}$C(O)CH$_{3}$',
        'MEK': '>C3 ketones', 'ALD2': 'CH$_{3}$CHO',
        'RCHO': 'CH$_{3}$CH$_{2}$CHO', 'MVK': 'CH$_{2}$=CHC(O)CH$_{3}$',
        'MACR': 'Methacrolein', 'PMN': 'CH$_{2}$=C(CH$_{3}$)C(O)OONO$_{2}$',
        'PPN': 'CH$_{3}$CH$_{2}$C(O)OONO$_{2}$',
        'R4N2': '>= C4 alkylnitrates', 'PRPE': '>= C4 alkenes',
        'C3H8': 'C$_{3}$H$_{8}$', 'CH2O': 'CH$_{2}$O',
        'C2H6': 'C$_{2}$H$_{6}$', 'MP': 'CH$_{3}$OOH', 'SO2': 'SO$_{2}$',
        'SO4': 'SO$_{4}$', 'SO4s': 'SO$_{4}$ on SSA',
        'MSA': 'CH$_{4}$SO$_{3}$', 'NH3': 'NH$_{3}$', 'NH4': 'NH$_{4}$',
        'NIT': 'InOrg N', 'NITs': 'InOrg N on SSA', 'BCPI': 'BCPI',
        'OCPI': 'OCPI', 'BCPO': 'BCPO', 'OCPO': 'OCPO', 'DST1': 'DST1',
        'DST2': 'DST2', 'DST3': 'DST3', 'DST4': 'DST4', 'SALA': 'SALA',
        'SALC': 'SALC', 'HBr': 'HBr', 'BrNO2': 'BrNO$_{2}$',
        'BrNO3': 'BrNO$_{3}$', 'MPN': 'CH$_{3}$ON$_{2}$', 'ISOPN': 'ISOPN',
        'MOBA': 'MOBA', 'PROPNN': 'PROPNN', 'HAC': 'HAC', 'GLYC': 'GLYC',
        'MMN': 'MMN', 'RIP': 'RIP', 'IEPOX': 'IEPOX', 'MAP': 'MAP',
        'AERI': 'Aerosol Iodine', 'Cl2': 'Cl$_{2}$', 'Cl': 'Cl',
        'HOCl': 'HOCl', 'ClO': 'ClO', 'OClO': 'OClO', 'BrCl': 'BrCl',
        'HI+OIO+IONO+INO': 'HI+OIO+IONO+INO', 'CH2IX': 'CH$_{2}$IX',
        'IxOy': 'I$_{2}$O$_{X}$ ($_{X}$=2,3,4)', 'CH3I': 'CH$_{3}$I',
        'OH': 'OH', 'HO2': 'HO$_{2}$', 'MO2': 'MO$_{2}$', 'RO2': 'RO$_{2}$',
        'RD01': r'I + O$_{3}$ $\rightarrow$ IO + O$_{2}$',
        # Analysis names
        'iodine_all': 'All Iodine', 'Iy': 'I$_{Y}$', 'IOy': 'IO$_{Y}$',
        'IyOx': 'I$_{Y}$O$_{X}$', 'IOx': 'IO$_{X}$',
        'iodine_all_A': 'All Iodine (Inc. AERI)', 'I2Ox': 'I$_{2}$O$_{X}$',
        'AERI/SO4': 'AERI/SO4', 'EOH': 'Ethanol',
        'PSURF': 'Pressure at the bottom of level',
        'GMAO_TEMP': 'Temperature', 'TSKIN': 'Temperature at 2m',
        'GMAO_UWND': 'Zonal Wind', 'GMAO_VWND': 'Meridional Wind',
        # Bug fix: the U10M/V10M labels were swapped relative to the
        # GMAO_UWND/GMAO_VWND entries above (U = zonal, V = meridional)
        'U10M': '10m Zonal Wind', 'V10M': '10m Meridional Wind',
        'CH2OO': 'CH$_{2}$OO',
        # Family Names
        'N_specs': 'NOy', 'NOy': 'NO$_Y$', 'N_specs_no_I': 'NOy exc. iodine',
        # typos (retained so older tracer lists still resolve)
        'CH2BR2': 'CH$_{2}$Br$_{2}$',
    }
    return spec_dict[input_x]
def p_l_unity(rxn, debug=False):
    """
    Return the multiplier that normalises a prod/loss (p/l) tagged tracer
    to unity.

    Most tagged reactions already carry a coefficient of 1.0; a handful of
    'PO3_*' production tags use fractional coefficients.

    Parameters
    ----------
    rxn (str): smvgear reaction tag (e.g. 'LR24', 'PO3_46')
    debug (bool): unused; retained for interface compatibility

    Returns
    -------
    (float): multiplier for the given tag

    Raises
    ------
    KeyError: if the tag is not in the store
    """
    # Tags whose p/l coefficient is already unity
    unity_tags = [
        'LR2', 'LR3', 'LR20', 'LR21', 'LR22', 'LR23', 'LR24', 'LR25',
        'LR26', 'LR27', 'LR28', 'LR29', 'LR30', 'LR31', 'RD02', 'RD09',
        'PO3_14',
    ]
    multipliers = dict([(tag, 1.0) for tag in unity_tags])
    # Tags with fractional coefficients
    multipliers.update({
        'PO3_46': 0.25, 'PO3_03': 0.3, 'PO3_02': 0.15, 'PO3_05': 0.15,
    })
    return multipliers[rxn]
def tra_unit(x, scale=False, adjustment=False, adjust=True, \
        global_unit=False, ClearFlo_unit=False, debug=False ):
    """
    Return the unit string for a GEOS-Chem tracer/diagnostic name, and
    optionally the factors needed to scale/adjust raw model output to it.

    Parameters
    ----------
    x (str): tracer/diagnostic name (e.g. 'O3', 'IO', 'GMAO_TEMP')
    scale (bool): also return the multiplicative scale factor for the unit
    adjustment (bool): also return an additive adjustment (K -> Celsius)
    adjust (bool): apply the planeflight (pf) pptv/pptC overrides
    global_unit (bool): override with globally appropriate ppbv/ppbC units
    ClearFlo_unit (bool): override units for the ClearFlo campaign species
    debug (bool): print unit adjustments as they are applied

    Returns
    -------
    units (str), or (units, scaleby), or (units, scaleby, adjustby)
    depending on the scale/adjustment flags. NOTE: if adjustment=True but
    scale=False, the final branch returns units only and the computed
    adjustby is discarded.

    Raises
    ------
    KeyError: if x is not in the unit store
    """
    # Base unit store: mixing-ratio units per tracer, plus met/diagnostic units
    tra_unit = { 'OCPI': 'ppbv', 'OCPO': 'ppbv', 'PPN': 'ppbv',
    'HIO3': 'pptv', 'O3': 'ppbv', 'PAN': 'ppbv', 'ACET': 'ppbC',
    'RIP': 'ppbv', 'BrNO3': 'pptv', 'Br': 'pptv', 'HBr': 'pptv',
    'HAC': 'ppbv', 'ALD2': 'ppbC', 'HNO3': 'ppbv', 'HNO2': 'ppbv',
    'C2H5I': 'pptv', 'HNO4': 'ppbv', 'OIO': 'pptv', 'MAP': 'ppbv',
    'PRPE': 'ppbC', 'HI': 'pptv', 'CH2I2': 'pptv', 'IONO2': 'pptv',
    'NIT': 'ppbv', 'CH3Br': 'pptv', 'C3H7I': 'pptv', 'C3H8': 'ppbC',
    'DMS': 'ppbv', 'CH2O': 'ppbv', 'CH3IT': 'pptv', 'NO2': 'ppbv',
    'NO3': 'ppbv', 'N2O5': 'ppbv', 'CHBr3': 'pptv', 'DST4': 'ppbv',
    'DST3': 'ppbv', 'DST2': 'ppbv', 'DST1': 'ppbv', 'HOCl': 'ppbv',
    'NITs': 'ppbv', 'RCHO': 'ppbv', 'C2H6': 'ppbC', 'MPN': 'ppbv',
    'INO': 'pptv', 'MP': 'ppbv', 'CH2Br2': 'pptv', 'SALC': 'ppbv',
    'NH3': 'ppbv', 'CH2ICl': 'pptv', 'IEPOX': 'ppbv', 'ClO': 'ppbv',
    # NOTE(review): 'NO' is stored as pptv here (not ppbv) - confirm intended
    'NO': 'pptv', 'SALA': 'ppbv', 'MOBA': 'ppbv', 'R4N2': 'ppbv',
    'BrCl': 'pptv', 'OClO': 'ppbv', 'PMN': 'ppbv', 'CO': 'ppbv',
    'CH2IBr': 'pptv', 'ISOP': 'ppbC', 'BCPO': 'ppbv', 'MVK': 'ppbv',
    'BrNO2': 'pptv', 'IONO': 'pptv', 'Cl2': 'ppbv', 'HOBr': 'pptv',
    'PROPNN': 'ppbv', 'Cl': 'ppbv', 'I2O2': 'pptv', 'I2O3': 'pptv',
    'I2O4': 'pptv', 'I2O5': 'pptv', 'MEK': 'ppbC', 'MMN': 'ppbv',
    'ISOPN': 'ppbv', 'SO4s': 'ppbv', 'I2O': 'pptv', 'ALK4': 'ppbC',
    'MSA': 'ppbv', 'I2': 'pptv',
    'Br2': 'pptv', 'IBr': 'pptv', 'MACR': 'ppbv', 'I': 'pptv',
    'AERI': 'pptv', 'HOI': 'pptv', 'BrO': 'pptv', 'NH4': 'ppbv',
    'SO2': 'ppbv', 'SO4': 'ppbv', 'IO': 'pptv', 'H2O2': 'ppbv',
    'BCPI': 'ppbv', 'ICl': 'pptv', 'GLYC': 'ppbv',
    # Extra diagnostics to allow for simplified processing
    'CH3I':'pptv', 'Iy':'pptv', 'PSURF': 'hPa', 'OH':'pptv', 'HO2':'pptv',
    # NOTE(review): 'pptbv' below looks like a typo for 'pptv' - confirm
    # before fixing (callers may match on the literal string)
    'MO2': 'pptv', 'NOy':'pptbv','EOH': 'ppbv' , 'CO':'ppbv', 'CH4':'ppbv',
    'TSKIN':'K', 'GMAO_TEMP': 'K', 'GMAO_VWND' :'m/s','GMAO_UWND': 'm/s',
    'RO2': 'pptv', 'U10M':'m/s','V10M': 'm/s' , 'PRESS': 'hPa',
    'CH2OO':'pptv', }
    units = tra_unit[x]
    # Adjust to appropriate scale for pf analysis
    if adjust:
        spec_2_pptv = GC_var('spec_2_pptv')
        spec_2_pptC = GC_var('spec_2_pptC')
        if ( x in spec_2_pptv ):
            if (debug):
                print 'adjusting {} ({}) to {}'.format(x, units, 'pptv' )
            units = 'pptv'
        if ( x in spec_2_pptC ):
            if (debug):
                print 'adjusting {} ({}) to {}'.format(x, units, 'pptC' )
            units = 'pptC'
    # Over ride adjustments for globally appro. units
    if global_unit:
        spec_2_ppbv = GC_var('spec_2_ppbv')
        spec_2_ppbC = GC_var('spec_2_ppbC')
        if ( x in spec_2_ppbv ):
            if (debug):
                print 'adjusting {} ({}) to {}'.format(x, units, 'ppbv' )
            units = 'ppbv'
        if ( x in spec_2_ppbC ):
            if (debug):
                print 'adjusting {} ({}) to {}'.format(x, units, 'ppbC' )
            units = 'ppbC'
    # Campaign-specific (ClearFlo) unit overrides
    if ClearFlo_unit:
        if any( [ x == i for i in [ 'NO', 'MACR', 'MVK' ] ] ):
            units = 'ppbv'
        if any( [ x == i for i in [ 'PAN' ] ] ):
            units = 'pptv'
        if any( [ x == i for i in [ 'ISOP' ] ] ):
            units = 'ppbC'
    if scale:
        scaleby = get_unit_scaling( units )
    if adjustment:
        # Kelvin is converted to Celsius via an additive offset
        # NOTE(review): 'Celcuis' is a typo for 'Celsius' in the returned
        # unit string - fix only after checking downstream string matches
        if units == 'K':
            units = 'Deg. Celcuis'
            adjustby = -273.15
        else:
            adjustby = 0
    if scale and (not adjustment):
        return units, scaleby
    elif (scale and adjustment):
        return units, scaleby, adjustby
    else:
        return units
def Ox_in_species(in_=None, rxns=False, keys=False):
    """
    Return the number of odd-oxygen (Ox) equivalents carried by a species,
    or by a tagged prod/loss reaction.

    Parameters
    ----------
    in_ (str): species name (optionally with 'df' suffix) or reaction tag
    rxns (bool): look up in_ in the reaction-tag table instead
    keys (bool): return the species-table keys (ignored if rxns=True)

    Returns
    -------
    (float) Ox equivalents for in_, or the list of species keys

    Raises
    ------
    KeyError: if in_ is not present in the selected table
    """
    # Ox equivalents per species; entries with a 'df' suffix duplicate the
    # plain names (both spellings appear in processed output)
    species_Ox = { 'HOIdf': 1.0, 'OIOdf': 2.0, 'BrNO3df': 2.0,
    'HNO3df': 1.0, 'PPNdf': 1.0, 'IOdf': 1.0, 'N2O5df': 3.0,
    'IONOdf': 1.0, 'PMNdf': 1.0, 'BrNO2df': 1.0, 'I2O4df': 4,
    'MPNdf': 1.0, 'NO3df': 2.0, 'BrOdf': 1.0, 'HOBrdf': 1.0,
    'HNO4df': 1.0, 'O3df': 1.0, 'I2O2df': 2.0, 'NO2df': 1.0,
    'IONO2df': 2.0, 'PANdf': 1.0, 'OIO': 2.0, 'BrO': 1.0, 'HOBr': 1.0,
    'N2O5': 3.0, 'IONO': 1.0, 'MPN': 1.0, 'BrNO2': 1.0, 'I2O2': 2.0,
    'I2O4': 4, 'PPN': 1.0, 'HOI': 1.0, 'HNO3': 1.0, 'IONO2': 2.0,
    'NO2': 1.0, 'IO': 1.0, 'HNO4': 1.0, 'PMN': 1.0, 'O3': 1.0,
    'BrNO3': 2.0, 'PAN': 1.0, 'NO3': 2.0 }
    # Ox equivalents per tagged reaction (smvgear p/l tags)
    rxn_Ox = { 'LO3_18': 1.0, 'LR25': 1.0, 'RD12': 2.0, 'LR21': 1.0,
    'LO3_38': 1.0, 'LO3_10': 1.0, 'LO3_34': 1.0, 'LO3_35': 1.0,
    'LO3_33': 1.0, 'LO3_30': 1.0, 'LR5': 2.0, 'LR6': 2.0, 'RD37': 2.0,
    'LO3_05': 1.0, 'RD11': 2.0, 'LO3_06': 1.0, 'LO3_49': 1.0,
    'LO3_04': 1.0, 'LO3_03': 1.0, 'LO3_02': 1.0, 'LO3_42': 1.0,
    'LO3_41': 1.0, 'LO3_40': 1.0, 'LO3_47': 1.0, 'LO3_46': 1.0,
    'LO3_09': 1.0, 'LO3_44': 1.0, 'LR30': 1.0, 'LO3_24': 1.0/2.0,
    'LO3_21': 1.0, 'RD23': 2.0, 'LO3_54': 2.0, 'LO3_55': 1.0,
    'LO3_08': 1.0, 'LO3_50': 1.0, 'LO3_51': 1.0, 'LO3_52': 1.0,
    'LO3_53': 1.0, 'LR10': 1.0, 'LO3_36':1.0,
    # LO3_24 set to 1 (as 0.5*CoE) even though 2 Ox equivalents are lost,
    # this allows for contribution to bromine and iodine loss to be inclued
    # LOX included for processing ease
    'LOX':1.0, 'POX':1.0, 'PO3_14': 1.0, 'PO3_15':1.0 , 'RD98': 1.0,
    'LO3_39':1.0 , 'RD63': 1.0,
    # for prod analysis
    'PO3_69' : 1.0/2.0, 'PO3_35': 0.85, 'PO3_03':0.15/0.3,
    'PO3_70': 0.4/1.4 , 'PO3_77': 1.0/2.0 , 'RD06':1.0,
    'LR9':1.0 }
    # rxns takes precedence; keys is only consulted when rxns is False
    if (rxns):
        return rxn_Ox[ in_ ]
    if (keys):
        return species_Ox.keys()
    else:
        return species_Ox[ in_ ]
# ----
# 4.10 - Return dictionary of gaw sites
# ----
def gaw_2_name():
    """
    Return a dict mapping GAW site codes (the HDF index) to site names,
    read from the 'gaw_site_list.h5' store under the 'dwd' directory.
    """
    wdf = get_dir('dwd') +'ozonesurface/' + 'gaw_site_list.h5'
    df= pd.read_hdf( wdf, 'wp', mode='r' )
    # Second column of the store holds the long site names
    names = df.values[:,1]
    # alter long name for CVO
    ind=[ n for n, i in enumerate(names) if ( i =='Cape Verde Atmospheric Observatory' )]
    names[ind[0]] = 'Cape Verde'
    return dict( zip( df.index, names ))

# ----
# 4.11 - Returns list of gaw sites in HDF file of O3 surface data
# ----
def get_global_GAW_sites(f='gaw_site_list_global.h5'):
    """
    Return a sorted list of GAW site codes from the given HDF store,
    excluding sites not used in the "grouped" analysis.

    Parameters
    ----------
    f (str): HDF filename within the 'dwd' ozonesurface directory
    """
    wd= get_dir('dwd') +'ozonesurface/'
    df= pd.read_hdf( wd+f, 'wp', mode='r' )
    # NOTE: 'vars' shadows the builtin of the same name (pre-existing style)
    vars = sorted( list(df.index) )
    # Kludge: remove those not in "grouped" analysis
    # ( Q: why are these sites not present? - A: data control for lomb-scragle)
    [ vars.pop( vars.index(i) ) for i in ['ZUG', 'ZSF', 'ZEP', 'WLG', 'USH', 'SDK', 'PYR', 'PUY', 'PAL', 'MKN', 'IZO', 'HPB', 'DMV', 'BKT', 'AMS', 'ALT', 'ABP'] ]
    #[ 'AMS', 'MKN', 'IZO' , 'WLG', 'PYR', 'USH', 'ABP', 'ALT'] ]
    return vars

# --------
# 4.14 - Convert gamap category/species name to Iris/bpch name
# --------
def diagnosticname_gamap2iris( x ):
    """
    Translate a GAMAP diagnostic category name (e.g. 'IJ-AVG-$') to the
    corresponding Iris/bpch category name (e.g. 'IJ_AVG_S').

    Raises
    ------
    KeyError: if x is not a known GAMAP category
    """
    d={ "IJ-AVG-$": 'IJ_AVG_S', "BXHGHT-$": 'BXHEIGHT',
    "PORL-L=$":'PORL_L_S__', 'DAO-3D-$':'DAO_3D_S__',
    'DAO-FLDS' :'DAO_FLDS__', 'DRYD-FLX': 'DRYD_FLX__',
    'DRYD-VEL':'DRYD_VEL__', 'CHEM-L=$':'CHEM_L_S__',
    'WETDCV-$':'WETDCV_S__', 'WETDLS-$':'WETDLS_S__',
    'WD-LSW-$':'WD_LSW_S__', 'WD-LSR-$':'WD_LSR_S__',
    'UP-FLX-$':'UP_FLX_S__', 'NS-FLX-$': 'NS_FLX_S__',
    'EW-FLX-$':'EW_FLX_S__', 'TURBMC-$': 'TURBMC_S__',
    'OD-MAP-$':'OD_MAP_S__',
    # 'WD-FRC-$'
    'MC-FRC-$': 'MC_FRC_S__',
    }
    return d[x]
def get_unit_scaling( units ):
    """
    Return the multiplicative factor that converts a mixing ratio (v/v)
    to the given unit.

    Parameters
    ----------
    units (str): unit string (e.g. 'pptv', 'ppbC', 'K', 'm/s', 'hPa')

    Returns
    -------
    (float/int): 1E12 for ppt units, 1E9 for ppb units, and 1 for units
        that require no scaling

    Raises
    ------
    ValueError: if the unit is not recognised. (Bug fix: previously the
        unmatched branch only printed a warning, then hit 'return scaleby'
        with 'scaleby' unbound, raising an uninformative NameError.)
    """
    # Units used as-is (no scaling required)
    misc = ( 'K', 'm/s', 'unitless', 'kg', 'm', 'm2', 'kg/m2/s',
        'molec/cm2/s', 'mol/cm3/s', 'kg/s', 'hPa' )
    if units in ('pptv', 'pptC'):
        scaleby = 1E12
    elif units in ('ppbv', 'ppbC'):
        scaleby = 1E9
    elif units in misc:
        scaleby = 1
    else:
        # Keep the original warning, but fail loudly and descriptively
        print( 'WARNING: This unit is not in unit lists: ', units )
        raise ValueError( 'Unknown unit: {}'.format( units ) )
    return scaleby
repsin # if respun: # r = [ i+'.respun' for i in r ] if standard and (not any( [preindustrial,sensitivity, v10v92comp]) ): if just_bcase_std: l = [ 'Just_Br', 'run' ] # l = [ 'Just_Br', 'no_I2Ox' ] elif just_bcase_no_hal: l = [ 'no_hal', 'run' ] # l = [ 'no_hal', 'no_I2Ox' ] elif just_std: l = [ 'Just_Br' ] else: if ver == '1.7': # l = ['no_hal', 'run' ] l = ['Just_Br', 'run' ] else: l = ['no_hal', 'Just_Br', 'just_I', 'run' ] # if any( [ (ver ==i) for i in '1.5', '1.6' ] ) : r = [ d + i for i in l ] if nested_EU: d= 'iGEOS_Chem_1.6_G5_NPOINTS/' if just_bcase_std: l = [ 'Just_Br', 'run' ] else: l = [ 'no_hal', 'run' ] r = [ d+i for i in l ] # Setup latex titles list if titles and (not any( [preindustrial, v10v92comp]) ) and ( standard or \ sensitivity or nested_EU): l = [ l_dict[i] for i in l ] if v10v92comp: if just_bcase_std: l = [ 'Just_Br', 'run' ] else: l = [ 'no_hal', 'run' ] r= [ 'iGEOSChem_1.7_v10/'+ i for i in l ]+ \ [ 'iGEOS_Chem_1.6_G5/'+ i for i in l ] l= [ 'GEOS-Chem v10 (no hal)', 'Iodine Sim. (v10)'] + \ [ 'GEOS-Chem v9-2 (no hal)', 'Iodine Sim.'] if IO_obs: l = 'run_CVO', 'run_GRO', 'run_MAL', 'run_HAL', 'run_TOR_updated.respun' r = [d+ i for i in l] if no_I2Ox: r = [ i+'_no_I2Ox' for i in r ] if preindustrial: r = [ 'iGEOS_Chem_1.5_G5/no_hal', 'iGEOS_Chem_1.5_G5/run' , \ 'iGEOS_Chem_1.5_G5_preindustrial_no_hal/no_hal', \ 'iGEOS_Chem_1.5_G5_preindustrial/run' ] l = [ '(I-,Br-)', '(I+,Br+)' , '(I-,Br-) - 1750', '(I+,Br+) - 1750' ] if debug: print [pwd + i for i in r ], l if debug: [pwd + i for i in r ] if skip3: [ [ i.pop(0) for i in l, r ] for ii in range(3) ] if titles: return [pwd + i for i in r ], l else: return [pwd + i for i in r ] # -------------- # 5.02 - Store of constants for use by funcs/progs # -------------- def constants(input_x, debug=False): """ Dictionary storing commonly used constants """ con_dict ={ 'RMM_air' : ( .78*(2.*14.)+.22*(2.*16.) 
    # (continuation of con_dict literal) Avogadro's number; molec/cm2 per DU
    ) , 'AVG' : 6.0221413E23, 'mol2DU': 2.69E20 }
    return con_dict[input_x]

# ---------------- Section 6 -------------------------------------------
# -------------- Dynamic processing of p/l
#

# -------------
# 6.01 - Extract reactions to form a dictionary of active reactions
# -------------
def rxn_dict_from_smvlog( wd, PHOTOPROCESS=None, ver='1.7', LaTeX=False, debug=False ):
    """
    build a dictionary reaction of reaction details from smv.log
    This can be used as an external call to analyse other prod/loss
    reactions through smvgear
    """
    # number of the first photolysis reaction differs by model version
    if isinstance( PHOTOPROCESS, type(None) ):
        PHOTOPROCESS = { '1.6' : 457, '1.7' : 467 }[ver]
    fn = 'smv2.log'
    if debug:
        print wd+'/'+fn
    file_ = open( wd+'/'+fn, 'rb' )
    readrxn = False
    # collect rows from the 'NMBR' header until the next blank line;
    # 'rxns' is created lazily via the NameError on first append
    for row in file_:
        row = row.split()
        if 'NMBR' in row:
            readrxn=True
        if len(row) < 1 :
            readrxn=False
        if readrxn:
            try:
                rxns.append( row )
            except:
                rxns = [ row ]
    # -- remove 'NMBR'
    rxns = [ i for i in rxns if ( 'NMBR' not in i ) ]
    # first column is the reaction number; remainder the reaction detail
    n = [int(rxn[0]) for rxn in rxns ]
    rxns = [rxn[1:] for rxn in rxns ]
    rdict = dict( zip(n, rxns) )
    # --- Process to Latex
    if LaTeX:
        for rxn in sorted(rdict.keys() ):
            # -- Use Latex formatting? (photolysis rxns get the hv arrow)
            if rxn > PHOTOPROCESS-1:
                # xarstr = r' $\xrightarrow{hv}$ '
                xarstr = r' + hv $\rightarrow$ '
            else:
                xarstr = r' + M $\rightarrow$ '
            # --- get all print on a per tag basis the coe, rxn str
            try:
                rxn_str = ''.join( rdict[rxn][4:] )
                if LaTeX:
                    rxn_str = rxn_str.replace('++=', xarstr)
                    rxn_str = rxn_str.replace('+', ' + ')
                    rxn_str = rxn_str.replace('+ =', r' $\rightarrow$ ' )
                else:
                    pass
                if debug:
                    print rxn_str
                try:
                    rxn_strs += [ rxn_str ]
                    rxns += [ rxn ]
                except:
                    rxn_strs = [ rxn_str ]
                    rxns = [ rxn ]
            except:
                print '!'*100, 'ERROR HERE: {} {}'.format( rxn, rxn_str )
        # re-key by reaction number with latex reaction strings as values
        rdict = dict( zip(rxns, rxn_strs ) )
    return rdict

# -------------
# 6.02 - Extract reactions tracked by prod loss diag for a given p/l family
# -------------
def rxns_in_pl( wd, spec='LOX', debug=False ):
    """ Extract reactions tracked by p/l family in smvgear """
    fn = 'smv2.log'
    file_ = open( wd+'/'+fn, 'rb' )
    # if debug:
    if True:
        print file_
    readrxn = False
    # rows between the family header ('Family ... coefficient ... rxns <spec>')
    # and the next blank line belong to the requested family
    for row in file_:
        row = row.split()
        if all( [ i in row for i in 'Family','coefficient' , 'rxns', spec] ):
            readrxn=True
        if len(row) < 1 :
            readrxn=False
        if readrxn:
            try:
                rxns.append( row )
            except:
                rxns = [ row ]
    # -- Check that rxns ahave been found?
    if len( rxns ) < 1:
        print 'ERROR: No rxns. found for >{}<, correct family?'.format( spec )
        sys.exit(0)
    # -- remove 'Family'
    rxns = [ i for i in rxns if ( 'Family' not in i ) ]
    # second column is the reaction number here (cf. 6.01 where it is first)
    n = [int(rxn[1]) for rxn in rxns ]
    rxns = [rxn[2:] for rxn in rxns ]
    rdict = dict( zip(n, rxns) )
    return rdict

# -------------
# 6.03 - Extract reaction infomation for given tracer tags
# -------------
def rxn4pl( pls, wd='example/example', rdict=None, reduce_size=True, \
        debug=False ):
    """ Get information on reaction in smvgear from a provide reaction tag """
    # --- Get Dict of reaction detail
    if debug:
        print 'rxn4pl called'
    if isinstance(rdict, type(None) ):
        # Get Dict of all reactions, Keys = #s
        rdict = rxn_dict_from_smvlog(wd)
    if debug:
        for i in rdict.keys():
            if any( [ (s_ in ''.join( rdict[i] ) ) for s_ in pls ] ):
                print i, 'yes'
    # print rdict#.keys()
    # --- Indices for
    # reduce dict size
    if reduce_size:
        # keep only reactions whose joined text mentions one of the tags
        keys = [ i for i in rdict.keys() if \
            any( [ (s_ in ''.join( rdict[i] ) ) for s_ in pls ]) ]
        # re-make dictionary
        rdict = dict( zip( keys, [rdict[i] for i in keys] ))
    # loop via pl - pair each tag with the first reaction number matching it
    keys = np.array([ [ pl, [ i for i in rdict.keys() \
        if any( [ (pl in ''.join( rdict[i] ) ) ]) ][0] ] for pl in pls ])
    # --- Return as reactions referenced by tag
    return dict( zip( keys[:,0], [ rdict[int(i)] for i in keys[:,1] ]) )

# -------------
# 6.04 - Construct a list of indicies for each fam from given tags
# -------------
def get_indicies_4_fam( tags, fam=False, IO_BrOx2=False, rtnspecs=False,
        NOy_as_HOx=True, debug=False ):
    """ Return indicies (in list form) for in a given family """
    # assign family
    # famsn = [ 'Photolysis','HOx','NOy' ,'Bromine', 'Iodine' ]
    famsn = [ 'Photolysis','HOx' ,'Bromine', 'Iodine' ]
    fams = []
    for tag in tags:
        # get_tag_fam defined elsewhere in this module's package
        fams.append( get_tag_fam( tag) )
    # if rm NOy family (treat as NOx)
    if NOy_as_HOx:
        fams = [x if (x!='NOy') else 'HOx' for x in fams]
    # per-family lists of tag indices
    fd = dict(zip(tags, fams) )
    ll = []
    [ ll.append([]) for i in famsn ]
    for n, tag in enumerate( tags ) :
        for fn in range(len(famsn)):
            if fd[tag] == famsn[fn] :
                ll[fn].append( n)
    if fam:
        # Kludge - to allow for counting Ox loss via IO +BrO 50/50,
        # ( add extra loss tag. )
        if IO_BrOx2:
            ll[famsn.index('Bromine')].append( max([max(i) for i in ll])+1 )
            fams = fams +[ 'Bromine' ]
            tags = tags + [ 'LO3_24']
        if rtnspecs:
            return ll, fams, tags
        else:
            return ll, fams
    else:
        return ll

# -------------
# 6.05 - Get tags for reactions
# -------------
def get_p_l_tags( rxns, debug=False):
    """ get p/l tags for a given smvgear reaction """
    # (PD??, RD??, LO3_??, PO3_??, LR??)
    for rxn in rxns:
        if debug:
            print [ i for i in rxn if any( [x in i for x in 'PD', 'RD', 'PO3','LO3' , 'LR' ]) ]
        tags = [i for i in rxn if any( [x in i for x in 'PD', 'RD', 'PO3','LO3' , 'LR' ]) ]
        # lazily-created accumulator (NameError on first loop pass)
        try:
            tagsl.append( tags)
        except:
            tagsl = [tags]
    return tagsl

# -------------
# 6.06 - extract reactions tracked by prod loss diag in input.geos
# -------------
def p_l_species_input_geos( wd, ver='1.7', rm_multiple_tagged_rxs=False ):
    """ Extract prod/loss species (input.geos) and reaction tags (globchem.dat) """
    # find and open input.geos file
    fn = glob.glob(wd+'/*input.geos*')[0]
    file_ = open( fn, 'rb' )
    # Read in just the prod loss section
    strs_in_1st_line = 'Number', 'of', 'P/L', 'families'
    section_line_divider = '------------------------+----------'+ \
        '--------------------------------------------'
    readrxn = False
    for row in file_:
        row = row.split()
        # once at prod/loss section, start added to list
        if all( [ i in row for i in strs_in_1st_line ]):
            readrxn=True
        # if not at end of prod/loss section, add to list
        if section_line_divider in row:
            readrxn=False
        if readrxn:
            try:
                rxns.append( row )
            except:
                rxns = [ row ]
    # -- Only consider 'Family' ( no headers e.g.
    # (cont.) 'families' )
    rxns = [ i for i in rxns if ( 'families' not in i ) ]
    rxns = [ [ i.replace(':','') for i in r ] for r in rxns ]
    # Kludge, adjust for extra space 12-99
    # ( This is no longer required for 1.7 + )
    if ver == '1.6':
        [i.pop(0) for i in rxns if ('th' not in i[0]) ]
    # Extract just PD (input.geos) and vars (globchem.dat vars )
    PD = [rxn[4] for rxn in rxns ]
    vars = [rxn[5:] for rxn in rxns ]
    # remove p/l with muliple values ( start from 12th input) - Kludge?
    if rm_multiple_tagged_rxs:
        PD, vars = [ i[11:] for i in PD, vars ]
    vars = [ i[0] for i in vars ]
    return PD, vars

# -------------
# 6.07 - extract all active tags from smv.log
# -------------
def tags_from_smvlog( wd ): #, spec='LOX' ):
    """ Get all active p/l tags in smvgear ( from smv2.log ) """
    fn = 'smv2.log'
    file_ = open( wd+'/'+fn, 'rb' )
    readrxn = False
    # collect rows from the species-table header until the next blank line
    for row in file_:
        row = row.split()
        if all( [(i in row) for i in ['NBR', 'NAME', 'MW', 'BKGAS(VMRAT)'] ]):
            readrxn=True
        if len(row) < 1 :
            readrxn=False
        if readrxn:
            try:
                rxns.append( row )
            except:
                rxns = [ row ]
    # -- remove 'NMBR'
    rxns = [ i for i in rxns if ( 'NBR' not in i ) ]
    # species name is the second column
    rxns = [rxn[1] for rxn in rxns ]
    # --- only consider tags
    return [i for i in rxns if any( [x in i for x in 'PD', 'RD', 'PO3','LO3' , 'LR' ]) ]

# -------------
# 6.08 - extract all active PDs from smv.log
# -------------
def PDs_from_smvlog( wd, spec='LOX' ):
    """ Get all active PDs tags in smvgear ( from smv2.log ) """
    fn = 'smv2.log'
    file_ = open( wd+'/'+fn, 'rb' )
    readrxn = False
    # 'leniency' lets one blank line pass before closing the read window
    leniency = 0
    entries_in_title = ['Families', 'for', 'prod', 'or', 'loss', 'output:']
    for row in file_:
        row = row.split()
        if all( [(i in row) for i in entries_in_title ]):
            readrxn=True
            leniency = 1
        if len(row) < 1 :
            if leniency < 0:
                readrxn=False
            leniency -= 1
        if readrxn:
            try:
                rxns.append( row )
            except:
                rxns = [ row ]
    # -- remove 'NMBR' (header / separator rows)
    exceptions = [ 'SPECIES', '===============================================================================','Families']
    rxns = [ i for i in rxns if all( [ (ii not in i) for ii in exceptions ]) ]
    # flatten the remaining rows into a single list of PD names
    rxns = [j for k in rxns for j in k ]
    return rxns

# -------------
# 6.09 - Give all reactions tag is active within
# -------------
def rxns4tag( tag, rdict=None, ver='1.7', wd=None ):
    """ get a list of all reactions with a given p/l tag """
    # --- get reaction dictionary
    if rdict == None:
        rdict = rxn_dict_from_smvlog( wd, ver=ver )
    # --- Caveats - to adapt for long line errors in fortran written output
    errs = ['LO3_36']
    cerrs = ['RD95']
    if any([ (tag == i) for i in errs ] ):
        tag = cerrs[ errs.index( tag) ]
    # -- loop reactions, if tag in reaction return reaction
    rxns = []
    for n, rxn in enumerate( rdict.values() ):
        # if any( [tag in i for i in rxn]):
        # endswith avoids matching tags that are prefixes of other tags
        if any( [(i.endswith(tag) ) for i in rxn]):
            rxns.append( [rdict.keys()[n] ]+ rxn )
    return rxns

# -------------
# 6.10 - get details for a given tag
# -------------
def get_tag_details( wd, tag=None, PDs=None, rdict=None, \
        PHOTOPROCESS=None, ver='1.7', LaTeX=False, print_details=False, \
        debug=False ):
    """ Retriveve prod/loss tag details from smv.log
    ( rxn number + reaction description)"""
    # what is the number of the first photolysis reaction?
    if isinstance( PHOTOPROCESS, type(None) ):
        PHOTOPROCESS = { '1.6' : 457, '1.7' : 467 }[ver]
    # --- get all reactions tags are active in smv.log
    if rdict == None:
        rdict = rxn_dict_from_smvlog( wd, ver=ver )
    trxns = rxns4tag( tag, wd=wd, rdict=rdict )
    # --- get all print on a per tag basis the coe, rxn str
    try:
        rxn_str = ''.join( trxns[0][5:9])
        # -- Use Latex formatting?
        # if LaTeX:
        # # setup latex arrows and replace existing arrows
        # if trxns[0][0] > PHOTOPROCESS-1:
        # xarstr = r' $\xrightarrow{hv}$ '
        # else:
        # xarstr = r' $\xrightarrow{M}$ '
        # rxn_str = rxn_str.replace('++=', xarstr).replace('+', ' + ')
        # rxn_str = rxn_str.replace('+ =', r' $\rightarrow$ ' )
        # else:
        # pass
        if debug:
            print rxn_str
        # [ tag, reaction number, reaction string ]
        dets = [ tag, trxns[0][0], rxn_str ]
    except:
        print '!'*100, 'ERROR HERE: {} {}'.format( tag, trxns )
    if print_details:
        print dets
    # --- return a dictionary of all active tagged reactions details by tag : PD, number, rxn str, coeeffiecn
    else:
        return dets

# -------------
# 6.11 - Takes a reaction number add gives the Ox Coe
# -------------
def get_rxn_Coe(wd, num, tag, nums=None, rxns=None, tags=None, \
        Coe=None, spec='LOX', debug=False):
    """ Retrieve given reaction coefficient for smvgear (from smv2.log) """
    # --- get dictionaries for reactions within
    # (prod_loss_4_spec defined elsewhere in this module's package)
    if all( [ (i == None) for i in nums, rxns, tags, Coe ] ):
        nums, rxns, tags, Coe = prod_loss_4_spec( wd, spec, all_clean=True )
    # Pull reaction coefficient
    Coe_dict = dict(zip(nums, Coe) )
    Coe = float(Coe_dict[ num ])
    if ('P' not in tag ):
        # consider all change positive - Kludge due to the assignment approach.
        Coe = Coe*-1.0
    # --- over write Ox in species with prod_mod_tms values
    try:
        Coe = Ox_in_species(tag, rxns=True)
        if debug:
            print 'using values from Ox_in_species'
    except:
        if debug:
            print 'using values from smv.log @: {}'.format(wd)
    return Coe

# -------------- Section 7 -------------------------------------------
# -------------- Observational Variables
#
# 5.02 - obs sites (e.g. deposition locs, lons, alts )
# 5.04 - Store site locations ( LAT, LON, ALT)

# --------------
# 7.01 - open ocean IO data
# --------------
def IO_obs_data(just_IO=False):
    """
    Dictionary of open ocean IO observations for automated comparisons
    key = ref (e.g. name_year )
    values = alt, lon, lat, times, full_name , IO (avg), BrO (avg),
    CAM-Chem IO, CAM-Chem BrO, group
    """
    IO_obs_dict={
    'Read_2008': [0.0, -24.87, 16.85, 6.0, 'Read, 2008', '1', '2', '1', '2', 'Leeds'],
    'Weddel_Sea': [0.03, -50.0, 75.0, 10.0, 'Weddel Sea', '-', '-', '-', '-', '-'],
    'Oetjen_2009': [0.0, 73.5, 5.0, 2.0, 'Oetjen, 2009', '2.4', '-', '1', '-', 'Leeds'],
    'Jones_2010_II': [0.0, -19.0, 30.0, 7.0, 'Jones, 2010, RHaMBLe II (26-36N)', '-', '-', '-', '-', '-'],
    'Leser_2003': [0.0, -24.87, 16.85, 10.0, 'Leser, 2003', '-', '3.6', '-', '0.8', 'Heidelberg'],
    'Jones_2010_I': [0.0, -19.0, 20.0, 7.0, 'Jones, 2010, RHaMBLe I (15-25N)', '-', '-', '-', '-', '-'],
    'Halley_BAS': [0.03, -26.34, 75.35, 10.0, 'Halley, BAS', '-', '-', '-', '-', '-'],
    'Theys_2007': [0.0, -20.9, -55.5, 8.0, 'Theys, 2007', '-', '<0.5', '-', '0.8', 'Belgium'],
    'Dix_2013': [9.0, -160.0, 10.0, 1.0, 'Dix, 2013', '0.1', '-', '', '-', 'NCAR'],
    'Allan_2000': [0.162, -16.6, 28.4, 6.0, 'Allan, 2000', '1.2', '-', '0.4', '-', 'Leeds'],
    'Jones_2010_MAP': [0.0, -10.0, 55.0, 6.0, 'Jones, 2010, MAP', '-', '-', '-', '-', '-'],
    'Grobmann_2013': [0.0, 150.0, 15.0, 1.0, 'Grobmann, 2013', '0.72-1.8', '-', '-', '-', 'Heidelberg'],
    'Dix_2013_j_comp': [9.0, -160.0, 10.0, 5.0, 'Dix, 2013 (wrong month to allow with jones runs comparisons)', '-', '-', '-', '-', '-'],
    'schonart_2009_II': [0.0, -80.0, -20.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
    'Martin_2009': [0.0, -24.87, 16.85, 2.0, 'Martin, 2009', '-', '<3.0', '-', '1.2', 'Heidelberg'],
    'schonart_2009_I': [0.0, -90.0, -5.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
    'Yokouchi_2013': [0.0, 120.0, 15.0, 2.0, 'Yokouchi, 2013', '-', '-', '-', '-', '-'],
    'Butz_2009': [0.045, -44.4, -2.4, 12.0, 'Butz, 2009', '0.1', '~1.0', '0.02', '0.5', 'Leeds']}
    # reduced dictionary: only refs with reported IO values
    IO_obs_dict_just = {
    'Read_2008': [0.0, -24.87, 16.85, 6.0, 'Read, 2008', '1', '2', '1', '2', 'Leeds'],
    'schonart_2009_II': [0.0, -80.0, -20.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
    'Oetjen_2009': [0.0, 73.5, 5.0, 2.0, 'Oetjen, 2009', '2.4', '-', '1', '-', 'Leeds'],
    'Allan_2000': [0.162, -16.6, 28.4, 6.0, 'Allan, 2000', '1.2', '-', '0.4', '-', 'Leeds'],
    'Leser_2003': [0.0, -24.87, 16.85, 10.0, 'Leser, 2003', '-', '3.6', '-', '0.8', 'Heidelberg'],
    'Theys_2007': [0.0, -20.9, -55.5, 8.0, 'Theys, 2007', '-', '<0.5', '-', '0.8', 'Belgium'],
    'Dix_2013': [9.0, -160.0, 10.0, 1.0, 'Dix, 2013', '0.1', '-', '', '-', 'NCAR'],
    'Grobmann_2013': [0.0, 135.0, 15.0, 1.0, 'Grobmann, 2013', '1.095', '-', '-', '-', 'Heidelberg'],
    'schonart_2009_I': [0.0, -90.0, -5.0, 10.0, 'Schonart, 2009 (satilitte)', '3.3', '-', '1', '-', 'Bremen'],
    'Martin_2009': [0.0, -24.87, 16.85, 2.0, 'Martin, 2009', '-', '<3.0', '-', '1.2', 'Heidelberg'],
    'Butz_2009': [0.045, -44.4, -2.4, 12.0, 'Butz, 2009', '0.1', '~1.0', '0.02', '0.5', 'Leeds'],
    'Mahajan_2010': [0., -100, -10, 4.0, 'Mahajan, 2010', '0.58', '-', '-', '-', 'Leeds'] }
    if (just_IO):
        return IO_obs_dict_just
    else:
        return IO_obs_dict

# --------------
# 7.02 - BAE flight ID to date
# -------------
def bae_flight_ID_2_date( f_ID, debug=False):
    """ BAE flight flight ID to date for CAST campaign """
    if (debug):
        print 'bae_flight_ID_2_date called'
    f_ID_dict = {'847':'2014-02-18','846':'2014-02-17','845':'2014-02-17',
        '844':'2014-02-16','843':'2014-02-15','842':'2014-02-17',
        '840':'2014-02-13','839':'2014-02-12','838':'2014-02-05',
        '837':'2014-02-04','836':'2014-02-04','835':'2014-02-03',
        '834':'2014-02-01','833':'2014-02-01','832':'2014-01-30',
        '831':'2014-01-30','830':'2014-01-30','829':'2014-01-28',
        '828':'2014-01-26','827':'2014-01-26','826':'2014-01-25',
        '825':'2014-01-24','824':'2014-01-21','823':'2014-01-18'}
    return f_ID_dict[f_ID]

# --------------
# 7.03 - details on flights from FAAM's NETCDF core data files,
# --------------
def CAST_flight(all=True, CIMS=False, CIMSII=False):
    """ Callable dictionary of CAST flight details
    removed 'CAST_flight, 'b822': '20140108''
    """
    # values per flight: [ start date, start time, end date, end time,
    # start epoch, end epoch ]  (times HHMM; epochs are unix seconds)
    flight_dict={'b828': ['20140126', '0559', '20140126', '0930', 1390715993, 1390728623],
    'b829': ['20140128', '1932', '20140129', '0224', 1390937523, 1390962280],
    'b824': ['20140121', '1956', '20140122', '0413', 1390334165, 1390363990],
    'b825': ['20140124', '1940', '20140125', '0140', 1390592404, 1390614058],
    'b826': ['20140125', '0128', '20140125', '0637', 1390613301, 1390631839],
    'b827': ['20140126', '0041', '20140126', '0445', 1390696903, 1390711511],
    'b823': ['20140118', '1713', '20140119', '0147', 1390065184, 1390096069],
    'b839': ['20140212', '0320', '20140212', '0824', 1392175209, 1392193471],
    'b838': ['20140205', '2246', '20140206', '0545', 1391640363, 1391665549],
    'b837': ['20140204', '1901', '20140205', '0318', 1391540463, 1391570305],
    'b836': ['20140204', '0134', '20140204', '0627', 1391477699, 1391495264],
    'b835': ['20140203', '2101', '20140204', '0104', 1391461263, 1391475894],
    'b834': ['20140201', '0610', '20140201', '1217', 1391235033, 1391257044],
    'b833': ['20140201', '0045', '20140201', '0606', 1391215547, 1391234764],
    'b832': ['20140130', '0622', '20140130', '1131', 1391062943, 1391081485],
    'b831': ['20140130', '0101', '20140130', '0614', 1391043666, 1391062492],
    'b830': ['20140129', '0239', '20140129', '0830', 1390963178, 1390984251],
    'b846': ['20140217', '2058', '20140218', '0510', 1392670683, 1392700201],
    'b847': ['20140218', '0503', '20140218', '0857', 1392699809, 1392713860],
    'b844': ['20140216', '1857', '20140217', '0324', 1392577026, 1392607467],
    'b845': ['20140217', '0323', '20140217', '0753', 1392607430, 1392623590],
    'b842': ['20140214', '0508', '20140214', '1035', 1392354530, 1392374130],
    'b843': ['20140215', '1903', '20140216', '0400', 1392490986, 1392523256],
    'b840': ['20140213', '0005', '20140213', '0658', 1392249904, 1392274686],
    'b841': ['20140214', '0004', '20140214', '0458', 1392336265, 1392353901]}
    if (CIMS):
        # CIMS instrument windows: same epochs, adjusted date/time strings
        flight_dict={'b828': ['20140126', '0559', '20140126', '0930', 1390715993, 1390728623],
        'b829': ['20140128', '2322', '20140129', '0224', 1390937523, 1390962280],
        'b824': ['20140122', '0155', '20140122', '0411', 1390334165, 1390363990],
        'b825': ['20140124', '2230', '20140125', '0034', 1390592404, 1390614058],
        'b826': ['20140125', '0128', '20140125', '0637', 1390613301, 1390631839],
        'b827': ['20140126', '0041', '20140126', '0445', 1390696903, 1390711511],
        'b823': ['20140118', '2006', '20140119', '0055', 1390065184, 1390096069],
        'b839': ['20140212', '0320', '20140212', '0824', 1392175209, 1392193471],
        'b838': ['20140205', '2246', '20140206', '0545', 1391640363, 1391665549],
        'b837': ['20140204', '2333', '20140205', '0315', 1391540463, 1391570305],
        'b836': ['20140204', '0134', '20140204', '0608', 1391477699, 1391495264],
        'b835': ['20140203', '2316', '20140204', '0104', 1391461263, 1391475894],
        'b834': ['20140201', '0610', '20140201', '1220', 1391235033, 1391257044],
        'b833': ['20140201', '0145', '20140201', '0606', 1391215547, 1391234764],
        'b832': ['20140130', '0622', '20140130', '1123', 1391062943, 1391081485],
        'b831': ['20140130', '0201', '20140130', '0614', 1391043666, 1391062492],
        'b830': ['20140129', '0249', '20140129', '0825', 1390963178, 1390984251],
        'b846': ['20140217', '2058', '20140218', '0510', 1392670683, 1392700201],
        'b847': ['20140218', '0503', '20140218', '0857', 1392699809, 1392713860],
        'b844': ['20140216', '1857', '20140217', '0324', 1392577026, 1392607467],
        'b845': ['20140217', '0323', '20140217', '0750', 1392607430, 1392623590],
        'b842': ['20140214', '0611', '20140214', '1011', 1392354530, 1392374130],
        'b843': ['20140215', '2227', '20140216', '0324', 1392490986, 1392523256],
        'b840': ['20140213', '0304', '20140213', '0648', 1392249904, 1392274686],
        'b841': ['20140214', '0004', '20140214', '0456', 1392336265, 1392353901]}
    if (CIMSII):
        # second CIMS variant (further-trimmed windows)
        flight_dict={'b828': ['20140126', '0559', '20140126', '0930', 1390715993, 1390728623],
        'b829': ['20140128', '2322', '20140129', '0224', 1390937523, 1390962280],
        'b824': ['20140122', '0155', '20140122', '0411', 1390334165, 1390363990],
        'b825': ['20140124', '2230', '20140125', '0034', 1390592404, 1390614058],
        'b826': ['20140125', '0141', '20140125', '0619', 1390613301, 1390631839],
        'b827': ['20140126', '0100', '20140126', '0442', 1390696903, 1390711511],
        'b823': ['20140118', '2006', '20140119', '0055', 1390065184, 1390096069],
        'b839': ['20140212', '0320', '20140212', '0824', 1392175209, 1392193471],
        'b838': ['20140205', '2246', '20140206', '0545', 1391640363, 1391665549],
        'b837': ['20140204', '2333', '20140205', '0315', 1391540463, 1391570305],
        'b836': ['20140204', '0134', '20140204', '0608', 1391477699, 1391495264],
        'b835': ['20140203', '2338', '20140204', '0104', 1391461263, 1391475894],
        'b834': ['20140201', '0619', '20140201', '1210', 1391235033, 1391257044],
        'b833': ['20140201', '0223', '20140201', '0558', 1391215547, 1391234764],
        'b832': ['20140130', '0622', '20140130', '1123', 1391062943, 1391081485],
        'b831': ['20140130', '0201', '20140130', '0614', 1391043666, 1391062492],
        'b830': ['20140129', '0249', '20140129', '0825', 1390963178, 1390984251],
        'b846': ['20140218', '0035', '20140218', '0507', 1392670683, 1392700201],
        'b847': ['20140218', '0503', '20140218', '0857', 1392699809, 1392713860],
        'b844': ['20140216', '2224', '20140217', '0324', 1392577026, 1392607467],
        'b845': ['20140217', '0323', '20140217', '0750', 1392607430, 1392623590],
        'b842': ['20140214', '0611', '20140214', '1011', 1392354530, 1392374130],
        'b843': ['20140215', '2227', '20140216', '0324', 1392490986, 1392523256],
        'b840': ['20140213', '0342', '20140213', '0648', 1392249904, 1392274686],
        'b841': ['20140214', '0050', '20140214', '0456', 1392336265, 1392353901]}
    if (all) :
        return flight_dict

# --------------
# 7.04 - Iodocarbon obs.
# (cont.) meta data
# --------------
def iodocarbon_obs():
    """
    dictionary of Iodocarbon observations for automated analysis/comparions
    with observations
    """
    # per species: list of [ reference, lats, lons, values (pptv assumed --
    # TODO confirm units against the cited papers) ]
    Org_obs= { 'CH3IT' : [ \
        ['Chuck et al (2005)' , list(np.linspace( -36, -49, 3) ) + \
            list( np.linspace( -20, 28, 3) ), [-20] *6, [0.71]*3 + [ 1.94]*3 ] ] ,
    'CH2ICl' : [ \
        ['Chuck et al (2005)' , list( np.linspace( -36, -49, 3 ) )+ \
            list(np.linspace( -20, 28, 3) ), [-20] *6, [0.23]*3+ [0.32]*3 ],\
        ['Jones et al (2010)' , np.linspace( 60, 15, 5), [-15] *5, [0.12]*5 ] ],
    'CH2I2' : [ \
        ['Jones et al (2010)' , np.linspace( 60,15, 5), [-15] *5, [0.01]*5 ] ],
    'CH2IBr' : [ \
        ['Jones et al (2010)' , np.linspace( 60, 15, 5), [-15] *5, [0.01]*5 ] ],
    'C2H5I' : [ \
        ['Jones et al (2010)' , np.linspace( 26, 36, 3), [120] *3, [0.09]*3 ] ],
    'I2' : [ ['Lawler et al (2014)' , [16.51] , [-24], [0.2] ] ],
    }
    return Org_obs

# --------------
# 7.05 - Stores locations for use by funcs/progs -
# --------------
def get_loc(loc=None, rtn_dict=False, debug=False):
    """
    Dictionary to store locations for automated analysis
    Data arranged: LON, LAT, ALT
    - double up? ( with 5.02 ?? )
    """
    loc_dict ={ 'GUAM' : ( 144.800, 13.500, 0 ),
    'CHUUK' : ( 151.7833, 7.4167, 0 ),
    'PILAU' : ( 134.4667,7.3500, 0 ),
    'London': ( -0.1275, 51.5072, 0 ),
    'Weyborne' : (1.1380, 52.9420, 0 ),
    # 'Cape Verde': (16.848, -24.871, 0 ),
    # 'CVO': (16.848, -24.871, 0 ),
    'Cape Verde': ( -24.871, 16.848, 0 ),
    'CVO': (-24.871,16.848, 0 ),
    'North Ken' : (-0.214174, 51.520718, 0),
    'KEN' : (-0.214174, 51.520718, 0),
    'BT tower': (-0.139055, 51.521556, 190),
    'BTT': (-0.139055, 51.521556, 190)
    }
    if rtn_dict:
        return loc_dict
    else:
        return loc_dict[loc]

# --------------
# 7.06 - Get Locations of observations (lats, lons, alts ) for given sites
# --------------
def get_obs_loc(loc, debug=False):
    """ Dictionary to store groups of locations for automated analysis """
    # values: [ list of lats, list of lons ]
    d = { 'Denmark' :[ [ 68.35, 59.85, 56.13, 55.69 ],
        [ 18.81, 17.63, 13.05, 12.10 ] ],
    # split by obs site...
    'Denmark1': [[68.35], [18.81]],
    'Denmark2': [[59.85], [17.63]],
    'Denmark3': [[56.13], [13.05]],
    'Denmark4': [[55.69], [12.1]] ,
    # Berlin, bonn, hamburg, Westerland, Munich, Brotjacklriegel, Deuselbach, Schauinsland
    'Germany' :[ [52.5167, 50.7340, 53.5653,54.9100, 52.4231, 48.1333, \
        48.491, 49.4508, 47.91111 ] ,
        [ 13.3833 , 7.0998, 10.0014, 8.3075, 10.7872, 11.5667, 13.133, 7.302, \
        7.8894] ],
    'Weyborne' :[ [ 52.9420], [1.1380 ] ]
    }
    return d[loc]

# --------------
# 7.07 - sonde station variables (list of 432 sondes)
# -------------
def sonde_STNs():
    """ Dictionary of WOUDC sonde location variables """
    # values: [ name, station #, lat, lon, alt (m), country code, WMO region ]
    sonde_dict = {
    101: ['SYOWA', 101.0, -69.0, 39.58, 22.0, 'JPN', 'ANTARCTICA'],
    104: ['BEDFORD', 104.0, 42.45, -71.267, 80.0, 'USA', 'IV'],
    105: ['FAIRBANKS (COLLEGE)', 105.0, 64.817, -147.867, 138.0, 'USA', 'IV'],
    107: ['WALLOPS ISLAND', 107.0, 37.898, -75.483, 13.0, 'USA', 'IV'],
    108: ['CANTON ISLAND', 108.0, -2.76, -171.7, 3.0, 'USA', 'V'],
    109: ['HILO', 109.0, 19.5735, -155.0485, 11.0, 'USA', 'V'],
    111: ['AMUNDSEN-SCOTT (SOUTH POLE)', 111.0, -89.983, 0.0, 2820.0, 'ATA', 'ANTARCTICA'],
    131: ['PUERTO MONTT', 131.0, -41.45, -72.833, 5.0, 'CHL', 'III'],
    132: ['SOFIA', 132.0, 42.817, 23.383, 588.0, 'BGR', 'VI'],
    137: ['TOPEKA', 137.0, 39.067, -95.633, 270.0, 'USA', 'IV'],
    138: ['CHRISTCHURCH', 138.0, -43.483, 172.55, 34.0, 'NZL', 'V'],
    149: ['OVEJUYO (LA PAZ)', 149.0, -16.517, -68.033, 3420.0, 'BOL', 'III'],
    156: ['PAYERNE', 156.0, 46.49, 6.57, 491.0, 'CHE', 'VI'],
    157: ['THALWIL', 157.0, 46.817, 8.455, 515.0, 'CHE', 'VI'],
    163: ['WILKES', 163.0, -66.25, 110.517, 12.0, 'USA', 'ANTARCTICA'],
    174: ['LINDENBERG', 174.0, 52.21, 14.12, 112.0, 'DEU', 'VI'],
    175: ['NAIROBI', 175.0, -1.267, 36.8, 1745.0, 'KEN', 'I'],
    181: ['BERLIN/TEMPLEHOF', 181.0, 52.467, 13.433, 50.0, 'DEU', 'VI'],
    187: ['PUNE', 187.0, 18.553, 73.86, 559.0, 'IND', 'II'],
    190: ['NAHA', 190.0, 26.2, 127.683, 27.0, 'JPN', 'II'],
    191: ['SAMOA', 191.0, -14.25, -170.56, 82.0, 'ASM', 'V'],
    194: ['YORKTON', 194.0, 51.263, -102.467, 504.0, 'CAN', 'IV'],
    197: ['BISCARROSSE/SMS', 197.0, 44.367, -1.233, 18.0, 'FRA', 'VI'],
    198: ['COLD LAKE', 198.0, 54.783, -110.05, 702.0, 'CAN', 'IV'],
    199: ['BARROW', 199.0, 71.317, -156.635, 11.0, 'USA', 'IV'],
    203: ['FT. SHERMAN', 203.0, 9.33, -79.983, 57.0, 'PAN', 'IV'],
    205: ['THIRUVANANTHAPURAM', 205.0, 8.483, 76.97, 60.0, 'IND', 'II'],
    206: ['BOMBAY', 206.0, 19.117, 72.85, 145.0, 'IND', 'II'],
    210: ['PALESTINE', 210.0, 31.8, -95.717, 121.0, 'USA', 'IV'],
    213: ['EL ARENOSILLO', 213.0, 37.1, -6.733, 41.0, 'ESP', 'VI'],
    217: ['POKER FLAT', 217.0, 65.133, -147.45, 357.5, 'USA', 'IV'],
    219: ['NATAL', 219.0, -5.71, -35.21, 30.5, 'BRA', 'III'],
    221: ['LEGIONOWO', 221.0, 52.4, 20.967, 96.0, 'POL', 'VI'],
    224: ['CHILCA', 224.0, -12.5, -76.8, -1.0, 'PER', 'III'],
    225: ['KOUROU', 225.0, 5.333, -52.65, 4.0, 'GUF', 'III'],
    227: ['MCDONALD OBSERVATORY', 227.0, 30.666, -90.933, 2081.0, 'USA', 'IV'],
    228: ['GIMLI', 228.0, 50.633, -97.05, 228.0, 'CAN', 'IV'],
    229: ['ALBROOK', 229.0, 8.983, -79.55, 66.0, 'PAN', 'IV'],
    231: ['SPOKANE', 231.0, 47.667, -117.417, 576.0, 'USA', 'IV'],
    233: ['MARAMBIO', 233.0, -64.233, -56.623, 196.0, 'ATA', 'ANTARCTICA'],
    234: ['SAN JUAN', 234.0, 18.483, -66.133, 17.0, 'PRI', 'IV'],
    235: ['LONG VIEW', 235.0, 32.5, -94.75, 103.0, 'USA', 'IV'],
    236: ['COOLIDGE FIELD', 236.0, 17.283, -61.783, 10.0, 'ATG', 'IV'],
    237: ['GREAT FALLS', 237.0, 47.483, -111.35, 1118.0, 'USA', 'IV'],
    238: ['DENVER', 238.0, 39.767, -104.883, 1611.0, 'USA', 'IV'],
    239: ['SAN DIEGO', 239.0, 32.76, -117.19, 72.5, 'USA', 'IV'],
    242: ['PRAHA', 242.0, 50.02, 14.45, 304.0, 'CZE', 'VI'],
    254: ['LAVERTON', 254.0, -37.867, 144.75, 21.0, 'AUS', 'V'],
    255: ['AINSWORTH (AIRPORT)', 255.0, 42.583, -100.0, 789.0, 'USA', 'IV'],
    256: ['LAUDER', 256.0, -45.03, 169.683, 370.0, 'NZL', 'V'],
    257: ['VANSCOY', 257.0, 52.115, -107.165, 510.0, 'CAN', 'IV'],
    260: ['TABLE MOUNTAIN (CA)', 260.0, 34.4, -117.7, 2286.0, 'USA', 'IV'],
    262: ['SODANKYLA', 262.0, 67.335, 26.505, 179.0, 'FIN', 'VI'],
    265: ['IRENE', 265.0, -25.91, 28.211, 1524.0, 'ZAF', 'I'],
    280: ['NOVOLASAREVSKAYA / FORSTER', 280.0, -70.767, 11.867, 110.0, 'ATA', 'ANTARCTICA'],
    297: ['S.PIETRO CAPOFIUME', 297.0, 44.65, 11.617, 11.0, 'ITA', 'VI'],
    303: ['IQALUIT', 303.0, 63.75, -68.55, 20.0, 'CAN', 'IV'],
    308: ['MADRID / BARAJAS', 308.0, 40.46, -3.65, 650.0, 'ESP', 'VI'],
    315: ['EUREKA / EUREKA LAB', 315.0, 80.04, -86.175, 310.0, 'CAN', 'IV'],
    316: ['DE BILT', 316.0, 52.1, 5.18, 4.0, 'NLD', 'VI'],
    318: ['VALENTIA OBSERVATORY', 318.0, 51.93, -10.25, 14.0, 'IRL', 'VI'],
    323: ['NEUMAYER', 323.0, -70.65, -8.25, 42.0, 'ATA', 'ANTARCTICA'],
    328: ['ASCENSION ISLAND', 328.0, -7.98, -14.42, 91.0, 'SHN', 'I'],
    329: ['BRAZZAVILLE', 329.0, -4.28, 15.25, 314.0, 'COG', 'I'],
    330: ['HANOI', 330.0, 21.033, 105.84, 5.0, 'VNM', 'II'],
    333: ['PORTO NACIONAL', 333.0, -10.8, -48.4, 240.0, 'BRA', 'III'],
    334: ['CUIABA', 334.0, -15.6, -56.1, 990.0, 'BRA', 'III'],
    335: ['ETOSHA PAN', 335.0, -19.2, 15.9, 1100.0, 'NAM', 'I'],
    336: ['ISFAHAN', 336.0, 32.477, 51.425, 1550.0, 'IRN', 'II'],
    338: ['BRATTS LAKE (REGINA)', 338.0, 50.205, -104.705, 592.0, 'CAN', 'IV'],
    339: ['USHUAIA', 339.0, -54.85, -68.308, 15.0, 'ARG', 'III'],
    344: ['HONG KONG OBSERVATORY', 344.0, 22.31, 114.17, 66.0, 'HKG', 'II'],
    348: ['ANKARA', 348.0, 39.95, 32.883, 896.0, 'TUR', 'VI'],
    360: ['PELLSTON (MI)', 360.0, 45.56, -84.67, 238.0, 'USA', 'IV'],
    361: ['HOLTVILLE (CA)', 361.0, 32.81, -115.42, -18.0, 'USA', 'IV'],
    394: ['BROADMEADOWS', 394.0, -37.6914, 144.9467, 108.0, 'AUS', 'V'],
    400: ['MAITRI', 400.0, -70.46, 11.45, 223.5, 'ATA', 'ANTARCTICA'],
    401: ['SANTA CRUZ', 401.0, 28.42, -16.26, 36.0, 'ESP', 'I'],
    404: ['JOKIOINEN', 404.0, 60.81, 23.5, 103.0, 'FIN', 'VI'],
    406: ['SCORESBYSUND', 406.0, 70.49, -21.98, 50.0, 'GRL', 'VI'],
    418: ['HUNTSVILLE', 418.0, 34.72, -86.64, 196.0, 'USA', 'IV'],
    420: ['BELTSVILLE (MD)', 420.0, 39.02, -76.74, 64.0, 'USA', 'IV'],
    432: ['PAPEETE (TAHITI)', 432.0, -18.0, -149.0, 2.0, 'PYF', 'V'],
    434: ['SAN CRISTOBAL', 434.0, -0.92, -89.6, 8.0, 'ECU', 'III'],
    435: ['PARAMARIBO', 435.0, 5.81, -55.21, 22.5, 'SUR', 'III'],
    436: ['LA REUNION ISLAND', 436.0, -20.99, 55.48, 61.5, 'REU', 'I'],
    437: ['WATUKOSEK (JAVA)', 437.0, -7.57, 112.65, 50.0, 'IDN', 'V'],
    438: ['SUVA (FIJI)', 438.0, -18.13, 178.315, 6.0, 'FJI', 'V'],
    439: ['KAASHIDHOO', 439.0, 5.0, 73.5, 1.0, 'MDV', 'V'],
    441: ['EASTER ISLAND', 441.0, -27.17, -109.42, 62.0, 'CHL', 'III'],
    443: ['SEPANG AIRPORT', 443.0, 2.73, 101.7, 17.0, 'MYS', 'V'],
    444: ['CHEJU', 444.0, 33.5, 126.5, 300.0, 'KOR', 'II'],
    445: ['TRINIDAD HEAD', 445.0, 40.8, -124.16, 55.0, 'USA', 'IV'],
    448: ['MALINDI', 448.0, -2.99, 40.19, -6.0, 'KEN', 'I'],
    450: ['DAVIS', 450.0, -68.577, 77.973, 16.0, 'ATA', 'ANTARCTICA'],
    456: ['EGBERT', 456.0, 44.23, -79.78, 253.0, 'CAN', 'IV'],
    457: ['KELOWNA', 457.0, 49.93, -119.4, 456.0, 'CAN', 'IV'],
    458: ['YARMOUTH', 458.0, 43.87, -66.1, 9.0, 'CAN', 'IV'],
    459: ['TBD', 459.0, 0.0, 0.0, 0.0, '', 'VI'],
    460: ['THULE', 460.0, 76.53, -68.74, 57.0, 'GRL', 'VI'],
    466: ['MAXARANGUAPE (SHADOZ-NATAL)', 466.0, -5.445, -35.33, 32.0, 'BRA', 'III'],
    472: ['COTONOU', 472.0, 6.21, 2.23, 10.0, 'BEN', 'I'],
    477: ['HEREDIA', 477.0, 10.0, -84.11, 1176.0, 'CRI', 'IV'],
    480: ['SABLE ISLAND', 480.0, 43.93, -60.02, 4.0, 'CAN', 'IV'],
    482: ['WALSINGHAM', 482.0, 42.6, -80.6, 200.0, 'CAN', 'IV'],
    483: ['BARBADOS', 483.0, 13.16, -59.43, 32.0, 'BRB', 'III'],
    484: ['HOUSTON (TX)', 484.0, 29.72, -95.4, 19.0, 'USA', 'IV'],
    485: ['TECAMEC (UNAM)', 485.0, 19.33, -99.18, 2272.0, 'MEX', 'IV'],
    487: ['NARRAGANSETT', 487.0, 41.49, -71.42, 21.0, 'USA', 'IV'],
    488: ['PARADOX', 488.0, 43.92, -73.64, 284.0, 'USA', 'IV'],
    489: ['RICHLAND', 489.0, 46.2, -119.16, 123.0, 'USA', 'IV'],
    490: ['VALPARAISO (IN)', 490.0, 41.5, -87.0, 240.0, 'USA', 'IV'],
    494: ['ALAJUELA', 494.0, 9.98, -84.21, 899.0, 'CRI', 'IV'] }
    return sonde_dict

# ----
# 7.08 - returns (lat, lon, alt (press), timezone (UTC) ) for a given site
# ----
def gaw_2_loc(site, f =
        'GLOBAL_SURFACE_O3_2006_2012.nc'):#, f
    """
    Extract GAW site locations for a given site
    Another file is availible with just GAW sites:
    'GAW_SURFACE_O3_2006_2012.nc'

    Returns ( lat, lon, pressure (hPa), UTC offset ); sites not in the
    built-in dictionary are read from the NetCDF file (offset = -9999).
    """
    from AC_tools.funcs4generic import hPa_to_Km
    # Use simple dictionary if site listed
    try:
        # values: ( lat, lon, surface pressure / hPa, UTC offset )
        gaw_sites= { 'SMO': (-14.247, -170.565,1002.7885270480558, -11),
            'MNM':(24.285, 153.981, 1011.9342452324959, 9),
            'BMW':(32.27, -64.88, 1008.6109830510485, -4 ),
            'CVO': (16.848, -24.871, 1011.6679817831093, -1),
            'RPB':(13.17000, -59.43000, 1007.0196960034474, -4 ),
            'ogasawara': (26.38, 142.10,996.08181619552602, 9 ),
            'OGA': (26.38, 142.10,996.08181619552602, 9 ) ,
            # Add extras for ease of analysis (e.g. Roscoff ... )
            'ROS': (48.433, -3.5904, 1011.6679817831093, +1) }
        return gaw_sites[ site ]
    # If not in list then extract details from NetCDF
    except:
        wd= get_dir('dwd') +'ozonesurface/'
        # Dataset is the netCDF4 reader (imported elsewhere in this module)
        with Dataset(wd+f, 'r', format='NETCDF4') as f:
            lon= f.groups[site].longitude
            alt = f.groups[site].altitude /1E3
            lat = f.groups[site].latitude
        print [ (i, type(i) ) for i in lat, lon, alt ]
        # convert altitude (km) back to pressure (hPa)
        return (lat, lon, float( hPa_to_Km([alt], reverse=True)[0] ), -9999 )
import theano
import unittest
from numpy.testing import assert_allclose
import numpy as np
from keras.layers.recurrent import SimpleRNN
from mock import Mock

# Use the float dtype Theano is configured with so shared variables match.
floatX = theano.config.floatX

__author__ = "Jeff Ye"


class TestSimpleRNN(unittest.TestCase):
    """Tests SimpleRNN output on left-padded (masked) input sequences."""

    # 2 samples x 4 timesteps x 1 feature, padded on the left with zeros.
    left_padding_data = np.array(
        [
            [  # batch 1
                [0], [1], [2], [3]
            ],
            [  # batch 2
                [0], [0], [1], [2]
            ]
        ], dtype=floatX)
    # Mask marking real timesteps (1) vs left padding (0).
    left_padding_mask = np.array(  # n_sample x n_time
        [
            [  # batch 1
                0, 1, 1, 1
            ],
            [  # batch 2
                0, 0, 1, 1
            ]
        ], dtype=np.int32)

    def setUp(self):
        # Identity weights (W=U=1, b=0) with a linear activation, so each
        # step's output is input + previous output — the expected sequences
        # below are simple running sums, computable by hand.
        W = np.array([[1]], dtype=floatX)
        U = np.array([[1]], dtype=floatX)
        b = np.array([0], dtype=floatX)
        weights = [W, U, b]
        self.forward = SimpleRNN(output_dim=1, activation='linear',
                                 weights=weights)
        self.backward = SimpleRNN(output_dim=1, activation='linear',
                                  weights=weights)
        # Mock the upstream layer so inputs and masks can be injected
        # directly without building a full model.
        previous = Mock()
        previous.nb_input = 1
        previous.nb_output = 1
        previous.output_shape = self.left_padding_data.shape
        previous.get_output_mask = Mock()
        self.previous = previous

    def test_left_padding(self):
        # Forward direction: masked (padding) steps contribute nothing, so
        # outputs are the running sums of the unmasked inputs only.
        forward = self.forward
        forward.go_backwards = False
        forward.return_sequences = True
        self.previous.get_output.return_value = theano.shared(value=self.left_padding_data)
        self.previous.get_output_mask.return_value = theano.shared(value=self.left_padding_mask)
        forward.set_previous(self.previous)
        np.testing.assert_allclose(forward.get_output().eval(), np.array([
            [[0], [1], [3], [6]],
            [[0], [0], [1], [3]]]))

        # Backward direction: the same data consumed in reverse time order.
        backward = self.backward
        backward.go_backwards = True
        backward.return_sequences = True
        self.previous.get_output.return_value = theano.shared(value=self.left_padding_data)
        self.previous.get_output_mask.return_value = theano.shared(value=self.left_padding_mask)
        backward.set_previous(self.previous)
        np.testing.assert_allclose(backward.get_output().eval(), np.array([
            [[3], [5], [6], [0]],
            [[2], [3], [0], [0]]]))

remove incompatible tests
#!/usr/bin/env python # build_script.py - Build, install, and test XCTest -*- python -*- # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See http://swift.org/LICENSE.txt for license information # See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors import argparse import glob import os import subprocess import sys import tempfile import textwrap import platform import errno SOURCE_DIR = os.path.dirname(os.path.abspath(__file__)) def note(msg): print("xctest-build: "+msg) def run(command): note(command) subprocess.check_call(command, shell=True) def _mkdirp(path): """ Creates a directory at the given path if it doesn't already exist. """ if not os.path.exists(path): run("mkdir -p {}".format(path)) def symlink_force(target, link_name): if os.path.isdir(link_name): link_name = os.path.join(link_name, os.path.basename(target)) try: os.symlink(target, link_name) except OSError as e: if e.errno == errno.EEXIST: os.remove(link_name) os.symlink(target, link_name) else: raise e class DarwinStrategy: @staticmethod def requires_foundation_build_dir(): # The Foundation build directory is not required on Darwin because the # Xcode workspace implicitly builds Foundation when building the XCTest # schemes. return False @staticmethod def build(args): """ Build XCTest and place the built products in the given 'build_dir'. If 'test' is specified, also executes the 'test' subcommand. """ swiftc = os.path.abspath(args.swiftc) build_dir = os.path.abspath(args.build_dir) run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace " "-scheme SwiftXCTest " "SWIFT_EXEC=\"{swiftc}\" " "SWIFT_LINK_OBJC_RUNTIME=YES " "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\"".format( swiftc=swiftc, build_dir=build_dir, source_dir=SOURCE_DIR)) if args.test: # Execute main() using the arguments necessary to run the tests. 
main(args=["test", "--swiftc", swiftc, build_dir]) @staticmethod def test(args): """ Test SwiftXCTest.framework, using the given 'swiftc' compiler, looking for it in the given 'build_dir'. """ swiftc = os.path.abspath(args.swiftc) build_dir = os.path.abspath(args.build_dir) run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace " "-scheme SwiftXCTestFunctionalTests " "SWIFT_EXEC=\"{swiftc}\" " "SWIFT_LINK_OBJC_RUNTIME=YES " "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\" " "| grep -v \" export\"".format( swiftc=swiftc, build_dir=build_dir, source_dir=SOURCE_DIR)) @staticmethod def install(args): """ Installing XCTest is not supported on Darwin. """ note("error: The install command is not supported on this platform") exit(1) class GenericUnixStrategy: @staticmethod def requires_foundation_build_dir(): # This script does not know how to build Foundation in Unix environments, # so we need the path to a pre-built Foundation library. return True @staticmethod def build(args): """ Build XCTest and place the built products in the given 'build_dir'. If 'test' is specified, also executes the 'test' subcommand. """ swiftc = os.path.abspath(args.swiftc) build_dir = os.path.abspath(args.build_dir) foundation_build_dir = os.path.abspath(args.foundation_build_dir) core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir( foundation_build_dir, args.foundation_install_prefix) if args.libdispatch_build_dir: libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir) if args.libdispatch_src_dir: libdispatch_src_dir = os.path.abspath(args.libdispatch_src_dir) _mkdirp(build_dir) sourcePaths = glob.glob(os.path.join( SOURCE_DIR, 'Sources', 'XCTest', '*', '*.swift')) if args.build_style == "debug": style_options = "-g" else: style_options = "-O" # Not incremental.. 
# Build library if args.libdispatch_build_dir and args.libdispatch_src_dir: libdispatch_args = "-I {libdispatch_build_dir}/src -I {libdispatch_src_dir} ".format( libdispatch_build_dir=libdispatch_build_dir, libdispatch_src_dir=libdispatch_src_dir) else: libdispatch_args = "" run("{swiftc} -Xcc -fblocks -c {style_options} -emit-object -emit-module " "-module-name XCTest -module-link-name XCTest -parse-as-library " "-emit-module-path {build_dir}/XCTest.swiftmodule " "-force-single-frontend-invocation " "-I {foundation_build_dir} -I {core_foundation_build_dir} " "{libdispatch_args} " "{source_paths} -o {build_dir}/XCTest.o".format( swiftc=swiftc, style_options=style_options, build_dir=build_dir, foundation_build_dir=foundation_build_dir, core_foundation_build_dir=core_foundation_build_dir, libdispatch_args=libdispatch_args, source_paths=" ".join(sourcePaths))) run("{swiftc} -emit-library {build_dir}/XCTest.o " "-L {foundation_build_dir} -lswiftGlibc -lswiftCore -lFoundation -lm " # We embed an rpath of `$ORIGIN` to ensure other referenced # libraries (like `Foundation`) can be found solely via XCTest. "-Xlinker -rpath=\\$ORIGIN " "-o {build_dir}/libXCTest.so".format( swiftc=swiftc, build_dir=build_dir, foundation_build_dir=foundation_build_dir)) if args.test: # Execute main() using the arguments necessary to run the tests. main(args=["test", "--swiftc", swiftc, "--foundation-build-dir", foundation_build_dir, build_dir]) # If --module-install-path and --library-install-path were specified, # we also install the built XCTest products. if args.module_path is not None and args.lib_path is not None: # Execute main() using the arguments necessary for installation. main(args=["install", build_dir, "--module-install-path", args.module_path, "--library-install-path", args.lib_path]) note('Done.') @staticmethod def test(args): """ Test the built XCTest.so library at the given 'build_dir', using the given 'swiftc' compiler. 
""" lit_path = os.path.abspath(args.lit) if not os.path.exists(lit_path): raise IOError( 'Could not find lit tester tool at path: "{}". This tool is ' 'requred to run the test suite. Unless you specified a custom ' 'path to the tool using the "--lit" option, the lit tool will be ' 'found in the LLVM source tree, which is expected to be checked ' 'out in the same directory as swift-corelibs-xctest. If you do ' 'not have LLVM checked out at this path, you may follow the ' 'instructions for "Getting Sources for Swift and Related ' 'Projects" from the Swift project README in order to fix this ' 'error.'.format(lit_path)) # FIXME: Allow these to be specified by the Swift build script. lit_flags = "-sv --no-progress-bar" tests_path = os.path.join(SOURCE_DIR, "Tests", "Functional") foundation_build_dir = os.path.abspath(args.foundation_build_dir) core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir( foundation_build_dir, args.foundation_install_prefix) if args.libdispatch_build_dir: libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir) symlink_force(os.path.join(args.libdispatch_build_dir, "src", ".libs", "libdispatch.so"), foundation_build_dir) if args.libdispatch_src_dir and args.libdispatch_build_dir: libdispatch_src_args = "LIBDISPATCH_SRC_DIR={libdispatch_src_dir} LIBDISPATCH_BUILD_DIR={libdispatch_build_dir}".format( libdispatch_src_dir=os.path.abspath(args.libdispatch_src_dir), libdispatch_build_dir=os.path.join(args.libdispatch_build_dir, 'src', '.libs')) else: libdispatch_src_args = "" run('SWIFT_EXEC={swiftc} ' 'BUILT_PRODUCTS_DIR={built_products_dir} ' 'FOUNDATION_BUILT_PRODUCTS_DIR={foundation_build_dir} ' 'CORE_FOUNDATION_BUILT_PRODUCTS_DIR={core_foundation_build_dir} ' '{libdispatch_src_args} ' '{lit_path} {lit_flags} ' '{tests_path}'.format( swiftc=os.path.abspath(args.swiftc), built_products_dir=args.build_dir, foundation_build_dir=foundation_build_dir, core_foundation_build_dir=core_foundation_build_dir, 
libdispatch_src_args=libdispatch_src_args, lit_path=lit_path, lit_flags=lit_flags, tests_path=tests_path)) @staticmethod def install(args): """ Install the XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc build products into the given module and library paths. """ build_dir = os.path.abspath(args.build_dir) module_install_path = os.path.abspath(args.module_install_path) library_install_path = os.path.abspath(args.library_install_path) _mkdirp(module_install_path) _mkdirp(library_install_path) xctest_so = "libXCTest.so" run("cp {} {}".format( os.path.join(build_dir, xctest_so), os.path.join(library_install_path, xctest_so))) xctest_swiftmodule = "XCTest.swiftmodule" run("cp {} {}".format( os.path.join(build_dir, xctest_swiftmodule), os.path.join(module_install_path, xctest_swiftmodule))) xctest_swiftdoc = "XCTest.swiftdoc" run("cp {} {}".format( os.path.join(build_dir, xctest_swiftdoc), os.path.join(module_install_path, xctest_swiftdoc))) @staticmethod def core_foundation_build_dir(foundation_build_dir, foundation_install_prefix): """ Given the path to a swift-corelibs-foundation built product directory, return the path to CoreFoundation built products. When specifying a built Foundation dir such as '/build/foundation-linux-x86_64/Foundation', CoreFoundation dependencies are placed in 'usr/lib/swift'. Note that it's technically not necessary to include this extra path when linking the installed Swift's 'usr/lib/swift/linux/libFoundation.so'. """ return os.path.join(foundation_build_dir, foundation_install_prefix.strip("/"), 'lib', 'swift') def main(args=sys.argv[1:]): """ The main entry point for this script. Based on the subcommand given, delegates building or testing XCTest to a sub-parser and its corresponding function. """ strategy = DarwinStrategy if platform.system() == 'Darwin' else GenericUnixStrategy parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(""" Build, test, and install XCTest. 
NOTE: In general this script should not be invoked directly. The recommended way to build and test XCTest is via the Swift build script. See this project's README for details. The Swift build script invokes this %(prog)s script to build, test, and install this project. You may invoke it in the same way to build this project directly. For example, if you are in a Linux environment, your install of Swift is located at "/swift" and you wish to install XCTest into that same location, here is a sample invocation of the build script: $ %(prog)s \\ --swiftc="/swift/usr/bin/swiftc" \\ --build-dir="/tmp/XCTest_build" \\ --foundation-build-dir "/swift/usr/lib/swift/linux" \\ --library-install-path="/swift/usr/lib/swift/linux" \\ --module-install-path="/swift/usr/lib/swift/linux/x86_64" Note that installation is not supported on Darwin as this library is only intended to be used as a dependency in environments where Apple XCTest is not available. """)) subparsers = parser.add_subparsers( description=textwrap.dedent(""" Use one of these to specify whether to build, test, or install XCTest. If you don't specify any of these, 'build' is executed as a default. You may also use 'build' to also test and install the built products. Pass the -h or --help option to any of the subcommands for more information.""")) build_parser = subparsers.add_parser( "build", description=textwrap.dedent(""" Build XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc using the given Swift compiler. This command may also test and install the built products.""")) build_parser.set_defaults(func=strategy.build) build_parser.add_argument( "--swiftc", help="Path to the 'swiftc' compiler that will be used to build " "XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc. This will " "also be used to build the tests for those built products if the " "--test option is specified.", required=True) build_parser.add_argument( "--build-dir", help="Path to the output build directory. 
If not specified, a " "temporary directory is used.", default=tempfile.mkdtemp()) build_parser.add_argument( "--foundation-build-dir", help="Path to swift-corelibs-foundation build products, which " "the built XCTest.so will be linked against.", required=strategy.requires_foundation_build_dir()) build_parser.add_argument( "--foundation-install-prefix", help="Path to the installation location for swift-corelibs-foundation " "build products ('%(default)s' by default); CoreFoundation " "dependencies are expected to be found under " "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.", default="/usr") build_parser.add_argument( "--libdispatch-build-dir", help="Path to swift-corelibs-libdispatch build products, which " "the built XCTest.so will be linked against.") build_parser.add_argument( "--libdispatch-src-dir", help="Path to swift-corelibs-libdispatch source tree, which " "the built XCTest.so will be linked against.") build_parser.add_argument( "--module-install-path", help="Location at which to install XCTest.swiftmodule and " "XCTest.swiftdoc. This directory will be created if it doesn't " "already exist.", dest="module_path") build_parser.add_argument( "--library-install-path", help="Location at which to install XCTest.so. This directory will be " "created if it doesn't already exist.", dest="lib_path") build_parser.add_argument( "--release", help="builds for release", action="store_const", dest="build_style", const="release", default="debug") build_parser.add_argument( "--debug", help="builds for debug (the default)", action="store_const", dest="build_style", const="debug", default="debug") build_parser.add_argument( "--test", help="Whether to run tests after building. 
Note that you must have " "cloned https://github.com/apple/swift-llvm at {} in order to " "run this command.".format(os.path.join( os.path.dirname(SOURCE_DIR), 'llvm')), action="store_true") test_parser = subparsers.add_parser( "test", description="Tests a built XCTest framework at the given path.") test_parser.set_defaults(func=strategy.test) test_parser.add_argument( "build_dir", help="An absolute path to a directory containing the built XCTest.so " "library.") test_parser.add_argument( "--swiftc", help="Path to the 'swiftc' compiler used to build and run the tests.", required=True) test_parser.add_argument( "--lit", help="Path to the 'lit' tester tool used to run the test suite. " "'%(default)s' by default.", default=os.path.join(os.path.dirname(SOURCE_DIR), "llvm", "utils", "lit", "lit.py")) test_parser.add_argument( "--foundation-build-dir", help="Path to swift-corelibs-foundation build products, which the " "tests will be linked against.", required=strategy.requires_foundation_build_dir()) test_parser.add_argument( "--foundation-install-prefix", help="Path to the installation location for swift-corelibs-foundation " "build products ('%(default)s' by default); CoreFoundation " "dependencies are expected to be found under " "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.", default="/usr") test_parser.add_argument( "--libdispatch-build-dir", help="Path to swift-corelibs-libdispatch build products, which " "the built XCTest.so will be linked against.") test_parser.add_argument( "--libdispatch-src-dir", help="Path to swift-corelibs-libdispatch source tree, which " "the built XCTest.so will be linked against.") install_parser = subparsers.add_parser( "install", description="Installs a built XCTest framework.") install_parser.set_defaults(func=strategy.install) install_parser.add_argument( "build_dir", help="An absolute path to a directory containing a built XCTest.so, " "XCTest.swiftmodule, and XCTest.swiftdoc.") install_parser.add_argument( "-m", 
"--module-install-path", help="Location at which to install XCTest.swiftmodule and " "XCTest.swiftdoc. This directory will be created if it doesn't " "already exist.") install_parser.add_argument( "-l", "--library-install-path", help="Location at which to install XCTest.so. This directory will be " "created if it doesn't already exist.") # Many versions of Python require a subcommand must be specified. # We handle this here: if no known subcommand (or none of the help options) # is included in the arguments, then insert the default subcommand # argument: 'build'. if any([a in ["build", "test", "install", "-h", "--help"] for a in args]): parsed_args = parser.parse_args(args=args) else: parsed_args = parser.parse_args(args=["build"] + args) # Execute the function for the subcommand we've been given. parsed_args.func(parsed_args) if __name__ == '__main__': main() Use build type parameter when building and testing using xcodebuild. #!/usr/bin/env python # build_script.py - Build, install, and test XCTest -*- python -*- # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See http://swift.org/LICENSE.txt for license information # See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors import argparse import glob import os import subprocess import sys import tempfile import textwrap import platform import errno SOURCE_DIR = os.path.dirname(os.path.abspath(__file__)) def note(msg): print("xctest-build: "+msg) def run(command): note(command) subprocess.check_call(command, shell=True) def _mkdirp(path): """ Creates a directory at the given path if it doesn't already exist. 
""" if not os.path.exists(path): run("mkdir -p {}".format(path)) def symlink_force(target, link_name): if os.path.isdir(link_name): link_name = os.path.join(link_name, os.path.basename(target)) try: os.symlink(target, link_name) except OSError as e: if e.errno == errno.EEXIST: os.remove(link_name) os.symlink(target, link_name) else: raise e class DarwinStrategy: @staticmethod def requires_foundation_build_dir(): # The Foundation build directory is not required on Darwin because the # Xcode workspace implicitly builds Foundation when building the XCTest # schemes. return False @staticmethod def build(args): """ Build XCTest and place the built products in the given 'build_dir'. If 'test' is specified, also executes the 'test' subcommand. """ swiftc = os.path.abspath(args.swiftc) build_dir = os.path.abspath(args.build_dir) if args.build_style == "debug": style_options = "Debug" else: style_options = "Release" run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace " "-scheme SwiftXCTest " "-configuration {style_options} " "SWIFT_EXEC=\"{swiftc}\" " "SWIFT_LINK_OBJC_RUNTIME=YES " "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\"".format( swiftc=swiftc, build_dir=build_dir, style_options=style_options, source_dir=SOURCE_DIR)) if args.test: # Execute main() using the arguments necessary to run the tests. main(args=["test", "--swiftc", swiftc, build_dir]) @staticmethod def test(args): """ Test SwiftXCTest.framework, using the given 'swiftc' compiler, looking for it in the given 'build_dir'. 
""" swiftc = os.path.abspath(args.swiftc) build_dir = os.path.abspath(args.build_dir) if args.build_style == "debug": style_options = "Debug" else: style_options = "Release" run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace " "-scheme SwiftXCTestFunctionalTests " "-configuration {style_options} " "SWIFT_EXEC=\"{swiftc}\" " "SWIFT_LINK_OBJC_RUNTIME=YES " "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\" " "| grep -v \" export\"".format( swiftc=swiftc, build_dir=build_dir, style_options=style_options, source_dir=SOURCE_DIR)) @staticmethod def install(args): """ Installing XCTest is not supported on Darwin. """ note("error: The install command is not supported on this platform") exit(1) class GenericUnixStrategy: @staticmethod def requires_foundation_build_dir(): # This script does not know how to build Foundation in Unix environments, # so we need the path to a pre-built Foundation library. return True @staticmethod def build(args): """ Build XCTest and place the built products in the given 'build_dir'. If 'test' is specified, also executes the 'test' subcommand. """ swiftc = os.path.abspath(args.swiftc) build_dir = os.path.abspath(args.build_dir) foundation_build_dir = os.path.abspath(args.foundation_build_dir) core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir( foundation_build_dir, args.foundation_install_prefix) if args.libdispatch_build_dir: libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir) if args.libdispatch_src_dir: libdispatch_src_dir = os.path.abspath(args.libdispatch_src_dir) _mkdirp(build_dir) sourcePaths = glob.glob(os.path.join( SOURCE_DIR, 'Sources', 'XCTest', '*', '*.swift')) if args.build_style == "debug": style_options = "-g" else: style_options = "-O" # Not incremental.. 
# Build library if args.libdispatch_build_dir and args.libdispatch_src_dir: libdispatch_args = "-I {libdispatch_build_dir}/src -I {libdispatch_src_dir} ".format( libdispatch_build_dir=libdispatch_build_dir, libdispatch_src_dir=libdispatch_src_dir) else: libdispatch_args = "" run("{swiftc} -Xcc -fblocks -c {style_options} -emit-object -emit-module " "-module-name XCTest -module-link-name XCTest -parse-as-library " "-emit-module-path {build_dir}/XCTest.swiftmodule " "-force-single-frontend-invocation " "-I {foundation_build_dir} -I {core_foundation_build_dir} " "{libdispatch_args} " "{source_paths} -o {build_dir}/XCTest.o".format( swiftc=swiftc, style_options=style_options, build_dir=build_dir, foundation_build_dir=foundation_build_dir, core_foundation_build_dir=core_foundation_build_dir, libdispatch_args=libdispatch_args, source_paths=" ".join(sourcePaths))) run("{swiftc} -emit-library {build_dir}/XCTest.o " "-L {foundation_build_dir} -lswiftGlibc -lswiftCore -lFoundation -lm " # We embed an rpath of `$ORIGIN` to ensure other referenced # libraries (like `Foundation`) can be found solely via XCTest. "-Xlinker -rpath=\\$ORIGIN " "-o {build_dir}/libXCTest.so".format( swiftc=swiftc, build_dir=build_dir, foundation_build_dir=foundation_build_dir)) if args.test: # Execute main() using the arguments necessary to run the tests. main(args=["test", "--swiftc", swiftc, "--foundation-build-dir", foundation_build_dir, build_dir]) # If --module-install-path and --library-install-path were specified, # we also install the built XCTest products. if args.module_path is not None and args.lib_path is not None: # Execute main() using the arguments necessary for installation. main(args=["install", build_dir, "--module-install-path", args.module_path, "--library-install-path", args.lib_path]) note('Done.') @staticmethod def test(args): """ Test the built XCTest.so library at the given 'build_dir', using the given 'swiftc' compiler. 
""" lit_path = os.path.abspath(args.lit) if not os.path.exists(lit_path): raise IOError( 'Could not find lit tester tool at path: "{}". This tool is ' 'requred to run the test suite. Unless you specified a custom ' 'path to the tool using the "--lit" option, the lit tool will be ' 'found in the LLVM source tree, which is expected to be checked ' 'out in the same directory as swift-corelibs-xctest. If you do ' 'not have LLVM checked out at this path, you may follow the ' 'instructions for "Getting Sources for Swift and Related ' 'Projects" from the Swift project README in order to fix this ' 'error.'.format(lit_path)) # FIXME: Allow these to be specified by the Swift build script. lit_flags = "-sv --no-progress-bar" tests_path = os.path.join(SOURCE_DIR, "Tests", "Functional") foundation_build_dir = os.path.abspath(args.foundation_build_dir) core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir( foundation_build_dir, args.foundation_install_prefix) if args.libdispatch_build_dir: libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir) symlink_force(os.path.join(args.libdispatch_build_dir, "src", ".libs", "libdispatch.so"), foundation_build_dir) if args.libdispatch_src_dir and args.libdispatch_build_dir: libdispatch_src_args = "LIBDISPATCH_SRC_DIR={libdispatch_src_dir} LIBDISPATCH_BUILD_DIR={libdispatch_build_dir}".format( libdispatch_src_dir=os.path.abspath(args.libdispatch_src_dir), libdispatch_build_dir=os.path.join(args.libdispatch_build_dir, 'src', '.libs')) else: libdispatch_src_args = "" run('SWIFT_EXEC={swiftc} ' 'BUILT_PRODUCTS_DIR={built_products_dir} ' 'FOUNDATION_BUILT_PRODUCTS_DIR={foundation_build_dir} ' 'CORE_FOUNDATION_BUILT_PRODUCTS_DIR={core_foundation_build_dir} ' '{libdispatch_src_args} ' '{lit_path} {lit_flags} ' '{tests_path}'.format( swiftc=os.path.abspath(args.swiftc), built_products_dir=args.build_dir, foundation_build_dir=foundation_build_dir, core_foundation_build_dir=core_foundation_build_dir, 
libdispatch_src_args=libdispatch_src_args, lit_path=lit_path, lit_flags=lit_flags, tests_path=tests_path)) @staticmethod def install(args): """ Install the XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc build products into the given module and library paths. """ build_dir = os.path.abspath(args.build_dir) module_install_path = os.path.abspath(args.module_install_path) library_install_path = os.path.abspath(args.library_install_path) _mkdirp(module_install_path) _mkdirp(library_install_path) xctest_so = "libXCTest.so" run("cp {} {}".format( os.path.join(build_dir, xctest_so), os.path.join(library_install_path, xctest_so))) xctest_swiftmodule = "XCTest.swiftmodule" run("cp {} {}".format( os.path.join(build_dir, xctest_swiftmodule), os.path.join(module_install_path, xctest_swiftmodule))) xctest_swiftdoc = "XCTest.swiftdoc" run("cp {} {}".format( os.path.join(build_dir, xctest_swiftdoc), os.path.join(module_install_path, xctest_swiftdoc))) @staticmethod def core_foundation_build_dir(foundation_build_dir, foundation_install_prefix): """ Given the path to a swift-corelibs-foundation built product directory, return the path to CoreFoundation built products. When specifying a built Foundation dir such as '/build/foundation-linux-x86_64/Foundation', CoreFoundation dependencies are placed in 'usr/lib/swift'. Note that it's technically not necessary to include this extra path when linking the installed Swift's 'usr/lib/swift/linux/libFoundation.so'. """ return os.path.join(foundation_build_dir, foundation_install_prefix.strip("/"), 'lib', 'swift') def main(args=sys.argv[1:]): """ The main entry point for this script. Based on the subcommand given, delegates building or testing XCTest to a sub-parser and its corresponding function. """ strategy = DarwinStrategy if platform.system() == 'Darwin' else GenericUnixStrategy parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(""" Build, test, and install XCTest. 
NOTE: In general this script should not be invoked directly. The recommended way to build and test XCTest is via the Swift build script. See this project's README for details. The Swift build script invokes this %(prog)s script to build, test, and install this project. You may invoke it in the same way to build this project directly. For example, if you are in a Linux environment, your install of Swift is located at "/swift" and you wish to install XCTest into that same location, here is a sample invocation of the build script: $ %(prog)s \\ --swiftc="/swift/usr/bin/swiftc" \\ --build-dir="/tmp/XCTest_build" \\ --foundation-build-dir "/swift/usr/lib/swift/linux" \\ --library-install-path="/swift/usr/lib/swift/linux" \\ --module-install-path="/swift/usr/lib/swift/linux/x86_64" Note that installation is not supported on Darwin as this library is only intended to be used as a dependency in environments where Apple XCTest is not available. """)) subparsers = parser.add_subparsers( description=textwrap.dedent(""" Use one of these to specify whether to build, test, or install XCTest. If you don't specify any of these, 'build' is executed as a default. You may also use 'build' to also test and install the built products. Pass the -h or --help option to any of the subcommands for more information.""")) build_parser = subparsers.add_parser( "build", description=textwrap.dedent(""" Build XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc using the given Swift compiler. This command may also test and install the built products.""")) build_parser.set_defaults(func=strategy.build) build_parser.add_argument( "--swiftc", help="Path to the 'swiftc' compiler that will be used to build " "XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc. This will " "also be used to build the tests for those built products if the " "--test option is specified.", required=True) build_parser.add_argument( "--build-dir", help="Path to the output build directory. 
If not specified, a " "temporary directory is used.", default=tempfile.mkdtemp()) build_parser.add_argument( "--foundation-build-dir", help="Path to swift-corelibs-foundation build products, which " "the built XCTest.so will be linked against.", required=strategy.requires_foundation_build_dir()) build_parser.add_argument( "--foundation-install-prefix", help="Path to the installation location for swift-corelibs-foundation " "build products ('%(default)s' by default); CoreFoundation " "dependencies are expected to be found under " "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.", default="/usr") build_parser.add_argument( "--libdispatch-build-dir", help="Path to swift-corelibs-libdispatch build products, which " "the built XCTest.so will be linked against.") build_parser.add_argument( "--libdispatch-src-dir", help="Path to swift-corelibs-libdispatch source tree, which " "the built XCTest.so will be linked against.") build_parser.add_argument( "--module-install-path", help="Location at which to install XCTest.swiftmodule and " "XCTest.swiftdoc. This directory will be created if it doesn't " "already exist.", dest="module_path") build_parser.add_argument( "--library-install-path", help="Location at which to install XCTest.so. This directory will be " "created if it doesn't already exist.", dest="lib_path") build_parser.add_argument( "--release", help="builds for release", action="store_const", dest="build_style", const="release", default="debug") build_parser.add_argument( "--debug", help="builds for debug (the default)", action="store_const", dest="build_style", const="debug", default="debug") build_parser.add_argument( "--test", help="Whether to run tests after building. 
Note that you must have " "cloned https://github.com/apple/swift-llvm at {} in order to " "run this command.".format(os.path.join( os.path.dirname(SOURCE_DIR), 'llvm')), action="store_true") test_parser = subparsers.add_parser( "test", description="Tests a built XCTest framework at the given path.") test_parser.set_defaults(func=strategy.test) test_parser.add_argument( "build_dir", help="An absolute path to a directory containing the built XCTest.so " "library.") test_parser.add_argument( "--swiftc", help="Path to the 'swiftc' compiler used to build and run the tests.", required=True) test_parser.add_argument( "--lit", help="Path to the 'lit' tester tool used to run the test suite. " "'%(default)s' by default.", default=os.path.join(os.path.dirname(SOURCE_DIR), "llvm", "utils", "lit", "lit.py")) test_parser.add_argument( "--foundation-build-dir", help="Path to swift-corelibs-foundation build products, which the " "tests will be linked against.", required=strategy.requires_foundation_build_dir()) test_parser.add_argument( "--foundation-install-prefix", help="Path to the installation location for swift-corelibs-foundation " "build products ('%(default)s' by default); CoreFoundation " "dependencies are expected to be found under " "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.", default="/usr") test_parser.add_argument( "--libdispatch-build-dir", help="Path to swift-corelibs-libdispatch build products, which " "the built XCTest.so will be linked against.") test_parser.add_argument( "--libdispatch-src-dir", help="Path to swift-corelibs-libdispatch source tree, which " "the built XCTest.so will be linked against.") test_parser.add_argument( "--release", help="builds the tests for release", action="store_const", dest="build_style", const="release", default="debug") test_parser.add_argument( "--debug", help="builds the tests for debug (the default)", action="store_const", dest="build_style", const="debug", default="debug") install_parser = subparsers.add_parser( 
"install", description="Installs a built XCTest framework.") install_parser.set_defaults(func=strategy.install) install_parser.add_argument( "build_dir", help="An absolute path to a directory containing a built XCTest.so, " "XCTest.swiftmodule, and XCTest.swiftdoc.") install_parser.add_argument( "-m", "--module-install-path", help="Location at which to install XCTest.swiftmodule and " "XCTest.swiftdoc. This directory will be created if it doesn't " "already exist.") install_parser.add_argument( "-l", "--library-install-path", help="Location at which to install XCTest.so. This directory will be " "created if it doesn't already exist.") # Many versions of Python require a subcommand must be specified. # We handle this here: if no known subcommand (or none of the help options) # is included in the arguments, then insert the default subcommand # argument: 'build'. if any([a in ["build", "test", "install", "-h", "--help"] for a in args]): parsed_args = parser.parse_args(args=args) else: parsed_args = parser.parse_args(args=["build"] + args) # Execute the function for the subcommand we've been given. parsed_args.func(parsed_args) if __name__ == '__main__': main()
from __future__ import (absolute_import, division, print_function, unicode_literals) import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from .components import aes, assign_visual_mapping from .components import colors, shapes from .components.legend import draw_legend from .geoms import * from .scales import * from .themes.theme_gray import _set_default_theme_rcparams from .themes.theme_gray import _theme_grey_post_plot_callback import ggplot.utils.six as six __ALL__ = ["ggplot"] import sys import re import warnings # Show plots if in interactive mode if sys.flags.interactive: plt.ion() class ggplot(object): """ ggplot is the base layer or object that you use to define the components of your chart (x and y axis, shapes, colors, etc.). You can combine it with layers (or geoms) to make complex graphics with minimal effort. Parameters ----------- aesthetics : aes (ggplot.components.aes.aes) aesthetics of your plot data : pandas DataFrame (pd.DataFrame) a DataFrame with the data you want to plot Examples ---------- >>> p = ggplot(aes(x='x', y='y'), data=diamonds) >>> print(p + geom_point()) """ CONTINUOUS = ['x', 'y', 'size', 'alpha'] DISCRETE = ['color', 'shape', 'marker', 'alpha', 'linestyle'] def __init__(self, aesthetics, data): # ggplot should just 'figure out' which is which if not isinstance(data, pd.DataFrame): aesthetics, data = data, aesthetics self.aesthetics = aesthetics self.data = data self.data = _build_df_from_transforms(self.data, self.aesthetics) # defaults self.geoms = [] self.n_wide = 1 self.n_high = 1 self.n_dim_x = None self.n_dim_y = None # facets self.facets = [] self.facet_type = None self.facet_scales = None self.facet_pairs = [] # used by facet_grid # components self.title = None self.xlab = None self.ylab = None # format for x/y major ticks self.xtick_formatter = None self.xbreaks = None self.xtick_labels = None self.xmajor_locator = None self.xminor_locator = None self.ytick_formatter = None 
self.xlimits = None self.ylimits = None self.scale_y_reverse = None self.scale_x_reverse = None # legend is a dictionary of {legend_type: {visual_value: legend_key}}, # where legend_type is one of "color", "linestyle", "marker", "size"; # visual_value is color value, line style, marker character, or size # value; and legend_key is a quantile. self.legend = {} # Theme releated options # this must be set by any theme to prevent addig the default theme self.theme_applied = False self.rcParams = {} # Callbacks to change aspects of each axis self.post_plot_callbacks = [] # continuous color configs self.color_scale = None self.colormap = plt.cm.Blues self.manual_color_list = None def __repr__(self): """Print/show the plot""" figure = self.draw() # We're going to default to making the plot appear when __repr__ is # called. #figure.show() # doesn't work in ipython notebook plt.show() # TODO: We can probably get more sugary with this return "<ggplot: (%d)>" % self.__hash__() def draw(self): # Adding rc=self.rcParams does not validate/parses the params which then # throws an error during plotting! 
with mpl.rc_context(): if not self.theme_applied: _set_default_theme_rcparams(mpl) # will be empty if no theme was applied for key in six.iterkeys(self.rcParams): val = self.rcParams[key] # there is a bug in matplotlib which does not allow None directly # https://github.com/matplotlib/matplotlib/issues/2543 try: if key == 'text.dvipnghack' and val is None: val = "none" mpl.rcParams[key] = val except Exception as e: msg = """Setting "mpl.rcParams['%s']=%s" raised an Exception: %s""" % (key, str(val), str(e)) warnings.warn(msg, RuntimeWarning) # draw is not allowed to show a plot, so we can use to result for ggsave # This sets a rcparam, so we don't have to undo it after plotting mpl.interactive(False) if self.facet_type == "grid": fig, axs = plt.subplots(self.n_high, self.n_wide, sharex=True, sharey=True) plt.subplots_adjust(wspace=.05, hspace=.05) elif self.facet_type == "wrap": # add (more than) the needed number of subplots fig, axs = plt.subplots(self.n_high, self.n_wide) # there are some extra, remove the plots subplots_available = self.n_wide * self.n_high extra_subplots = subplots_available - self.n_dim_x for extra_plot in axs.flatten()[-extra_subplots:]: extra_plot.axis('off') # plots is a mapping from xth-plot -> subplot position plots = [] for x in range(self.n_wide): for y in range(self.n_high): plots.append((x, y)) plots = sorted(plots, key=lambda x: x[1] + x[0] * self.n_high + 1) else: fig, axs = plt.subplots(self.n_high, self.n_wide) # Set the default plot to the first one plt.subplot(self.n_wide, self.n_high, 1) # Aes need to be initialized BEFORE we start faceting. This is b/c # we want to have a consistent aes mapping across facets. self.data = assign_visual_mapping(self.data, self.aesthetics, self) # Faceting just means doing an additional groupby. 
The # dimensions of the plot remain the same if self.facets: # the current subplot in the axs and plots cntr = 0 if len(self.facets) == 2 and self.facet_type != "wrap": # store the extreme x and y coordinates of each pair of axes axis_extremes = np.zeros(shape=(self.n_high * self.n_wide, 4)) xlab_offset = .15 for _iter, (facets, frame) in enumerate(self.data.groupby(self.facets)): pos = self.facet_pairs.index(facets) + 1 plt.subplot(self.n_wide, self.n_high, pos) for layer in self._get_layers(frame): for geom in self.geoms: callbacks = geom.plot_layer(layer) axis_extremes[_iter] = [min(plt.xlim()), max(plt.xlim()), min(plt.ylim()), max(plt.ylim())] # find the grid wide data extremeties xlab_min, ylab_min = np.min(axis_extremes, axis=0)[[0, 2]] xlab_max, ylab_max = np.max(axis_extremes, axis=0)[[1, 3]] # position of vertical labels for facet grid xlab_pos = xlab_max + xlab_offset ylab_pos = ylab_max - float(ylab_max - ylab_min) / 2 # This needs to enumerate all possibilities for _iter, facets in enumerate(self.facet_pairs): pos = _iter + 1 if pos <= self.n_high: plt.subplot(self.n_wide, self.n_high, pos) for layer in self._get_layers(self.data): for geom in self.geoms: callbacks = geom.plot_layer(layer) axis_extremes[_iter] = [min(plt.xlim()), max(plt.xlim()), min(plt.ylim()), max(plt.ylim())] # find the grid wide data extremeties xlab_min, ylab_min = np.min(axis_extremes, axis=0)[[0, 2]] xlab_max, ylab_max = np.max(axis_extremes, axis=0)[[1, 3]] # position of vertical labels for facet grid xlab_pos = xlab_max + xlab_offset ylab_pos = ylab_max - float(ylab_max - ylab_min) / 2 # This needs to enumerate all possibilities for pos, facets in enumerate(self.facet_pairs): pos += 1 if pos <= self.n_high: plt.subplot(self.n_wide, self.n_high, pos) plt.table(cellText=[[facets[1]]], loc='top', cellLoc='center', cellColours=[['lightgrey']]) if (pos % self.n_high) == 0: plt.subplot(self.n_wide, self.n_high, pos) x = max(plt.xticks()[0]) y = max(plt.yticks()[0]) ax = axs[pos % 
self.n_high][pos % self.n_wide] plt.text(xlab_pos, ylab_pos, facets[0], bbox=dict( facecolor='lightgrey', edgecolor='black', color='black', width=mpl.rcParams['font.size'] * 1.65 ), fontdict=dict(rotation=-90, verticalalignment="center", horizontalalignment='left') ) plt.subplot(self.n_wide, self.n_high, pos) # Handle the different scale types here # (free|free_y|free_x|None) and also make sure that only the # left column gets y scales and the bottom row gets x scales scale_facet_grid(self.n_wide, self.n_high, self.facet_pairs, self.facet_scales) else: # now facet_wrap > 2 for facet, frame in self.data.groupby(self.facets): for layer in self._get_layers(frame): for geom in self.geoms: if self.facet_type == "wrap": if cntr + 1 > len(plots): continue pos = plots[cntr] if pos is None: continue y_i, x_i = pos pos = x_i + y_i * self.n_high + 1 plt.subplot(self.n_wide, self.n_high, pos) else: plt.subplot(self.n_wide, self.n_high, cntr) # TODO: this needs some work if (cntr % self.n_high) == -1: plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off') callbacks = geom.plot_layer(layer) if callbacks: for callback in callbacks: fn = getattr(axs[cntr], callback['function']) fn(*callback['args']) title = facet if isinstance(facet, tuple): title = ", ".join(facet) plt.table(cellText=[[title]], loc='top', cellLoc='center', cellColours=[['lightgrey']]) cntr += 1 # NOTE: Passing n_high for cols (instead of n_wide) and # n_wide for rows because in all previous calls to # plt.subplot, n_wide is passed as the number of rows, not # columns. 
scale_facet_wrap(self.n_wide, self.n_high, range(cntr), self.facet_scales) else: # no faceting for geom in self.geoms: _aes = self.aesthetics if geom.aes: # update the default mapping with the geom specific one _aes = _aes.copy() _aes.update(geom.aes) if not geom.data is None: data = _build_df_from_transforms(geom.data, _aes) data = assign_visual_mapping(data, _aes, self) else: data = self.data for layer in self._get_layers(data, _aes): plt.subplot(1, 1, 1) callbacks = geom.plot_layer(layer) if callbacks: for callback in callbacks: fn = getattr(axs, callback['function']) fn(*callback['args']) # Handling the details of the chart here; probably be a better # way to do this... if self.title: plt.title(self.title) if self.xlab: if self.facet_type == "grid": fig.text(0.5, 0.025, self.xlab) else: plt.xlabel(self.xlab) if self.ylab: if self.facet_type == "grid": fig.text(0.025, 0.5, self.ylab, rotation='vertical') else: plt.ylabel(self.ylab) if self.xmajor_locator: plt.gca().xaxis.set_major_locator(self.xmajor_locator) if self.xtick_formatter: plt.gca().xaxis.set_major_formatter(self.xtick_formatter) fig.autofmt_xdate() if self.xbreaks: # xbreaks is a list manually provided plt.gca().xaxis.set_ticks(self.xbreaks) if self.xtick_labels: plt.gca().xaxis.set_ticklabels(self.xtick_labels) if self.ytick_formatter: plt.gca().yaxis.set_major_formatter(self.ytick_formatter) if self.xlimits: plt.xlim(self.xlimits) if self.ylimits: plt.ylim(self.ylimits) if self.scale_y_reverse: plt.gca().invert_yaxis() if self.scale_x_reverse: plt.gca().invert_xaxis() # TODO: Having some issues here with things that shouldn't have a legend # or at least shouldn't get shrunk to accomodate one. Need some sort of # test in place to prevent this OR prevent legend getting set to True. 
if self.legend: if self.facets: ax = axs[0][self.n_wide - 1] box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) else: box = axs.get_position() axs.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax = axs cntr = 0 for ltype, legend in self.legend.items(): lname = self.aesthetics.get(ltype, ltype) new_legend = draw_legend(ax, legend, ltype, lname, cntr) ax.add_artist(new_legend) cntr += 1 # Finaly apply any post plot callbacks (theming, etc) if self.theme_applied: for ax in plt.gcf().axes: self._apply_post_plot_callbacks(ax) else: for ax in plt.gcf().axes: _theme_grey_post_plot_callback(ax) return plt.gcf() def _get_layers(self, data=None, aes=None): # This is handy because... (something to do w/ facets?) if data is None: data = self.data if aes is None: aes = self.aesthetics # We want everything to be a DataFrame. We're going to default # to key to handle items where the user hard codes a aesthetic # (i.e. alpha=0.6) mapping = pd.DataFrame({ ae: data.get(key, key) for ae, key in aes.items() }) if "color" in mapping: mapping['color'] = data['color_mapping'] if "size" in mapping: mapping['size'] = data['size_mapping'] if "shape" in mapping: mapping['marker'] = data['shape_mapping'] del mapping['shape'] if "linestyle" in mapping: mapping['linestyle'] = data['linestyle_mapping'] # Default the x and y axis labels to the name of the column if "x" in aes and self.xlab is None: self.xlab = aes['x'] if "y" in aes and self.ylab is None: self.ylab = aes['y'] # Automatically drop any row that has an NA value mapping = mapping.dropna() discrete_aes = [ae for ae in self.DISCRETE if ae in mapping] layers = [] if len(discrete_aes) == 0: frame = mapping.to_dict('list') layers.append(frame) else: for name, frame in mapping.groupby(discrete_aes): frame = frame.to_dict('list') for ae in self.DISCRETE: if ae in frame: frame[ae] = frame[ae][0] layers.append(frame) return layers def add_to_legend(self, legend_type, legend_dict, 
scale_type="discrete"): """Adds the the specified legend to the legend Parameter --------- legend_type : str type of legend, one of "color", "linestyle", "marker", "size" legend_dict : dict a dictionary of {visual_value: legend_key} where visual_value is color value, line style, marker character, or size value; and legend_key is a quantile. scale_type : str either "discrete" (default) or "continuous"; usually only color needs to specify which kind of legend should be drawn, all other scales will get a discrete scale. """ # scale_type is up to now unused # TODO: what happens if we add a second color mapping? # Currently the color mapping in the legend is overwritten. # What does ggplot do in such a case? if legend_type in self.legend: pass #msg = "Adding a secondary mapping of {0} is unsupported and no legend for this mapping is added.\n" #sys.stderr.write(msg.format(str(legend_type))) self.legend[legend_type] = legend_dict def _apply_post_plot_callbacks(self, axis): for cb in self.post_plot_callbacks: cb(axis) def _is_identity(x): if x in colors.COLORS: return True elif x in shapes.SHAPES: return True elif isinstance(x, (float, int)): return True else: return False def _build_df_from_transforms(data, aes): """Adds columns from the in aes included transformations Possible transformations are "factor(<col>)" and expresions which can be used with eval. 
Parameters ---------- data : DataFrame the original dataframe aes : aesthetics the aesthetic Returns ------- data : DateFrame Transformend DataFrame """ for ae, name in aes.items(): if name not in data and not _is_identity(name): # Look for alias/lambda functions result = re.findall(r'(?:[A-Z])|(?:[A-Za_-z0-9]+)|(?:[/*+_=\(\)-])', name) if re.match("factor[(][A-Za-z_0-9]+[)]", name): m = re.search("factor[(]([A-Za-z_0-9]+)[)]", name) data[name] = data[m.group(1)].apply(str) else: lambda_column = "" for item in result: if re.match("[/*+_=\(\)-]", item): pass elif re.match("^[0-9.]+$", item): pass else: item = "data.get('%s')" % item lambda_column += item data[name] = eval(lambda_column) return data keeping changes from HEAD from __future__ import (absolute_import, division, print_function, unicode_literals) import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from .components import aes, assign_visual_mapping from .components import colors, shapes from .components.legend import draw_legend from .geoms import * from .scales import * from .themes.theme_gray import _set_default_theme_rcparams from .themes.theme_gray import _theme_grey_post_plot_callback import ggplot.utils.six as six __ALL__ = ["ggplot"] import sys import re import warnings # Show plots if in interactive mode if sys.flags.interactive: plt.ion() class ggplot(object): """ ggplot is the base layer or object that you use to define the components of your chart (x and y axis, shapes, colors, etc.). You can combine it with layers (or geoms) to make complex graphics with minimal effort. 
Parameters ----------- aesthetics : aes (ggplot.components.aes.aes) aesthetics of your plot data : pandas DataFrame (pd.DataFrame) a DataFrame with the data you want to plot Examples ---------- >>> p = ggplot(aes(x='x', y='y'), data=diamonds) >>> print(p + geom_point()) """ CONTINUOUS = ['x', 'y', 'size', 'alpha'] DISCRETE = ['color', 'shape', 'marker', 'alpha', 'linestyle'] def __init__(self, aesthetics, data): # ggplot should just 'figure out' which is which if not isinstance(data, pd.DataFrame): aesthetics, data = data, aesthetics self.aesthetics = aesthetics self.data = data self.data = _build_df_from_transforms(self.data, self.aesthetics) # defaults self.geoms = [] self.n_wide = 1 self.n_high = 1 self.n_dim_x = None self.n_dim_y = None # facets self.facets = [] self.facet_type = None self.facet_scales = None self.facet_pairs = [] # used by facet_grid # components self.title = None self.xlab = None self.ylab = None # format for x/y major ticks self.xtick_formatter = None self.xbreaks = None self.xtick_labels = None self.xmajor_locator = None self.xminor_locator = None self.ytick_formatter = None self.xlimits = None self.ylimits = None self.scale_y_reverse = None self.scale_x_reverse = None # legend is a dictionary of {legend_type: {visual_value: legend_key}}, # where legend_type is one of "color", "linestyle", "marker", "size"; # visual_value is color value, line style, marker character, or size # value; and legend_key is a quantile. self.legend = {} # Theme releated options # this must be set by any theme to prevent addig the default theme self.theme_applied = False self.rcParams = {} # Callbacks to change aspects of each axis self.post_plot_callbacks = [] # continuous color configs self.color_scale = None self.colormap = plt.cm.Blues self.manual_color_list = None def __repr__(self): """Print/show the plot""" figure = self.draw() # We're going to default to making the plot appear when __repr__ is # called. 
#figure.show() # doesn't work in ipython notebook plt.show() # TODO: We can probably get more sugary with this return "<ggplot: (%d)>" % self.__hash__() def draw(self): # Adding rc=self.rcParams does not validate/parses the params which then # throws an error during plotting! with mpl.rc_context(): if not self.theme_applied: _set_default_theme_rcparams(mpl) # will be empty if no theme was applied for key in six.iterkeys(self.rcParams): val = self.rcParams[key] # there is a bug in matplotlib which does not allow None directly # https://github.com/matplotlib/matplotlib/issues/2543 try: if key == 'text.dvipnghack' and val is None: val = "none" mpl.rcParams[key] = val except Exception as e: msg = """Setting "mpl.rcParams['%s']=%s" raised an Exception: %s""" % (key, str(val), str(e)) warnings.warn(msg, RuntimeWarning) # draw is not allowed to show a plot, so we can use to result for ggsave # This sets a rcparam, so we don't have to undo it after plotting mpl.interactive(False) if self.facet_type == "grid": fig, axs = plt.subplots(self.n_high, self.n_wide, sharex=True, sharey=True) plt.subplots_adjust(wspace=.05, hspace=.05) elif self.facet_type == "wrap": # add (more than) the needed number of subplots fig, axs = plt.subplots(self.n_high, self.n_wide) # there are some extra, remove the plots subplots_available = self.n_wide * self.n_high extra_subplots = subplots_available - self.n_dim_x for extra_plot in axs.flatten()[-extra_subplots:]: extra_plot.axis('off') # plots is a mapping from xth-plot -> subplot position plots = [] for x in range(self.n_wide): for y in range(self.n_high): plots.append((x, y)) plots = sorted(plots, key=lambda x: x[1] + x[0] * self.n_high + 1) else: fig, axs = plt.subplots(self.n_high, self.n_wide) axs = np.atleast_2d(axs) # Set the default plot to the first one plt.subplot(self.n_wide, self.n_high, 1) # Aes need to be initialized BEFORE we start faceting. This is b/c # we want to have a consistent aes mapping across facets. 
self.data = assign_visual_mapping(self.data, self.aesthetics, self) # Faceting just means doing an additional groupby. The # dimensions of the plot remain the same if self.facets: # the current subplot in the axs and plots cntr = 0 if len(self.facets) == 2 and self.facet_type != "wrap": # store the extreme x and y coordinates of each pair of axes axis_extremes = np.zeros(shape=(self.n_high * self.n_wide, 4)) xlab_offset = .15 for _iter, (facets, frame) in enumerate(self.data.groupby(self.facets)): pos = self.facet_pairs.index(facets) + 1 plt.subplot(self.n_wide, self.n_high, pos) for layer in self._get_layers(frame): for geom in self.geoms: callbacks = geom.plot_layer(layer) axis_extremes[_iter] = [min(plt.xlim()), max(plt.xlim()), min(plt.ylim()), max(plt.ylim())] # find the grid wide data extremeties xlab_min, ylab_min = np.min(axis_extremes, axis=0)[[0, 2]] xlab_max, ylab_max = np.max(axis_extremes, axis=0)[[1, 3]] # position of vertical labels for facet grid xlab_pos = xlab_max + xlab_offset ylab_pos = ylab_max - float(ylab_max - ylab_min) / 2 # This needs to enumerate all possibilities for _iter, facets in enumerate(self.facet_pairs): pos = _iter + 1 if pos <= self.n_high: plt.subplot(self.n_wide, self.n_high, pos) for layer in self._get_layers(self.data): for geom in self.geoms: callbacks = geom.plot_layer(layer) axis_extremes[_iter] = [min(plt.xlim()), max(plt.xlim()), min(plt.ylim()), max(plt.ylim())] # find the grid wide data extremeties xlab_min, ylab_min = np.min(axis_extremes, axis=0)[[0, 2]] xlab_max, ylab_max = np.max(axis_extremes, axis=0)[[1, 3]] # position of vertical labels for facet grid xlab_pos = xlab_max + xlab_offset ylab_pos = ylab_max - float(ylab_max - ylab_min) / 2 # This needs to enumerate all possibilities for pos, facets in enumerate(self.facet_pairs): pos += 1 if pos <= self.n_high: plt.subplot(self.n_wide, self.n_high, pos) plt.table(cellText=[[facets[1]]], loc='top', cellLoc='center', cellColours=[['lightgrey']]) if (pos % 
self.n_high) == 0: plt.subplot(self.n_wide, self.n_high, pos) x = max(plt.xticks()[0]) y = max(plt.yticks()[0]) ax = axs[pos % self.n_high][pos % self.n_wide] plt.text(xlab_pos, ylab_pos, facets[0], bbox=dict( facecolor='lightgrey', edgecolor='black', color='black', width=mpl.rcParams['font.size'] * 1.65 ), fontdict=dict(rotation=-90, verticalalignment="center", horizontalalignment='left') ) plt.subplot(self.n_wide, self.n_high, pos) # Handle the different scale types here # (free|free_y|free_x|None) and also make sure that only the # left column gets y scales and the bottom row gets x scales scale_facet_grid(self.n_wide, self.n_high, self.facet_pairs, self.facet_scales) else: # now facet_wrap > 2 for facet, frame in self.data.groupby(self.facets): for layer in self._get_layers(frame): for geom in self.geoms: if self.facet_type == "wrap": if cntr + 1 > len(plots): continue pos = plots[cntr] if pos is None: continue y_i, x_i = pos pos = x_i + y_i * self.n_high + 1 ax = plt.subplot(self.n_wide, self.n_high, pos) else: ax = plt.subplot(self.n_wide, self.n_high, cntr) # TODO: this needs some work if (cntr % self.n_high) == -1: plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off') callbacks = geom.plot_layer(layer) if callbacks: for callback in callbacks: fn = getattr(ax, callback['function']) fn(*callback['args']) title = facet if isinstance(facet, tuple): title = ", ".join(facet) plt.table(cellText=[[title]], loc='top', cellLoc='center', cellColours=[['lightgrey']]) cntr += 1 # NOTE: Passing n_high for cols (instead of n_wide) and # n_wide for rows because in all previous calls to # plt.subplot, n_wide is passed as the number of rows, not # columns. 
scale_facet_wrap(self.n_wide, self.n_high, range(cntr), self.facet_scales) else: # no faceting for geom in self.geoms: _aes = self.aesthetics if geom.aes: # update the default mapping with the geom specific one _aes = _aes.copy() _aes.update(geom.aes) if not geom.data is None: data = _build_df_from_transforms(geom.data, _aes) data = assign_visual_mapping(data, _aes, self) else: data = self.data for layer in self._get_layers(data, _aes): plt.subplot(1, 1, 1) callbacks = geom.plot_layer(layer) if callbacks: for callback in callbacks: fn = getattr(axs[0][0], callback['function']) fn(*callback['args']) # Handling the details of the chart here; probably be a better # way to do this... if self.title: plt.title(self.title) if self.xlab: if self.facet_type == "grid": fig.text(0.5, 0.025, self.xlab) else: plt.xlabel(self.xlab) if self.ylab: if self.facet_type == "grid": fig.text(0.025, 0.5, self.ylab, rotation='vertical') else: plt.ylabel(self.ylab) if self.xmajor_locator: plt.gca().xaxis.set_major_locator(self.xmajor_locator) if self.xtick_formatter: plt.gca().xaxis.set_major_formatter(self.xtick_formatter) fig.autofmt_xdate() if self.xbreaks: # xbreaks is a list manually provided plt.gca().xaxis.set_ticks(self.xbreaks) if self.xtick_labels: plt.gca().xaxis.set_ticklabels(self.xtick_labels) if self.ytick_formatter: plt.gca().yaxis.set_major_formatter(self.ytick_formatter) if self.xlimits: plt.xlim(self.xlimits) if self.ylimits: plt.ylim(self.ylimits) if self.scale_y_reverse: plt.gca().invert_yaxis() if self.scale_x_reverse: plt.gca().invert_xaxis() # TODO: Having some issues here with things that shouldn't have a legend # or at least shouldn't get shrunk to accomodate one. Need some sort of # test in place to prevent this OR prevent legend getting set to True. 
if self.legend: if self.facets: ax = axs[0][self.n_wide - 1] box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) else: box = axs.get_position() axs.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax = axs cntr = 0 for ltype, legend in self.legend.items(): lname = self.aesthetics.get(ltype, ltype) new_legend = draw_legend(ax, legend, ltype, lname, cntr) ax.add_artist(new_legend) cntr += 1 # Finaly apply any post plot callbacks (theming, etc) if self.theme_applied: for ax in plt.gcf().axes: self._apply_post_plot_callbacks(ax) else: for ax in plt.gcf().axes: _theme_grey_post_plot_callback(ax) return plt.gcf() def _get_layers(self, data=None, aes=None): # This is handy because... (something to do w/ facets?) if data is None: data = self.data if aes is None: aes = self.aesthetics # We want everything to be a DataFrame. We're going to default # to key to handle items where the user hard codes a aesthetic # (i.e. alpha=0.6) mapping = pd.DataFrame({ ae: data.get(key, key) for ae, key in aes.items() }) if "color" in mapping: mapping['color'] = data['color_mapping'] if "size" in mapping: mapping['size'] = data['size_mapping'] if "shape" in mapping: mapping['marker'] = data['shape_mapping'] del mapping['shape'] if "linestyle" in mapping: mapping['linestyle'] = data['linestyle_mapping'] # Default the x and y axis labels to the name of the column if "x" in aes and self.xlab is None: self.xlab = aes['x'] if "y" in aes and self.ylab is None: self.ylab = aes['y'] # Automatically drop any row that has an NA value mapping = mapping.dropna() discrete_aes = [ae for ae in self.DISCRETE if ae in mapping] layers = [] if len(discrete_aes) == 0: frame = mapping.to_dict('list') layers.append(frame) else: for name, frame in mapping.groupby(discrete_aes): frame = frame.to_dict('list') for ae in self.DISCRETE: if ae in frame: frame[ae] = frame[ae][0] layers.append(frame) return layers def add_to_legend(self, legend_type, legend_dict, 
scale_type="discrete"): """Adds the the specified legend to the legend Parameter --------- legend_type : str type of legend, one of "color", "linestyle", "marker", "size" legend_dict : dict a dictionary of {visual_value: legend_key} where visual_value is color value, line style, marker character, or size value; and legend_key is a quantile. scale_type : str either "discrete" (default) or "continuous"; usually only color needs to specify which kind of legend should be drawn, all other scales will get a discrete scale. """ # scale_type is up to now unused # TODO: what happens if we add a second color mapping? # Currently the color mapping in the legend is overwritten. # What does ggplot do in such a case? if legend_type in self.legend: pass #msg = "Adding a secondary mapping of {0} is unsupported and no legend for this mapping is added.\n" #sys.stderr.write(msg.format(str(legend_type))) self.legend[legend_type] = legend_dict def _apply_post_plot_callbacks(self, axis): for cb in self.post_plot_callbacks: cb(axis) def _is_identity(x): if x in colors.COLORS: return True elif x in shapes.SHAPES: return True elif isinstance(x, (float, int)): return True else: return False def _build_df_from_transforms(data, aes): """Adds columns from the in aes included transformations Possible transformations are "factor(<col>)" and expresions which can be used with eval. 
Parameters ---------- data : DataFrame the original dataframe aes : aesthetics the aesthetic Returns ------- data : DateFrame Transformend DataFrame """ for ae, name in aes.items(): if name not in data and not _is_identity(name): # Look for alias/lambda functions result = re.findall(r'(?:[A-Z])|(?:[A-Za_-z0-9]+)|(?:[/*+_=\(\)-])', name) if re.match("factor[(][A-Za-z_0-9]+[)]", name): m = re.search("factor[(]([A-Za-z_0-9]+)[)]", name) data[name] = data[m.group(1)].apply(str) else: lambda_column = "" for item in result: if re.match("[/*+_=\(\)-]", item): pass elif re.match("^[0-9.]+$", item): pass else: item = "data.get('%s')" % item lambda_column += item data[name] = eval(lambda_column) return data
# -*- coding: utf-8 -*- # # dp for Tornado # YoungYong Park (youngyongpark@gmail.com) # 2014.11.11 # from __future__ import absolute_import from engine.helper import Helper as dpHelper import string unicode_type = type(u'') class StringHelper(dpHelper): @property def ascii_uppercase(self): return string.ascii_uppercase @property def ascii_letters(self): return string.ascii_letters def is_string(self, s): if self.helper.system.py_version <= 2: types = basestring, else: types = str, return True if isinstance(s, types) else False def random_string(self, length): return ''.join(self.helper.random.sample(self.ascii_letters, length)) def check_exist_repeated_text(self, s, criteria=3): if not self.is_string(s): return None k = s[0] n = 0 for c in s: if c == k: n += 1 if n >= criteria: return True else: k = c n = 1 return False def to_str(self, s, preserve_none=True): if s is None: return s if not self.is_string(s): s = str(s) if type(s) == unicode_type: if self.helper.system.py_version <= 2: return s.encode('UTF-8') else: return s else: return s def to_unicode(self, s, preserve_none=True): if s is None: return s if not self.is_string(s): s = str(s) if type(s) != unicode_type: return s.decode('UTF-8') else: return s Add is_string alias to is_str. 
# -*- coding: utf-8 -*- # # dp for Tornado # YoungYong Park (youngyongpark@gmail.com) # 2014.11.11 # from __future__ import absolute_import from engine.helper import Helper as dpHelper import string unicode_type = type(u'') class StringHelper(dpHelper): @property def ascii_uppercase(self): return string.ascii_uppercase @property def ascii_letters(self): return string.ascii_letters def is_str(self, s): return self.is_string(s) def is_string(self, s): if self.helper.system.py_version <= 2: types = basestring, else: types = str, return True if isinstance(s, types) else False def random_string(self, length): return ''.join(self.helper.random.sample(self.ascii_letters, length)) def check_exist_repeated_text(self, s, criteria=3): if not self.is_string(s): return None k = s[0] n = 0 for c in s: if c == k: n += 1 if n >= criteria: return True else: k = c n = 1 return False def to_str(self, s, preserve_none=True): if s is None: return s if not self.is_string(s): s = str(s) if type(s) == unicode_type: if self.helper.system.py_version <= 2: return s.encode('UTF-8') else: return s else: return s def to_unicode(self, s, preserve_none=True): if s is None: return s if not self.is_string(s): s = str(s) if type(s) != unicode_type: return s.decode('UTF-8') else: return s
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for softmax_policy.""" from absl.testing import absltest from absl.testing import parameterized from open_spiel.python import policy from open_spiel.python.mfg import value from open_spiel.python.mfg.algorithms import best_response_value from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms import greedy_policy from open_spiel.python.mfg.algorithms import softmax_policy from open_spiel.python.mfg.algorithms import policy_value from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import import pyspiel class SoftmaxPolicyTest(parameterized.TestCase): @parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'), ('cpp', 'mfg_crowd_modelling')) def test_softmax(self, name): """Check if the softmax policy works as expected. The test checks that: - uniform prior policy gives the same results than no prior. - very high temperature gives almost a uniform policy. - very low temperature gives almost a deterministic policy for the best action. Args: name: Name of the game. 
""" game = pyspiel.load_game(name) uniform_policy = policy.UniformRandomPolicy(game) dist = distribution.DistributionPolicy(game, uniform_policy) br_value = best_response_value.BestResponse( game, dist, value.TabularValueFunction(game)) br_init_val = br_value(game.new_initial_state()) # uniform prior policy gives the same results than no prior. softmax_pi_uniform_prior = softmax_policy.SoftmaxPolicy(game, None, 1.0, br_value, uniform_policy).to_tabular() softmax_pi_uniform_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_uniform_prior, value.TabularValueFunction(game)) softmax_pi_uniform_prior_init_val = softmax_pi_uniform_prior_value(game.new_initial_state()) softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 1.0, br_value, None) softmax_pi_no_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_no_prior, value.TabularValueFunction(game)) softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(game.new_initial_state()) self.assertAlmostEqual(softmax_pi_uniform_prior_init_val, softmax_pi_no_prior_init_val) # very high temperature gives almost a uniform policy. uniform_policy = uniform_policy.to_tabular() uniform_value = policy_value.PolicyValue(game, dist, uniform_policy, value.TabularValueFunction(game)) uniform_init_val = uniform_value(game.new_initial_state()) softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 100000000, br_value, None) softmax_pi_no_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_no_prior, value.TabularValueFunction(game)) softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(game.new_initial_state()) self.assertAlmostEqual(uniform_init_val, softmax_pi_no_prior_init_val) # very low temperature gives almost a best response policy. 
softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 0.0001, br_value, None) softmax_pi_no_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_no_prior, value.TabularValueFunction(game)) softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(game.new_initial_state()) self.assertAlmostEqual(br_init_val, softmax_pi_no_prior_init_val) if __name__ == '__main__': absltest.main() Update softmax_policy_test.py Removed unused import. # Copyright 2019 DeepMind Technologies Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for softmax_policy.""" from absl.testing import absltest from absl.testing import parameterized from open_spiel.python import policy from open_spiel.python.mfg import value from open_spiel.python.mfg.algorithms import best_response_value from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms import softmax_policy from open_spiel.python.mfg.algorithms import policy_value from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import import pyspiel class SoftmaxPolicyTest(parameterized.TestCase): @parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'), ('cpp', 'mfg_crowd_modelling')) def test_softmax(self, name): """Check if the softmax policy works as expected. The test checks that: - uniform prior policy gives the same results than no prior. - very high temperature gives almost a uniform policy. 
- very low temperature gives almost a deterministic policy for the best action. Args: name: Name of the game. """ game = pyspiel.load_game(name) uniform_policy = policy.UniformRandomPolicy(game) dist = distribution.DistributionPolicy(game, uniform_policy) br_value = best_response_value.BestResponse( game, dist, value.TabularValueFunction(game)) br_init_val = br_value(game.new_initial_state()) # uniform prior policy gives the same results than no prior. softmax_pi_uniform_prior = softmax_policy.SoftmaxPolicy(game, None, 1.0, br_value, uniform_policy).to_tabular() softmax_pi_uniform_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_uniform_prior, value.TabularValueFunction(game)) softmax_pi_uniform_prior_init_val = softmax_pi_uniform_prior_value(game.new_initial_state()) softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 1.0, br_value, None) softmax_pi_no_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_no_prior, value.TabularValueFunction(game)) softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(game.new_initial_state()) self.assertAlmostEqual(softmax_pi_uniform_prior_init_val, softmax_pi_no_prior_init_val) # very high temperature gives almost a uniform policy. uniform_policy = uniform_policy.to_tabular() uniform_value = policy_value.PolicyValue(game, dist, uniform_policy, value.TabularValueFunction(game)) uniform_init_val = uniform_value(game.new_initial_state()) softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 100000000, br_value, None) softmax_pi_no_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_no_prior, value.TabularValueFunction(game)) softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(game.new_initial_state()) self.assertAlmostEqual(uniform_init_val, softmax_pi_no_prior_init_val) # very low temperature gives almost a best response policy. 
softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 0.0001, br_value, None) softmax_pi_no_prior_value = policy_value.PolicyValue(game, dist, softmax_pi_no_prior, value.TabularValueFunction(game)) softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(game.new_initial_state()) self.assertAlmostEqual(br_init_val, softmax_pi_no_prior_init_val) if __name__ == '__main__': absltest.main()
from flask import Flask, abort import gzip import configparser import os import uuid import json app = Flask(__name__) FILE_NAME = "picc/config.conf" if not os.path.exists("picc"): os.makedirs("picc") config = configparser.ConfigParser() shared_secret = None # Initial Setup if not os.path.isfile(FILE_NAME): shared_secret = str(uuid.uuid4()) config.set(configparser.DEFAULTSECT, "shared-secret", value=shared_secret) config.write(open(FILE_NAME, 'w+')) else: config.read(FILE_NAME) shared_secret = config.get(configparser.DEFAULTSECT, "shared-secret") class VideoStitcher: _video_id = None _name = None def __init__(self, video_id, name): self._video_id = video_id self._name = name @app.route("/auth/<secret>/<public_key>", methods=['GET']) def main_root(secret=None, public_key=None): if not secret or not public_key: abort(401) return if secret != shared_secret: abort(403) return pass @app.route("/send_image", methods=['POST']) def send_image(): pass if __name__ == "__main__": app.run() Why does it need ID. from flask import Flask, abort import gzip import configparser import os import uuid import json app = Flask(__name__) FILE_NAME = "picc/config.conf" if not os.path.exists("picc"): os.makedirs("picc") config = configparser.ConfigParser() shared_secret = None # Initial Setup if not os.path.isfile(FILE_NAME): shared_secret = str(uuid.uuid4()) config.set(configparser.DEFAULTSECT, "shared-secret", value=shared_secret) config.write(open(FILE_NAME, 'w+')) else: config.read(FILE_NAME) shared_secret = config.get(configparser.DEFAULTSECT, "shared-secret") class VideoStitcher: _name = None def __init__(self, name): self._name = name @app.route("/auth/<secret>/<public_key>", methods=['GET']) def main_root(secret=None, public_key=None): if not secret or not public_key: abort(401) return if secret != shared_secret: abort(403) return pass @app.route("/send_image", methods=['POST']) def send_image(): pass if __name__ == "__main__": app.run()
#!/usr/bin/env python3 import asyncio import sys sys.path.append("..") import jauxiliar as jaux import josecommon as jcommon import decimal import json import os import time from random import SystemRandom random = SystemRandom() PRICE_TABLE = { 'api': ('Tax for Commands that use APIs', jcommon.API_TAX_PRICE, \ ('wolframalpha', 'temperature', 'money', 'bitcoin', '8ball', \ 'xkcd', 'sndc')), 'img': ('Price for all commands in `joseimages`', jcommon.IMG_PRICE, \ ('derpibooru', 'hypno', 'e621', 'yandere')), 'opr': ('Operational tax for commands that use a lot of processing', jcommon.OP_TAX_PRICE, \ ('datamosh', 'yt')) } # 1% BASE_CHANCE = decimal.Decimal(1) STEALDB_PATH = 'db/steal.json' ARREST_TIME = 28800 # 8 hours DEFAULT_STEALDB = '''{ "points": {}, "cdown": {}, "period": {} }''' HELPTEXT_JC_STEAL = """ `j!steal` allows you to steal an arbritary amount of money from anyone. use `j!stealstat` to see your status in the stealing business. The chance of getting caught increases the more you steal. When using `j!steal`, `res` and `prob` show up, `res` is a random value and if it is greater than `prob`, you are arrested. `prob` is calculated using the target's current wallet and the amount you want to steal from them. 
""" class JoseCoin(jaux.Auxiliar): def __init__(self, _client): jaux.Auxiliar.__init__(self, _client) self.counter = 0 def to_hours(self, seconds): return seconds / 60 / 60 async def josecoin_save(self, message, dbg_flag=True): res = self.jcoin.save('jcoin/josecoin.db') if not res[0]: self.logger.error("jcerr: %r", res) if message is not None: await self.client.send_message(message.channel, \ "jcerr: `%r`" % res) return res async def josecoin_load(self, message, dbg_flag=True): res = self.jcoin.load('jcoin/josecoin.db') if not res[0]: self.logger.error("jcerr: %r", res) if message is not None: await self.client.send_message(message.channel, \ "jcerr: `%r`" % res) return res async def save_steal_db(self): try: self.logger.info("savedb:stealdb") json.dump(self.stealdb, open(STEALDB_PATH, 'w')) return True, '' except Exception as err: return False, str(err) async def load_steal_db(self): try: self.stealdb = {} if not os.path.isfile(STEALDB_PATH): with open(STEALDB_PATH, 'w') as stealdbfile: stealdbfile.write(DEFAULT_STEALDB) self.stealdb = json.load(open(STEALDB_PATH, 'r')) return True, '' except Exception as err: return False, str(err) async def ext_load(self): res_jc = await self.josecoin_load(None) if not res_jc[0]: return res_jc res_sdb = await self.load_steal_db() if not res_sdb[0]: return res_sdb return True, '' async def ext_unload(self): res_jc = await self.josecoin_load(None) if not res_jc[0]: return res_jc res_sdb = await self.save_steal_db() if not res_sdb[0]: return res_sdb return True, '' async def e_any_message(self, message, cxt): self.counter += 1 if self.counter > 11: await self.josecoin_save(message, False) self.counter = 0 async def e_on_message(self, message, cxt): probability = jcommon.JC_PROBABILITY if message.author.id in self.stealdb['cdown']: # get type of cooldown # type 0 = arrest # type 1 = get more stealing points arrest_data = self.stealdb['cdown'][message.author.id] if arrest_data[1] == 0: probability /= 2 if random.random() > 
probability: return if message.channel.is_private: return author_id = str(message.author.id) if author_id not in self.jcoin.data: return amount = random.choice(jcommon.JC_REWARDS) if amount != 0: res = self.jcoin.transfer(self.jcoin.jose_id, author_id, \ amount, self.jcoin.LEDGER_PATH) if res[0]: # delay because ratelimits???? need to study that await asyncio.sleep(0.5) await self.client.add_reaction(message, '💰') else: jcommon.logger.error("do_josecoin->jc->err: %s", res[1]) await cxt.say("jc->err: %s", (res[1],)) async def c_prices(self, message, args, cxt): '''`j!prices` - show price categories''' res = [] for cat in sorted(PRICE_TABLE): data = PRICE_TABLE[cat] desc = data[0] price = data[1] commands = data[2] _cmdlist = ['j!{}'.format(cmd) for cmd in commands] cmdlist = ', '.join(_cmdlist) res.append("`%s`: %.2fJC, *%s*, `%s`" % (cat, price, desc, cmdlist)) await cxt.say('\n'.join(res)) async def c_wallet(self, message, args, cxt): '''`j!wallet [@mention]` - your wallet(or other person's wallet)''' args = message.content.split(' ') id_check = None if len(args) < 2: id_check = message.author.id else: id_check = await jcommon.parse_id(args[1], message) res = self.jcoin.get(id_check) if res[0]: accdata = res[1] await cxt.say(('%s -> %.2f' % (accdata['name'], accdata['amount']))) else: await cxt.say('account not found(`id:%s`)' % (id_check)) async def c_balance(self, message, args, cxt): '''`j!balance [@mention]` - alias to `j!wallet`''' await self.c_wallet(message, args, cxt) async def c_bal(self, message, args, cxt): '''`j!balance [@mention]` - alias to `j!wallet`''' await self.c_wallet(message, args, cxt) async def c_account(self, message, args, cxt): '''`j!account` - create a new JoséCoin account''' self.logger.info("new jc account, id = %s" % message.author.id) res = self.jcoin.new_acc(message.author.id, str(message.author)) if res[0]: await cxt.say(res[1]) else: await cxt.say('jc->err: %s' % res[1]) async def c_write(self, message, args, cxt): '''`j!write 
@mention new_amount` - Overwrite an account's josecoins''' global data await self.is_admin(message.author.id) if len(args) != 3: await cxt.say(self.c_write.__doc__) return try: id_from = await jcommon.parse_id(args[1], message) new_amount = decimal.Decimal(args[2]) except Exception as e: await cxt.say("huh, exception thingy... `%r`", (e,)) return self.jcoin.data[id_from]['amount'] = new_amount await cxt.say("<@%s> has %.2fJC now" % (id_from, \ self.jcoin.data[id_from]['amount'])) self.logger.info("%s Wrote %.2fJC to Account %s" % \ (str(message.author), new_amount, id_from)) async def c_jcsend(self, message, args, cxt): '''`j!jcsend @mention amount` - send JoséCoins to someone''' if len(args) != 3: await cxt.say(self.c_jcsend.__doc__) return id_to = args[1] try: amount = decimal.Decimal(args[2]) except ValueError: await cxt.say("ValueError: error parsing value") return except Exception as e: await cxt.say("Exception: `%r`" % e) return id_from = message.author.id id_to = await jcommon.parse_id(id_to, message) res = self.jcoin.transfer(id_from, id_to, \ amount, self.jcoin.LEDGER_PATH) await self.josecoin_save(message, False) if res[0]: await cxt.say(res[1]) else: await cxt.say('jc_err: `%s`' % res[1]) async def c_ltop10(self, message, args, cxt): '''`j!ltop10` - local top 10 people who have high josecoins''' if message.server is None: await cxt.say("You're not in a server, dummy!") return guild = message.server jcdata = dict(self.jcoin.data) # copy range_max = 11 # default 10 users if len(args) > 1: range_max = int(args[1]) + 1 if range_max >= 16: await cxt.say("LimitError: values higher than 16 aren't valid") return elif range_max <= 0: await cxt.say("haha no") return maior = { 'id': 0, 'name': '', 'amount': 0.0, } order = [] for i in range(1,range_max): if len(jcdata) < 1: break for member in guild.members: accid = member.id if accid in jcdata: acc = jcdata[accid] name, amount = acc['name'], acc['amount'] if amount > maior['amount']: maior['id'] = accid 
maior['name'] = name maior['amount'] = amount else: pass if maior['id'] in jcdata: del jcdata[maior['id']] order.append('%d. %s -> %.2f' % \ (i, maior['name'], maior['amount'])) # reset to next maior = { 'id': 0, 'name': '', 'amount': 0.0, } await cxt.say('\n'.join(order)) return async def c_top10(self, message, args, cxt): jcdata = dict(self.jcoin.data) # copy range_max = 11 # default 10 users if len(args) > 1: range_max = int(args[1]) + 1 maior = { 'id': 0, 'name': '', 'amount': 0.0, } if range_max >= 16: await cxt.say("LimitError: values higher than 16 aren't valid") return elif range_max <= 0: await cxt.say("haha no") return order = [] for i in range(1,range_max): if len(jcdata) < 1: break for accid in jcdata: acc = jcdata[accid] name, amount = acc['name'], acc['amount'] if amount > maior['amount']: maior['id'] = accid maior['name'] = name maior['amount'] = amount del jcdata[maior['id']] order.append('%d. %s -> %.2f' % \ (i, maior['name'], maior['amount'])) # reset to next maior = { 'id': 0, 'name': '', 'amount': 0.0, } await cxt.say('\n'.join(order)) return async def c_hsteal(self, message, args, cxt): await cxt.say(HELPTEXT_JC_STEAL) async def c_stealstat(self, message, args, cxt): # get status from person personid = message.author.id res = [] points = self.stealdb['points'].get(personid, 3) cooldown = self.stealdb['cdown'].get(personid, None) grace_period = self.stealdb['period'].get(personid, None) res.append("**%s**, you have %d stealing points" % (str(message.author), points)) if cooldown is not None: cooldown_sec, cooldown_type = cooldown cooldown_sec -= time.time() if cooldown_type == 0: res.append(":cop: you're in prison, %.2f hours remaining" % (self.to_hours(cooldown_sec),)) elif cooldown_type == 1: res.append(":alarm_clock: you're waiting for stealing points, %.2f hours remaining" % (self.to_hours(cooldown_sec),)) else: res.append(":warning: unknown cooldown type") if grace_period is not None: grace_period -= time.time() res.append(":angel: you're 
in grace period, %.2f hours remaining" % (self.to_hours(grace_period),)) await cxt.say('\n'.join(res)) async def do_arrest(self, thief_id, amount, arrest_type=0): self.stealdb['cdown'][thief_id] = (time.time() + ARREST_TIME, arrest_type) if arrest_type == 0: # pay half the amount fine = amount / decimal.Decimal(2) res = await self.jcoin.transfer(thief_id, self.jcoin.jose_id, fine) return res async def c_steal(self, message, args, cxt): '''`j!steal @target amount` - Steal JoséCoins from someone''' if len(args) < 2: await cxt.say(self.c_steal.__doc__) return # parse mention try: target_id = await jcommon.parse_id(args[1], message) except: await cxt.say("Error parsing `@target`") return try: amount = decimal.Decimal(args[2]) except: await cxt.say("Error parsing `amount`") return if message.author.id not in self.jcoin.data: await cxt.say("You don't have a josécoin account.") return if target_id not in self.jcoin.data: await cxt.say("The person you're trying to steal from doesn't have a JoséCoin account") return thief_id = message.author.id # check if thief has cooldowns in place cdown = self.stealdb['cdown'].get(thief_id, None) if cdown is not None: cooldown_end, cooldown_type = cdown remaining = cooldown_end - time.time() if cooldown_type == 0: await cxt.say(":cop: You are still in prison, wait %.2f hours", (self.to_hours(remaining),)) elif cooldown_type == 1: if remaining > 1: await cxt.say("Wait %.2f hours to regenerate your stealing points", (self.to_hours(remaining),)) else: await cxt.say("Stealing points regenerated!") del self.stealdb['points'][thief_id] await self.save_steal_db() return stealuses = self.stealdb['points'].get(thief_id, None) if stealuses is None: self.stealdb['points'][thief_id] = stealuses = 3 thief_user = message.author target_user = await self.client.get_user_info(target_id) grace_period = (self.stealdb['period'].get(target_id, 0) - time.time()) if grace_period > 0: await cxt.say("Target is in :angel: grace period :angel:") await cxt.say("%s 
tried to steal %.2fJC from you, but you have %.2f hours of grace period", \ target_user, (str(thief_user), amount, self.to_hours(grace_period))) return if stealuses < 1: res = await self.do_arrest(thief_id, amount, 1) await cxt.say("You don't have any more stealing points, wait 8 hours to get more.") return if target_id == self.jcoin.jose_id: arrest = await self.do_arrest(thief_id, amount) await cxt.say(":cop: You can't steal from José. Arrested for 8h\n`%s`", (arrest[1],)) return target_account = self.jcoin.get(target_id)[1] target_amount = target_account['amount'] if amount > target_amount: # automatically in prison arrest = await self.do_arrest(thief_id, amount) await cxt.say(":cop: Arrested because you tried to steal more than the target has, 8h jail time.\n`%s`", \ (arrest[1])) D = decimal.Decimal chance = (BASE_CHANCE + (target_amount / amount)) * D(0.3) if chance > 8: chance = 5 res = random.random() * 10 if res < chance: self.logger.info("Stealing %.2fJC from %s[%s] to %s[%s]", \ amount, target_account['name'], target_id, message.author, thief_id) # steal went good, make transfer ok = self.jcoin.transfer(target_id, thief_id, amount) # check transfer status if not ok[0]: await cxt.say("jc->err: %s", ok[1]) else: await cxt.say("`[res: %.2f < prob: %.2f]` Stealing went well, nobody noticed, you thief. \n`%s`", \ (res, chance, ok[1])) await cxt.say(":gun: You got robbed! The thief(%s) stole `%.2fJC` from you. 3 hour grace period", \ target_user, (str(thief_user), amount)) self.stealdb['period'][target_id] = time.time() + 10800 self.stealdb['points'][message.author.id] -= 1 else: # type 0 cooldown, you got arrested arrest = await self.do_arrest(thief_id, amount) await cxt.say("`[res: %.2f > prob: %.2f]` :cop: Arrested! 
got 8h cooldown.\n`%s`", \ (res, chance, arrest[1])) await self.save_steal_db() async def c_roubar(self, message, args, cxt): '''`j!roubar @target amount` - alias for `j!steal`''' await self.c_steal(message, args, cxt) jcoin: fix limiting in top10 commands #!/usr/bin/env python3 import asyncio import sys sys.path.append("..") import jauxiliar as jaux import josecommon as jcommon import decimal import json import os import time from random import SystemRandom random = SystemRandom() PRICE_TABLE = { 'api': ('Tax for Commands that use APIs', jcommon.API_TAX_PRICE, \ ('wolframalpha', 'temperature', 'money', 'bitcoin', '8ball', \ 'xkcd', 'sndc')), 'img': ('Price for all commands in `joseimages`', jcommon.IMG_PRICE, \ ('derpibooru', 'hypno', 'e621', 'yandere')), 'opr': ('Operational tax for commands that use a lot of processing', jcommon.OP_TAX_PRICE, \ ('datamosh', 'yt')) } # 1% BASE_CHANCE = decimal.Decimal(1) STEALDB_PATH = 'db/steal.json' ARREST_TIME = 28800 # 8 hours DEFAULT_STEALDB = '''{ "points": {}, "cdown": {}, "period": {} }''' HELPTEXT_JC_STEAL = """ `j!steal` allows you to steal an arbritary amount of money from anyone. use `j!stealstat` to see your status in the stealing business. The chance of getting caught increases the more you steal. When using `j!steal`, `res` and `prob` show up, `res` is a random value and if it is greater than `prob`, you are arrested. `prob` is calculated using the target's current wallet and the amount you want to steal from them. 
""" class JoseCoin(jaux.Auxiliar): def __init__(self, _client): jaux.Auxiliar.__init__(self, _client) self.counter = 0 def to_hours(self, seconds): return seconds / 60 / 60 async def josecoin_save(self, message, dbg_flag=True): res = self.jcoin.save('jcoin/josecoin.db') if not res[0]: self.logger.error("jcerr: %r", res) if message is not None: await self.client.send_message(message.channel, \ "jcerr: `%r`" % res) return res async def josecoin_load(self, message, dbg_flag=True): res = self.jcoin.load('jcoin/josecoin.db') if not res[0]: self.logger.error("jcerr: %r", res) if message is not None: await self.client.send_message(message.channel, \ "jcerr: `%r`" % res) return res async def save_steal_db(self): try: self.logger.info("savedb:stealdb") json.dump(self.stealdb, open(STEALDB_PATH, 'w')) return True, '' except Exception as err: return False, str(err) async def load_steal_db(self): try: self.stealdb = {} if not os.path.isfile(STEALDB_PATH): with open(STEALDB_PATH, 'w') as stealdbfile: stealdbfile.write(DEFAULT_STEALDB) self.stealdb = json.load(open(STEALDB_PATH, 'r')) return True, '' except Exception as err: return False, str(err) async def ext_load(self): res_jc = await self.josecoin_load(None) if not res_jc[0]: return res_jc res_sdb = await self.load_steal_db() if not res_sdb[0]: return res_sdb return True, '' async def ext_unload(self): res_jc = await self.josecoin_load(None) if not res_jc[0]: return res_jc res_sdb = await self.save_steal_db() if not res_sdb[0]: return res_sdb return True, '' async def e_any_message(self, message, cxt): self.counter += 1 if self.counter > 11: await self.josecoin_save(message, False) self.counter = 0 async def e_on_message(self, message, cxt): probability = jcommon.JC_PROBABILITY if message.author.id in self.stealdb['cdown']: # get type of cooldown # type 0 = arrest # type 1 = get more stealing points arrest_data = self.stealdb['cdown'][message.author.id] if arrest_data[1] == 0: probability /= 2 if random.random() > 
probability: return if message.channel.is_private: return author_id = str(message.author.id) if author_id not in self.jcoin.data: return amount = random.choice(jcommon.JC_REWARDS) if amount != 0: res = self.jcoin.transfer(self.jcoin.jose_id, author_id, \ amount, self.jcoin.LEDGER_PATH) if res[0]: # delay because ratelimits???? need to study that await asyncio.sleep(0.5) await self.client.add_reaction(message, '💰') else: jcommon.logger.error("do_josecoin->jc->err: %s", res[1]) await cxt.say("jc->err: %s", (res[1],)) async def c_prices(self, message, args, cxt): '''`j!prices` - show price categories''' res = [] for cat in sorted(PRICE_TABLE): data = PRICE_TABLE[cat] desc = data[0] price = data[1] commands = data[2] _cmdlist = ['j!{}'.format(cmd) for cmd in commands] cmdlist = ', '.join(_cmdlist) res.append("`%s`: %.2fJC, *%s*, `%s`" % (cat, price, desc, cmdlist)) await cxt.say('\n'.join(res)) async def c_wallet(self, message, args, cxt): '''`j!wallet [@mention]` - your wallet(or other person's wallet)''' args = message.content.split(' ') id_check = None if len(args) < 2: id_check = message.author.id else: id_check = await jcommon.parse_id(args[1], message) res = self.jcoin.get(id_check) if res[0]: accdata = res[1] await cxt.say(('%s -> %.2f' % (accdata['name'], accdata['amount']))) else: await cxt.say('account not found(`id:%s`)' % (id_check)) async def c_balance(self, message, args, cxt): '''`j!balance [@mention]` - alias to `j!wallet`''' await self.c_wallet(message, args, cxt) async def c_bal(self, message, args, cxt): '''`j!balance [@mention]` - alias to `j!wallet`''' await self.c_wallet(message, args, cxt) async def c_account(self, message, args, cxt): '''`j!account` - create a new JoséCoin account''' self.logger.info("new jc account, id = %s" % message.author.id) res = self.jcoin.new_acc(message.author.id, str(message.author)) if res[0]: await cxt.say(res[1]) else: await cxt.say('jc->err: %s' % res[1]) async def c_write(self, message, args, cxt): '''`j!write 
@mention new_amount` - Overwrite an account's josecoins''' global data await self.is_admin(message.author.id) if len(args) != 3: await cxt.say(self.c_write.__doc__) return try: id_from = await jcommon.parse_id(args[1], message) new_amount = decimal.Decimal(args[2]) except Exception as e: await cxt.say("huh, exception thingy... `%r`", (e,)) return self.jcoin.data[id_from]['amount'] = new_amount await cxt.say("<@%s> has %.2fJC now" % (id_from, \ self.jcoin.data[id_from]['amount'])) self.logger.info("%s Wrote %.2fJC to Account %s" % \ (str(message.author), new_amount, id_from)) async def c_jcsend(self, message, args, cxt): '''`j!jcsend @mention amount` - send JoséCoins to someone''' if len(args) != 3: await cxt.say(self.c_jcsend.__doc__) return id_to = args[1] try: amount = decimal.Decimal(args[2]) except ValueError: await cxt.say("ValueError: error parsing value") return except Exception as e: await cxt.say("Exception: `%r`" % e) return id_from = message.author.id id_to = await jcommon.parse_id(id_to, message) res = self.jcoin.transfer(id_from, id_to, \ amount, self.jcoin.LEDGER_PATH) await self.josecoin_save(message, False) if res[0]: await cxt.say(res[1]) else: await cxt.say('jc_err: `%s`' % res[1]) async def c_ltop10(self, message, args, cxt): '''`j!ltop10` - local top 10 people who have high josecoins''' if message.server is None: await cxt.say("You're not in a server, dummy!") return guild = message.server jcdata = dict(self.jcoin.data) # copy range_max = 11 # default 10 users if len(args) > 1: range_max = int(args[1]) + 1 if range_max > 16: await cxt.say("LimitError: values higher than 16 aren't valid") return elif range_max <= 0: await cxt.say("haha no") return maior = { 'id': 0, 'name': '', 'amount': 0.0, } order = [] for i in range(1,range_max): if len(jcdata) < 1: break for member in guild.members: accid = member.id if accid in jcdata: acc = jcdata[accid] name, amount = acc['name'], acc['amount'] if amount > maior['amount']: maior['id'] = accid 
maior['name'] = name maior['amount'] = amount else: pass if maior['id'] in jcdata: del jcdata[maior['id']] order.append('%d. %s -> %.2f' % \ (i, maior['name'], maior['amount'])) # reset to next maior = { 'id': 0, 'name': '', 'amount': 0.0, } await cxt.say('\n'.join(order)) return async def c_top10(self, message, args, cxt): jcdata = dict(self.jcoin.data) # copy range_max = 11 # default 10 users if len(args) > 1: range_max = int(args[1]) + 1 maior = { 'id': 0, 'name': '', 'amount': 0.0, } if range_max > 16: await cxt.say("LimitError: values higher than 16 aren't valid") return elif range_max <= 0: await cxt.say("haha no") return order = [] for i in range(1,range_max): if len(jcdata) < 1: break for accid in jcdata: acc = jcdata[accid] name, amount = acc['name'], acc['amount'] if amount > maior['amount']: maior['id'] = accid maior['name'] = name maior['amount'] = amount del jcdata[maior['id']] order.append('%d. %s -> %.2f' % \ (i, maior['name'], maior['amount'])) # reset to next maior = { 'id': 0, 'name': '', 'amount': 0.0, } await cxt.say('\n'.join(order)) return async def c_hsteal(self, message, args, cxt): await cxt.say(HELPTEXT_JC_STEAL) async def c_stealstat(self, message, args, cxt): # get status from person personid = message.author.id res = [] points = self.stealdb['points'].get(personid, 3) cooldown = self.stealdb['cdown'].get(personid, None) grace_period = self.stealdb['period'].get(personid, None) res.append("**%s**, you have %d stealing points" % (str(message.author), points)) if cooldown is not None: cooldown_sec, cooldown_type = cooldown cooldown_sec -= time.time() if cooldown_type == 0: res.append(":cop: you're in prison, %.2f hours remaining" % (self.to_hours(cooldown_sec),)) elif cooldown_type == 1: res.append(":alarm_clock: you're waiting for stealing points, %.2f hours remaining" % (self.to_hours(cooldown_sec),)) else: res.append(":warning: unknown cooldown type") if grace_period is not None: grace_period -= time.time() res.append(":angel: you're in 
grace period, %.2f hours remaining" % (self.to_hours(grace_period),)) await cxt.say('\n'.join(res)) async def do_arrest(self, thief_id, amount, arrest_type=0): self.stealdb['cdown'][thief_id] = (time.time() + ARREST_TIME, arrest_type) if arrest_type == 0: # pay half the amount fine = amount / decimal.Decimal(2) res = await self.jcoin.transfer(thief_id, self.jcoin.jose_id, fine) return res async def c_steal(self, message, args, cxt): '''`j!steal @target amount` - Steal JoséCoins from someone''' if len(args) < 2: await cxt.say(self.c_steal.__doc__) return # parse mention try: target_id = await jcommon.parse_id(args[1], message) except: await cxt.say("Error parsing `@target`") return try: amount = decimal.Decimal(args[2]) except: await cxt.say("Error parsing `amount`") return if message.author.id not in self.jcoin.data: await cxt.say("You don't have a josécoin account.") return if target_id not in self.jcoin.data: await cxt.say("The person you're trying to steal from doesn't have a JoséCoin account") return thief_id = message.author.id # check if thief has cooldowns in place cdown = self.stealdb['cdown'].get(thief_id, None) if cdown is not None: cooldown_end, cooldown_type = cdown remaining = cooldown_end - time.time() if cooldown_type == 0: await cxt.say(":cop: You are still in prison, wait %.2f hours", (self.to_hours(remaining),)) elif cooldown_type == 1: if remaining > 1: await cxt.say("Wait %.2f hours to regenerate your stealing points", (self.to_hours(remaining),)) else: await cxt.say("Stealing points regenerated!") del self.stealdb['points'][thief_id] await self.save_steal_db() return stealuses = self.stealdb['points'].get(thief_id, None) if stealuses is None: self.stealdb['points'][thief_id] = stealuses = 3 thief_user = message.author target_user = await self.client.get_user_info(target_id) grace_period = (self.stealdb['period'].get(target_id, 0) - time.time()) if grace_period > 0: await cxt.say("Target is in :angel: grace period :angel:") await cxt.say("%s 
tried to steal %.2fJC from you, but you have %.2f hours of grace period", \ target_user, (str(thief_user), amount, self.to_hours(grace_period))) return if stealuses < 1: res = await self.do_arrest(thief_id, amount, 1) await cxt.say("You don't have any more stealing points, wait 8 hours to get more.") return if target_id == self.jcoin.jose_id: arrest = await self.do_arrest(thief_id, amount) await cxt.say(":cop: You can't steal from José. Arrested for 8h\n`%s`", (arrest[1],)) return target_account = self.jcoin.get(target_id)[1] target_amount = target_account['amount'] if amount > target_amount: # automatically in prison arrest = await self.do_arrest(thief_id, amount) await cxt.say(":cop: Arrested because you tried to steal more than the target has, 8h jail time.\n`%s`", \ (arrest[1])) D = decimal.Decimal chance = (BASE_CHANCE + (target_amount / amount)) * D(0.3) if chance > 8: chance = 5 res = random.random() * 10 if res < chance: self.logger.info("Stealing %.2fJC from %s[%s] to %s[%s]", \ amount, target_account['name'], target_id, message.author, thief_id) # steal went good, make transfer ok = self.jcoin.transfer(target_id, thief_id, amount) # check transfer status if not ok[0]: await cxt.say("jc->err: %s", ok[1]) else: await cxt.say("`[res: %.2f < prob: %.2f]` Stealing went well, nobody noticed, you thief. \n`%s`", \ (res, chance, ok[1])) await cxt.say(":gun: You got robbed! The thief(%s) stole `%.2fJC` from you. 3 hour grace period", \ target_user, (str(thief_user), amount)) self.stealdb['period'][target_id] = time.time() + 10800 self.stealdb['points'][message.author.id] -= 1 else: # type 0 cooldown, you got arrested arrest = await self.do_arrest(thief_id, amount) await cxt.say("`[res: %.2f > prob: %.2f]` :cop: Arrested! got 8h cooldown.\n`%s`", \ (res, chance, arrest[1])) await self.save_steal_db() async def c_roubar(self, message, args, cxt): '''`j!roubar @target amount` - alias for `j!steal`''' await self.c_steal(message, args, cxt)
# coding=utf-8
import json
import re
from urllib.parse import urlparse, parse_qs

from flask import current_app
from lxml import html
import mock
import pytest

from ...helpers import BaseApplicationTest, BaseAPIClientMixin
from dmtestutils.api_model_stubs import FrameworkStub
from dmtestutils.api_model_stubs.lot import dos_lots, cloud_lots


class APIClientMixin(BaseAPIClientMixin):
    # Patch paths for the API client singletons used by the marketplace views,
    # so each test gets mocked data/search API clients.
    data_api_client_patch_path = 'app.main.views.marketplace.data_api_client'
    search_api_client_patch_path = 'app.main.views.marketplace.search_api_client'


class TestApplication(APIClientMixin, BaseApplicationTest):
    """Smoke tests for application-wide assets and homepage boilerplate."""

    def test_analytics_code_should_be_in_javascript(self):
        res = self.client.get('/static/javascripts/application.js')
        assert res.status_code == 200
        assert 'trackPageview' in res.get_data(as_text=True)

    def test_should_display_cookie_banner(self):
        res = self.client.get('/')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        cookie_banner = document.xpath('//div[@id="dm-cookie-banner"]')
        assert cookie_banner[0].xpath('//h2//text()')[0].strip() == "Can we store analytics cookies on your device?"
        assert len(self.data_api_client.find_frameworks.call_args_list) == 2

    def test_google_verification_code_shown_on_homepage(self):
        res = self.client.get('/')
        assert res.status_code == 200
        assert 'name="google-site-verification" content="NotARealVerificationKey"' in res.get_data(as_text=True)
        assert len(self.data_api_client.find_frameworks.call_args_list) == 2


class TestHomepageAccountCreationVirtualPageViews(APIClientMixin, BaseApplicationTest):
    """The 'account created' flash should render as an analytics virtual
    page view, not as a visible flash banner."""

    def test_data_analytics_track_page_view_is_shown_if_account_created_flash_message(self):
        with self.client.session_transaction() as session:
            session['_flashes'] = [('track-page-view', 'buyers?account-created=true')]

        res = self.client.get("/")
        data = res.get_data(as_text=True)

        assert 'data-analytics="trackPageView" data-url="buyers?account-created=true"' in data

        # however this should not be shown as a regular flash message
        flash_banner_match = re.search(r'<p class="banner-message">\s*(.*)', data, re.MULTILINE)
        assert flash_banner_match is None, "Unexpected flash banner message '{}'.".format(
            flash_banner_match.groups()[0])
        assert len(self.data_api_client.find_frameworks.call_args_list) == 2

    def test_data_analytics_track_page_view_not_shown_if_no_account_created_flash_message(self):
        res = self.client.get("/")
        data = res.get_data(as_text=True)

        assert 'data-analytics="trackPageView" data-url="buyers?account-created=true"' not in data
        assert len(self.data_api_client.find_frameworks.call_args_list) == 2


class TestHomepageBrowseList(APIClientMixin, BaseApplicationTest):
    """Which buyer-facing links appear on the homepage for various
    combinations of live/standstill/expired frameworks."""

    # minimal framework stubs returned by the mocked find_frameworks
    mock_live_dos_1_framework = {
        "framework": "digital-outcomes-and-specialists",
        "slug": "digital-outcomes-and-specialists",
        "status": "live",
        "id": 5
    }

    mock_live_dos_2_framework = {
        "framework": "digital-outcomes-and-specialists",
        "slug": "digital-outcomes-and-specialists-2",
        "status": "live",
        "id": 7
    }

    mock_live_g_cloud_9_framework = {
        "framework": "g-cloud",
        "slug": "g-cloud-9",
        "status": "live",
        "id": 8
    }

    def test_dos_links_are_shown(self):
        self.data_api_client.find_frameworks.return_value = {
            "frameworks": [
                self.mock_live_dos_1_framework
            ]
        }

        res = self.client.get("/")
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        link_texts = [item.text_content().strip() for item in document.cssselect('#app-buyer-nav a')]
        assert link_texts[0] == "Find an individual specialist"
        assert link_texts[-1] == "Find physical datacentre space"
        assert "Find specialists to work on digital projects" not in link_texts

    def test_links_are_for_existing_dos_framework_when_a_new_dos_framework_in_standstill_exists(self):
        # a standstill framework should not replace the live one in the links
        mock_standstill_dos_2_framework = self.mock_live_dos_2_framework.copy()
        mock_standstill_dos_2_framework.update({"status": "standstill"})

        self.data_api_client.find_frameworks.return_value = {
            "frameworks": [
                self.mock_live_dos_1_framework, mock_standstill_dos_2_framework,
            ]
        }

        res = self.client.get("/")
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        link_locations = [item.values()[1] for item in document.cssselect('#app-buyer-nav a')]

        lots = ['digital-specialists', 'digital-outcomes', 'user-research-participants', 'user-research-studios']
        dos_base_path = '/buyers/frameworks/digital-outcomes-and-specialists/requirements/{}'

        for index, lot_slug in enumerate(lots):
            assert link_locations[index] == dos_base_path.format(lot_slug)

    def test_links_are_for_the_newest_live_dos_framework_when_multiple_live_dos_frameworks_exist(self):
        self.data_api_client.find_frameworks.return_value = {
            "frameworks": [
                self.mock_live_dos_1_framework, self.mock_live_dos_2_framework,
            ]
        }

        res = self.client.get("/")
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        link_locations = [item.values()[1] for item in document.cssselect('#app-buyer-nav a')]

        lots = ['digital-specialists', 'digital-outcomes', 'user-research-participants', 'user-research-studios']
        # links should point at the newer "-2" framework
        dos2_base_path = '/buyers/frameworks/digital-outcomes-and-specialists-2/requirements/{}'

        for index, lot_slug in enumerate(lots):
            assert link_locations[index] == dos2_base_path.format(lot_slug)

    def test_links_are_for_live_dos_framework_when_expired_dos_framework_exists(self):
        mock_expired_dos_1_framework = self.mock_live_dos_1_framework.copy()
        mock_expired_dos_1_framework.update({"status": "expired"})

        self.data_api_client.find_frameworks.return_value = {
            "frameworks": [
                mock_expired_dos_1_framework, self.mock_live_dos_2_framework,
            ]
        }

        res = self.client.get("/")
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        link_locations = [item.values()[1] for item in document.cssselect('#app-buyer-nav a')]

        lots = ['digital-specialists', 'digital-outcomes', 'user-research-participants', 'user-research-studios']
        dos2_base_path = '/buyers/frameworks/digital-outcomes-and-specialists-2/requirements/{}'

        for index, lot_slug in enumerate(lots):
            assert link_locations[index] == dos2_base_path.format(lot_slug)

    def test_non_dos_links_are_shown_if_no_live_dos_framework(self):
        # with only G-Cloud live, the DOS lot links disappear entirely
        mock_expired_dos_1_framework = self.mock_live_dos_1_framework.copy()
        mock_expired_dos_1_framework.update({"status": "expired"})
        mock_expired_dos_2_framework = self.mock_live_dos_2_framework.copy()
        mock_expired_dos_2_framework.update({"status": "expired"})
        mock_g_cloud_9_framework = self.mock_live_g_cloud_9_framework.copy()

        self.data_api_client.find_frameworks.return_value = {
            "frameworks": [
                mock_expired_dos_1_framework, mock_expired_dos_2_framework, mock_g_cloud_9_framework,
            ]
        }

        res = self.client.get("/")
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        link_texts = [item.text_content().strip() for item in document.cssselect('#app-buyer-nav a')]
        assert link_texts[0] == "Find cloud hosting, software and support"
        assert link_texts[1] == "Find physical datacentre space"
        assert len(link_texts) == 2


class TestHomepageSidebarMessage(APIClientMixin, BaseApplicationTest):
    """Supplier-sidebar messaging driven by framework statuses."""

    @staticmethod
    def _find_frameworks(framework_slugs_and_statuses):
        # Build a find_frameworks-shaped response from (slug, status) pairs.
        _frameworks = []

        for index, framework_slug_and_status in enumerate(framework_slugs_and_statuses):
            framework_slug, framework_status = framework_slug_and_status
            _frameworks.append({
                'framework': 'framework',
                'slug': framework_slug,
                'id': index + 1,
                'status': framework_status,
                'name': 'Framework'
            })

        return {
            'frameworks': _frameworks
        }

    @staticmethod
    def _assert_supplier_nav_is_empty(response_data):
        document = html.fromstring(response_data)
        supplier_nav_contents = document.xpath('//nav[@id="app-supplier-nav"]/*')
        assert len(supplier_nav_contents) == 0

    @staticmethod
    def _assert_supplier_nav_is_not_empty(response_data):
        document = html.fromstring(response_data)
        supplier_nav_contents = document.xpath('//nav[@id="app-supplier-nav"]/*')
        assert len(supplier_nav_contents) > 0
        assert supplier_nav_contents[0].xpath('text()')[0].strip() == "Sell services"

    def _load_homepage(self, framework_slugs_and_statuses, framework_messages):
        # Shared driver: stub the frameworks, load '/', then assert every
        # expected sidebar message appears (or that the nav is empty).
        self.data_api_client.find_frameworks.return_value = self._find_frameworks(framework_slugs_and_statuses)

        res = self.client.get('/')
        assert res.status_code == 200
        response_data = res.get_data(as_text=True)

        if framework_messages:
            self._assert_supplier_nav_is_not_empty(response_data)
            for message in framework_messages:
                assert message in response_data
        else:
            self._assert_supplier_nav_is_empty(response_data)

    def test_homepage_sidebar_message_exists_gcloud_8_coming(self):
        framework_slugs_and_statuses = [
            ('g-cloud-8', 'coming'),
            ('digital-outcomes-and-specialists', 'live')
        ]
        framework_messages = [
            u"Provide cloud software and support to the public sector.",
            u"You need an account to receive notifications about when you can apply."
        ]

        self._load_homepage(framework_slugs_and_statuses, framework_messages)

    def test_homepage_sidebar_message_exists_gcloud_8_open(self):
        framework_slugs_and_statuses = [
            ('g-cloud-8', 'open'),
            ('digital-outcomes-and-specialists', 'live')
        ]
        framework_messages = [
            u"Provide cloud software and support to the public sector",
            u"You need an account to apply.",
            u"The application deadline is 5pm BST, 23 June 2016."
        ]

        self._load_homepage(framework_slugs_and_statuses, framework_messages)

    def test_homepage_sidebar_message_exists_g_cloud_7_pending(self):
        framework_slugs_and_statuses = [
            ('g-cloud-7', 'pending'),
        ]
        framework_messages = [
            u"G‑Cloud 7 is closed for applications",
            u"G‑Cloud 7 services will be available from 23 November 2015."
        ]

        self._load_homepage(framework_slugs_and_statuses, framework_messages)

    def test_homepage_sidebar_messages_when_logged_out(self):
        self.data_api_client.find_frameworks.return_value = self._find_frameworks([
            ('digital-outcomes-and-specialists', 'live')
        ])
        res = self.client.get('/')
        assert res.status_code == 200

        response_data = res.get_data(as_text=True)
        document = html.fromstring(response_data)

        supplier_links = document.cssselect("#app-supplier-nav a")
        supplier_link_texts = [item.xpath("normalize-space(string())") for item in supplier_links]
        assert 'View Digital Outcomes and Specialists opportunities' in supplier_link_texts
        assert 'Become a supplier' in supplier_link_texts
        assert 'See Digital Marketplace sales figures' in supplier_link_texts

    def test_homepage_sidebar_messages_when_logged_in(self):
        self.data_api_client.find_frameworks.return_value = self._find_frameworks([
            ('digital-outcomes-and-specialists', 'live')
        ])
        self.login_as_supplier()

        res = self.client.get('/')
        assert res.status_code == 200

        response_data = res.get_data(as_text=True)
        document = html.fromstring(response_data)

        supplier_links = document.cssselect("#app-supplier-nav a")
        supplier_link_texts = [item.xpath("normalize-space(string())") for item in supplier_links]
        assert 'View Digital Outcomes and Specialists opportunities' in supplier_link_texts
        # logged-in suppliers should not be invited to become a supplier
        assert 'Become a supplier' not in supplier_link_texts

    # here we've given an valid framework with a valid status but there is no message.yml file to read from
    def test_g_cloud_6_open_blows_up(self):
        framework_slugs_and_statuses = [
            ('g-cloud-6', 'open')
        ]

        self.data_api_client.find_frameworks.return_value = self._find_frameworks(framework_slugs_and_statuses)

        res = self.client.get('/')
        assert res.status_code == 500


class TestStaticMarketplacePages(BaseApplicationTest):
    """Static content pages that need no API stubbing."""

    def test_cookie_page(self):
        res = self.client.get('/cookies')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        assert len(document.xpath('//h1[contains(text(), "Cookies on Digital Marketplace")]')) == 1

    def test_terms_and_conditions_page(self):
        res = self.client.get('/terms-and-conditions')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        assert len(document.xpath('//h1[contains(text(), "Terms and conditions")]')) == 1

    def test_external_404_makes_all_links_absolute(self):
        # Get the normal 404 page and a list of the relative URLs it contains links to
        response1 = self.client.get("/does-not-exist-404")
        assert response1.status_code == 404
        regular_404_document = html.fromstring(response1.get_data(as_text=True))
        regular_relative_links = regular_404_document.xpath('//a[starts-with(@href, "/")]')
        regular_relative_forms = regular_404_document.xpath('//form[starts-with(@action, "/")]')
        relative_urls = [link.get("href") for link in regular_relative_links] + \
            [form.get("action") for form in regular_relative_forms]

        # Get the "external" 404 page and check it doesn't contain any relative URLs
        response2 = self.client.get("/404")
        assert response2.status_code == 404
        external_404_document = html.fromstring(response2.get_data(as_text=True))
        external_relative_links = external_404_document.xpath('//a[starts-with(@href, "/")]')
        external_relative_forms = external_404_document.xpath('//form[starts-with(@action, "/")]')
        assert len(external_relative_links) == len(external_relative_forms) == 0

        # Check that there is an absolute URL in the external 404 page for every relative URL in the normal 404 page
        external_links = external_404_document.xpath('//a')
        external_forms = external_404_document.xpath('//form')
        external_urls = [link.get("href") for link in external_links] + [form.get("action") for form in external_forms]
        for relative_url in relative_urls:
            assert "http://localhost{}".format(relative_url) in external_urls


class BaseBriefPageTest(APIClientMixin, BaseApplicationTest):
    """Shared setup: loads a DOS brief fixture and wires all mocked API calls."""

    def setup_method(self, method):
        super().setup_method(method)
        self.brief = self._get_dos_brief_fixture_data()
        self.brief_responses = self._get_dos_brief_responses_fixture_data()
        self.brief_id = self.brief['briefs']['id']
        self.data_api_client.find_frameworks.return_value = self._get_frameworks_list_fixture_data()
        self.data_api_client.get_brief.return_value = self.brief
        self.data_api_client.find_brief_responses.return_value = self.brief_responses


class TestBriefPage(BaseBriefPageTest):

    @pytest.mark.parametrize('framework_family, expected_status_code', (
        ('digital-outcomes-and-specialists', 200),
        ('g-cloud', 404),
    ))
    def test_404_on_framework_that_does_not_support_further_competition(self, framework_family, expected_status_code):
        brief_id = self.brief['briefs']['id']
        res = self.client.get(f'/{framework_family}/opportunities/{brief_id}')
        assert res.status_code == expected_status_code
        assert self.data_api_client.find_frameworks.mock_calls == [
            mock.call(),
        ]

    def test_dos_brief_404s_if_brief_is_draft(self):
        self.brief['briefs']['status'] = 'draft'
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 404
        assert self.data_api_client.mock_calls == [
            mock.call.find_frameworks(),
            mock.call.get_brief(str(brief_id)),
        ]

    def 
test_dos_brief_has_correct_title(self): brief_id = self.brief['briefs']['id'] res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id)) assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) page_heading = document.cssselect("span.govuk-caption-l + h1.govuk-heading-l") assert page_heading heading = page_heading[0] assert heading.text == self.brief["briefs"]["title"] caption = heading.getprevious() assert caption.text == self.brief["briefs"]["organisation"] def _assert_all_normal_api_calls(self): assert self.data_api_client.mock_calls == [ mock.call.find_frameworks(), mock.call.get_brief(str(self.brief_id)), mock.call.find_brief_responses( brief_id=str(self.brief_id), status='draft,submitted,pending-awarded,awarded', with_data=False, ), ] @pytest.mark.parametrize('status', ['closed', 'unsuccessful', 'cancelled', 'awarded']) def test_only_one_banner_at_once_brief_page(self, status): self.brief['briefs']['status'] = status if self.brief['briefs']['status'] == 'awarded': self.brief['briefs']['awardedBriefResponseId'] = 14276 res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id)) document = html.fromstring(res.get_data(as_text=True)) number_of_banners = len(document.xpath('//div[@class="banner-temporary-message-without-action"]')) assert number_of_banners == 1 self._assert_all_normal_api_calls() def test_dos_brief_displays_application_stats(self): brief_id = self.brief['briefs']['id'] res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id)) assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) incomplete_responses_section = document.xpath('//div[@id="incomplete-applications"]')[0] completed_responses_section = document.xpath('//div[@id="completed-applications"]')[0] assert incomplete_responses_section.xpath('h2//span[1]/text()')[0] == '3' assert 
incomplete_responses_section.xpath('h2//span[2]/text()')[0] == "Incomplete applications" assert incomplete_responses_section.xpath('p[1]/text()')[0] == "3 SME, 0 large" assert completed_responses_section.xpath('h2//span[1]/text()')[0] == '5' assert completed_responses_section.xpath('h2//span[2]/text()')[0] == "Completed applications" assert completed_responses_section.xpath('p[1]/text()')[0] == "4 SME, 1 large" self._assert_all_normal_api_calls() def test_application_stats_pluralised_correctly(self): brief_id = self.brief['briefs']['id'] self.data_api_client.find_brief_responses.return_value = { "briefResponses": [ { "id": 14275, "briefId": brief_id, "createdAt": "2016-12-02T11:09:28.054129Z", "status": "submitted", "submittedAt": "2016-12-05T11:09:28.054129Z", "supplierId": 1234, "supplierOrganisationSize": 'large' }, { "id": 14276, "briefId": brief_id, "createdAt": "2016-12-02T11:09:28.054129Z", "status": "draft", "submittedAt": "2016-12-05T11:09:28.054129Z", "supplierId": 706033, "supplierOrganisationSize": 'micro' } ] } res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id)) assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) incomplete_responses_section = document.xpath('//div[@id="incomplete-applications"]')[0] completed_responses_section = document.xpath('//div[@id="completed-applications"]')[0] assert incomplete_responses_section.xpath('h2//span[1]/text()')[0] == '1' assert incomplete_responses_section.xpath('h2//span[2]/text()')[0] == "Incomplete application" assert incomplete_responses_section.xpath('p[1]/text()')[0] == "1 SME, 0 large" assert completed_responses_section.xpath('h2//span[1]/text()')[0] == '1' assert completed_responses_section.xpath('h2//span[2]/text()')[0] == "Completed application" assert completed_responses_section.xpath('p[1]/text()')[0] == "0 SME, 1 large" def test_dos_brief_displays_application_stats_correctly_when_no_applications(self): brief_id = 
self.brief['briefs']['id'] self.data_api_client.find_brief_responses.return_value = {"briefResponses": []} res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id)) assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) incomplete_responses_section = document.xpath('//div[@id="incomplete-applications"]')[0] completed_responses_section = document.xpath('//div[@id="completed-applications"]')[0] assert incomplete_responses_section.xpath('h2//span[1]/text()')[0] == '0' assert completed_responses_section.xpath('h2//span[1]/text()')[0] == '0' assert incomplete_responses_section.xpath('h2//span[2]/text()')[0] == "Incomplete applications" assert completed_responses_section.xpath('h2//span[2]/text()')[0] == "Completed applications" assert len(incomplete_responses_section.xpath('p[1]/text()')) == 0 assert len(completed_responses_section.xpath('p[1]/text()')) == 0 def test_dos_brief_has_lot_analytics_string(self): brief = self.brief['briefs'] res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief['id'])) assert res.status_code == 200 data = res.get_data(as_text=True) analytics_string = '<span data-lot="{lot_slug}"></span>'.format(lot_slug=brief['lotSlug']) assert analytics_string in data def test_dos_brief_has_important_dates(self): brief_id = self.brief['briefs']['id'] self.brief['briefs']['clarificationQuestionsClosedAt'] = "2016-12-14T11:08:28.054129Z" self.brief['briefs']['applicationsClosedAt'] = "2016-12-15T11:08:28.054129Z" res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id)) assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) brief_important_dates = document.xpath( '(//table[@class="summary-item-body"])[1]/tbody/tr') assert 3 == len(brief_important_dates) assert brief_important_dates[0].xpath('td[@class="summary-item-field-first"]')[0].text_content().strip() \ == "Published" assert 
        # (continuation of an `assert ... \` statement begun before this chunk)
        brief_important_dates[0].xpath('td[@class="summary-item-field"]')[0].text_content().strip() \
            == "Thursday 1 December 2016"
        assert brief_important_dates[1].xpath('td[@class="summary-item-field-first"]')[0].text_content().strip() \
            == "Deadline for asking questions"
        assert brief_important_dates[1].xpath('td[@class="summary-item-field"]')[0].text_content().strip() \
            == "Wednesday 14 December 2016 at 11:08am GMT"
        assert brief_important_dates[2].xpath('td[@class="summary-item-field-first"]')[0].text_content().strip() \
            == "Closing date for applications"
        assert brief_important_dates[2].xpath('td[@class="summary-item-field"]')[0].text_content().strip() \
            == "Thursday 15 December 2016 at 11:08am GMT"

    def test_dos_brief_with_daylight_savings_has_question_deadline_closing_date_forced_to_utc(self):
        # Timestamps falling inside British Summer Time must still render as
        # 11:59pm GMT on the correct calendar day (not shifted by the DST offset).
        brief_id = self.brief['briefs']['id']
        self.brief['briefs']['publishedAt'] = "2016-08-01T23:59:00.000000Z"
        self.brief['briefs']['clarificationQuestionsClosedAt'] = "2016-08-14T23:59:00.000000Z"
        self.brief['briefs']['applicationsClosedAt'] = "2016-08-15T23:59:00.000000Z"
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        brief_important_dates = document.xpath(
            '(//table[@class="summary-item-body"])[1]/tbody/tr')
        assert 3 == len(brief_important_dates)
        # Publish date does not have UTC filter applied
        assert brief_important_dates[0].xpath('td[@class="summary-item-field"]')[0].text_content().strip() \
            == "Monday 1 August 2016"
        # Question deadline and closing date are forced to 11.59pm (UTC+00) on the correct day
        assert brief_important_dates[1].xpath('td[@class="summary-item-field"]')[0].text_content().strip() \
            == "Sunday 14 August 2016 at 11:59pm GMT"
        assert brief_important_dates[2].xpath('td[@class="summary-item-field"]')[0].text_content().strip() \
            == "Monday 15 August 2016 at 11:59pm GMT"

    def test_dos_brief_has_at_least_one_section(self):
        # The first summary section ('Overview') should expose the start-date and
        # contract-length attribute rows with human-readable values.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        section_heading = document.xpath('//h2[@class="summary-item-heading"]')[0]
        section_attributes = section_heading.xpath('following-sibling::table[1]/tbody/tr')
        start_date_key = section_attributes[2].xpath('td[1]/span/text()')
        start_date_value = section_attributes[2].xpath('td[2]/span/text()')
        contract_length_key = section_attributes[3].xpath('td[1]/span/text()')
        contract_length_value = section_attributes[3].xpath('td[2]/span/text()')
        assert section_heading.get('id') == 'opportunity-attributes-1'
        assert section_heading.text.strip() == 'Overview'
        assert start_date_key[0] == 'Latest start date'
        assert start_date_value[0] == 'Wednesday 1 March 2017'
        assert contract_length_key[0] == 'Expected contract length'
        assert contract_length_value[0] == '4 weeks'

    @pytest.mark.parametrize(
        'lot_slug, assessment_type',
        [
            ('digital-outcomes', 'written proposal'),
            ('digital-specialists', 'work history'),
            ('user-research-participants', 'written proposal'),
        ]
    )
    def test_dos_brief_displays_mandatory_evaluation_method_for_lot(self, lot_slug, assessment_type):
        # Each DOS4 lot names its mandatory assessment type in the
        # "How suppliers will be evaluated" section.
        brief = self.brief.copy()
        brief['briefs']['lot'] = lot_slug
        brief['briefs']['lotSlug'] = lot_slug
        brief['briefs']['status'] = 'live'
        brief['briefs']['publishedAt'] = '2019-01-02T00:00:00.000000Z'
        brief['briefs']['frameworkSlug'] = 'digital-outcomes-and-specialists-4'
        self.data_api_client.get_brief.return_value = brief
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief['briefs']['id']))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        section_heading = document.xpath(
            '//h2[@class="summary-item-heading"][contains(text(), "How suppliers will be evaluated")]'
        )[0]
        section_description = section_heading.xpath('following-sibling::p')[0]
        assert section_description.text.strip() == f'All suppliers will be asked to provide a {assessment_type}.'

    def test_dos_brief_has_questions_and_answers(self):
        # First clarification question row renders its number, question and answer.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        xpath = '//h2[@id="clarification-questions"]/following-sibling::table/tbody/tr'
        clarification_questions = document.xpath(xpath)
        number = clarification_questions[0].xpath('td[1]/span/span/text()')[0].strip()
        question = clarification_questions[0].xpath('td[1]/span/text()')[0].strip()
        answer = clarification_questions[0].xpath('td[2]/span/text()')[0].strip()
        assert number == "1."
        assert question == "Why?"
        assert answer == "Because"

    def test_can_apply_to_live_brief(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_apply_button_visible_if_status_is_draft(self):
        # A draft (unsubmitted) response must not hide the apply button.
        self.brief_responses['briefResponses'][0]['status'] = 'draft'
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    @pytest.mark.parametrize('status', ['closed', 'unsuccessful', 'cancelled'])
    def test_cannot_apply_to_closed_cancelled_or_unsuccessful_brief(self, status):
        self.brief['briefs']['status'] = status
        self.brief['briefs']['applicationsClosedAt'] = "2016-12-15T11:08:28.054129Z"
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        apply_links = document.xpath('//a[@href="/suppliers/opportunities/{}/responses/start"]'.format(brief_id))
        assert len(apply_links) == 0

    def test_cannot_apply_to_awarded_brief(self):
        self.brief['briefs']['status'] = "awarded"
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"awardedContractStartDate": "2017-08-21", "awardedContractValue": "20000.00"},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 123456,
                    "supplierName": "Another, Better, Company Limited",
                    "supplierOrganisationSize": "large"
                }
            ]
        }
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        apply_links = document.xpath('//a[@href="/suppliers/opportunities/{}/responses/start"]'.format(brief_id))
        assert len(apply_links) == 0

    def test_dos_brief_specialist_role_displays_label(self):
        # Human-readable role label must be shown, not the camelCase API key.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert 'qualityAssurance' not in res.get_data(as_text=True)
        assert 'Quality assurance analyst' in res.get_data(as_text=True)

    def _assert_start_application(self, document, brief_id):
        # Helper: page must contain the GET form whose button starts an application.
        assert document.xpath(
            "//form[@method='get'][normalize-space(string(.//button))=$t]/@action",
            t="Apply for this opportunity",
        ) == ["/suppliers/opportunities/{}/responses/start".format(brief_id)]

    def _assert_view_application(self, document, brief_id):
        # Helper: page must contain exactly one 'View your application' link.
        assert len(document.xpath(
            '//a[@href="{0}"][contains(normalize-space(text()), normalize-space("{1}"))]'.format(
                "/suppliers/opportunities/{}/responses/result".format(brief_id),
                "View your application",
            )
        )) == 1

    def test_unauthenticated_start_application(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_buyer_start_application(self):
        self.login_as_buyer()
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_supplier_start_application(self):
        self.login_as_supplier()
        # mocking that we haven't applied
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": []
        }
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_supplier_applied_view_application_for_live_opportunity(self):
        self.login_as_supplier()
        # fixtures for brief responses have been set up so one of them has the supplier_id we are logged in as.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)

    @pytest.mark.parametrize('status', ['closed', 'unsuccessful', 'cancelled'])
    def test_supplier_applied_view_application_for_closed_unsuccessful_or_cancelled_opportunity(self, status):
        self.login_as_supplier()
        self.brief['briefs']['status'] = status
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()

    def test_supplier_applied_view_application_for_opportunity_awarded_to_logged_in_supplier(self):
        # supplierId 1234 below matches the logged-in supplier fixture.
        self.login_as_supplier()
        self.brief['briefs']['status'] = 'awarded'
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"awardedContractStartDate": "2017-08-21", "awardedContractValue": "20000.00"},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 1234,
                    "supplierName": "Example Company Limited",
                    "supplierOrganisationSize": "small"
                }
            ]
        }
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()

    def test_supplier_applied_view_application_for_opportunity_pending_awarded_to_logged_in_supplier(self):
        self.login_as_supplier()
        self.brief['briefs']['status'] = 'closed'
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"pending": True},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "pending-awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 1234,
                    "supplierName": "Example Company Limited",
                    "supplierOrganisationSize": "small"
                }
            ]
        }
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()

    def test_supplier_applied_view_application_for_opportunity_awarded_to_other_supplier(self):
        # Award went to supplierId 123456; logged-in supplier (1234) only submitted.
        self.login_as_supplier()
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"awardedContractStartDate": "2017-08-21", "awardedContractValue": "20000.00"},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 123456,
                    "supplierName": "Another, Better, Company Limited",
                    "supplierOrganisationSize": "large"
                },
                {
                    "id": 14277,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "submitted",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 1234,
                    "supplierName": "Example Company Limited",
                    "supplierOrganisationSize": "small"
                }
            ]
        }
        self.brief['briefs']['status'] = 'awarded'
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()


class TestBriefPageQandASectionViewQandASessionDetails(BaseBriefPageTest):
    # Visibility of the 'question and answer session details' link on a brief page.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['questionAndAnswerSessionDetails'] = {'many': 'details'}
        self.brief['briefs']['clarificationQuestionsAreClosed'] = False

    def test_live_brief_q_and_a_session(self):
        """
        As long as a:
            A user is not logged in
            The brief is live
            Clarification questions are open
            The brief has Q and A session details
        We should show the:
            link to login and view the QAS details
        """
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200
        expected_text = "Log in to view question and answer session details"
        expected_link = '/suppliers/opportunities/{}/question-and-answer-session'.format(self.brief_id)
        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    def test_live_brief_q_and_a_session_logged_in(self):
        """
        As long as a:
            Supplier user is logged in
            The brief is live
            Clarification questions are open
            The brief has Q and A session details
        We should show the:
            Link to view the QAS details
        """
        self.login_as_supplier()
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200
        expected_text = "View question and answer session details"
        expected_link = '/suppliers/opportunities/{}/question-and-answer-session'.format(self.brief_id)
        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    @pytest.mark.parametrize(
        'brief_data',
        [
            {'status': 'withdrawn'},
            {'status': 'closed'},
            {'questionAndAnswerSessionDetails': None},
            {'clarificationQuestionsAreClosed': True}
        ]
    )
    def test_brief_q_and_a_session_link_not_shown(self, brief_data):
        """
        On viewing briefs with data like the above the page should load
        but we should not get the link.
        """
        self.brief['briefs'].update(brief_data)
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200
        unexpected_texts = [
            "Log in to view question and answer session details",
            "View question and answer session details"
        ]
        for unexpected_text in unexpected_texts:
            assert len(document.xpath('.//a[contains(text(),"{}")]'.format(unexpected_text))) == 0


class TestBriefPageQandASectionAskAQuestion(BaseBriefPageTest):
    # Visibility of the 'ask a question' link on a brief page.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['clarificationQuestionsAreClosed'] = False

    def test_live_brief_ask_a_question(self):
        """
        As long as a:
            A user is not logged in
            The brief is live
            Clarification questions are open
        We should show the:
            link to login and ask a question
        """
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200
        expected_text = "Log in to ask a question"
        expected_link = '/suppliers/opportunities/{}/ask-a-question'.format(self.brief_id)
        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    def test_live_brief_ask_a_question_logged_in(self):
        """
        As long as a:
            Supplier user is logged in
            The brief is live
            Clarification questions are open
        We should show the:
            Link to ask a question
        """
        self.login_as_supplier()
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200
        expected_text = "Ask a question"
        expected_link = '/suppliers/opportunities/{}/ask-a-question'.format(self.brief_id)
        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    @pytest.mark.parametrize(
        'brief_data',
        [
            {'status': 'withdrawn'},
            {'status': 'closed'},
            {'clarificationQuestionsAreClosed': True}
        ]
    )
    def test_brief_ask_a_question_link_not_shown(self, brief_data):
        """
        On viewing briefs with data like the above the page should load but we
        should not get either the log in to ask a question or ask a question links.
        """
        self.brief['briefs'].update(brief_data)
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200
        unexpected_texts = ["Log in to ask a question", "Ask a question"]
        for unexpected_text in unexpected_texts:
            assert len(document.xpath('.//a[contains(text(),"{}")]'.format(unexpected_text))) == 0


class TestAwardedBriefPage(BaseBriefPageTest):
    # Content of the award banner on a brief that has been awarded.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = 'awarded'
        self.brief['briefs']['awardedBriefResponseId'] = 14276

    def test_award_banner_with_winning_supplier_shown_on_awarded_brief_page(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        assert 'Awarded to Example Company Limited' in awarded_banner.xpath('h2/text()')[0]

    def test_contract_start_date_visible_on_award_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        assert 'Start date: Monday 21 August 2017' in awarded_banner.xpath('p/text()')[0]

    def test_contract_value_visible_on_award_banner_does_not_include_zero_pence(self):
        # "20000.00" should render as £20,000 (no trailing .00).
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        assert u'Value: £20,000' in awarded_banner.xpath('p/text()')[1]

    def test_contract_value_visible_on_award_banner_includes_non_zero_pence(self):
        self.brief_responses["briefResponses"][1]["awardDetails"]["awardedContractValue"] = "20000.10"
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        assert u'Value: £20,000.10' in awarded_banner.xpath('p/text()')[1]

    def test_supplier_size_visible_on_award_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        assert 'Company size: SME' in awarded_banner.xpath('p/text()')[2]


class TestCancelledBriefPage(BaseBriefPageTest):
    # Banner content on a brief the buyer cancelled.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = 'cancelled'

    def test_cancelled_banner_shown_on_cancelled_brief_page(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        cancelled_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        assert 'This opportunity was cancelled' in cancelled_banner.xpath('h2/text()')[0]

    def test_explanation_message_shown_on_cancelled_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        cancelled_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        expected_message = ("The buyer cancelled this opportunity, for example because they no longer have the budget. "
                            "They may publish an updated version later."
                            )
        assert expected_message in cancelled_banner.xpath('p/text()')[0]


class TestUnsuccessfulBriefPage(BaseBriefPageTest):
    # Banner content on a brief closed without a suitable supplier.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = 'unsuccessful'

    def test_unsuccessful_banner_shown_on_unsuccessful_brief_page(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        unsuccessful_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        assert 'No suitable suppliers applied' in unsuccessful_banner.xpath('h2/text()')[0]

    def test_explanation_message_shown_on_unsuccessful_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        cancelled_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        expected_message = ("The buyer didn't award this contract because no suppliers met their requirements. "
                            "They may publish an updated version later."
                            )
        assert expected_message in cancelled_banner.xpath('p/text()')[0]


class TestWithdrawnSpecificBriefPage(BaseBriefPageTest):
    # Behaviour of a brief page after the buyer withdraws the opportunity.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = "withdrawn"
        self.brief['briefs']['withdrawnAt'] = "2016-11-25T10:47:23.126761Z"

    def test_dos_brief_visible_when_withdrawn(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        assert res.status_code == 200

    def test_apply_button_not_visible_for_withdrawn_briefs(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        apply_links = document.xpath('//a[@href="/suppliers/opportunities/{}/responses/start"]'.format(self.brief_id))
        assert len(apply_links) == 0

    def test_deadline_text_not_shown(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        assert 'The deadline for asking questions about this opportunity was ' not in page

    def test_withdrawn_banner_shown_on_withdrawn_brief(self):
        # Assertions are against the raw HTML-escaped page text.
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        assert 'This opportunity was withdrawn on' in page
        assert (
            "You can&#39;t apply for this opportunity now. "
            "The buyer may publish an updated&nbsp;version on the Digital&nbsp;Marketplace"
        ) in page

    @pytest.mark.parametrize('status', ['live', 'closed'])
    def test_withdrawn_banner_not_shown_on_live_and_closed_brief(self, status):
        self.brief['briefs']['status'] = status
        del self.brief['briefs']['withdrawnAt']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        assert 'This opportunity was withdrawn on' not in page

    def test_dateformat_in_withdrawn_banner_displayed_correctly(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        assert 'This opportunity was withdrawn on Friday&nbsp;25&nbsp;November&nbsp;2016' in page


class TestCatalogueOfBriefsPage(APIClientMixin, BaseApplicationTest):
    # Opportunity listing page, backed by mocked search/data API clients.

    def setup_method(self, method):
        super().setup_method(method)
        self.search_api_client.search.return_value = self._get_dos_brief_search_api_response_fixture_data()
        # One aggregation response per DOS lot, consumed in this order.
        self.search_api_client.aggregate.side_effect = [
            self._get_dos_brief_search_api_aggregations_response_outcomes_fixture_data(),
            self._get_dos_brief_search_api_aggregations_response_specialists_fixture_data(),
            self._get_dos_brief_search_api_aggregations_response_user_research_fixture_data(),
        ]
        self.data_api_client.find_frameworks.return_value = {'frameworks': [
            FrameworkStub(
                id=3, slug='digital-outcomes-and-specialists-2', status='live', lots=dos_lots(),
                has_further_competition=True
            ).response(),
            FrameworkStub(
                id=1, slug='digital-outcomes-and-specialists', status='expired', lots=dos_lots(),
                has_further_competition=True
            ).response(),
            FrameworkStub(
                id=2, slug='foobar', status='expired', lots=cloud_lots()
            ).response(),
            FrameworkStub(
                id=4, slug='g-cloud-9', status='live', lots=cloud_lots()
            ).response()
        ]}

    def normalize_qs(self, qs):
        # Parse a query string into {key: set(values)}, dropping 'page' so
        # pagination links for different pages compare equal.
        return {k: set(v) for k, v in parse_qs(qs).items() if k != "page"}

    @pytest.mark.parametrize('framework_family, expected_status_code', (
        ('digital-outcomes-and-specialists', 200),
        ('g-cloud', 404),
    ))
    def test_404_on_framework_that_does_not_support_further_competition(self, framework_family, expected_status_code):
        res = self.client.get(f'/{framework_family}/opportunities')
        assert res.status_code == expected_status_code

    def test_catalogue_of_briefs_page(self):
        # Unfiltered listing: heading, lot filters with counts, unchecked
        # status/location/keyword inputs, result summary and role labels.
        res = self.client.get('/digital-outcomes-and-specialists/opportunities')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open,closed'
        )
        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
            "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        )
        lot_filters = document.xpath("//form[@method='get']//ul[@class='lot-filters--last-list']//a")
        assert set(element.text for element in lot_filters) == {
            "Digital outcomes (629)",
            "Digital specialists (827)",
            "User research participants (39)",
        }
        assert len(document.xpath("//form[@method='get']//ul[@class='lot-filters--last-list']//strong")) == 0
        status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']")
        assert set(element.get("value") for element in status_inputs) == {"open", "closed"}
        assert not any(element.get("checked") for element in status_inputs)
        location_inputs = document.xpath("//form[@method='get']//input[@name='location']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in location_inputs
        } == {
            "scotland": False,
            "north east england": False,
            "north west england": False,
            "yorkshire and the humber": False,
            "east midlands": False,
            "west midlands": False,
            "east of england": False,
            "wales": False,
            "london": False,
            "south east england": False,
            "south west england": False,
            "northern ireland": False,
            "international (outside the uk)": False,
            "offsite": False,
        }
        q_inputs = document.xpath("//form[@method='get']//input[@name='q']")
        assert tuple(element.get("value") for element in q_inputs) == ("",)
        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == "864 results found in All categories"
        specialist_role_labels = document.xpath("//div[@class='search-result']/ul[2]/li[2]/text()")
        # only two briefs has a specialist role so only one label should exist
        assert len(specialist_role_labels) == 2
        assert specialist_role_labels[0].strip() == "Developer"
        assert specialist_role_labels[1].strip() == "Technical architect"

    def test_catalogue_of_briefs_page_filtered(self):
        # Status/lot/location filters must be forwarded to the search API and
        # reflected back in checked inputs and pagination links.
        original_url = "/digital-outcomes-and-specialists/opportunities?page=2"\
                       "&statusOpenClosed=open&lot=digital-outcomes&location=wales&location=london"
        res = self.client.get(original_url)
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open',
            lot='digital-outcomes',
            location='wales,london',
            page='2',
        )
        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
            "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        )
        all_categories_return_link = document.xpath("//form[@method='get']//div[@class='lot-filters']/ul/li/a")[0]
        assert all_categories_return_link.text == 'All categories'
        lot_filters = document.xpath("//form[@method='get']//div[@class='lot-filters']//ul//ul/li/*[1]")
        # The selected lot renders as <strong>; the others stay links.
        assert {
            element.text: element.tag for element in lot_filters
        } == {
            'Digital outcomes (629)': 'strong',
            'Digital specialists (827)': 'a',
            'User research participants (39)': 'a',
        }
        assert document.xpath(
            "//a[@id=$i][contains(@class, $c)][normalize-space(string())=normalize-space($t)][@href=$h]",
            i="dm-clear-all-filters",
            c="clear-filters-link",
            t="Clear filters",
            h="/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes",
        )
        status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in status_inputs
        } == {
            "open": True,
            "closed": False,
        }
        location_inputs = document.xpath("//form[@method='get']//input[@name='location']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in location_inputs
        } == {
            "scotland": False,
            "north east england": False,
            "north west england": False,
            "yorkshire and the humber": False,
            "east midlands": False,
            "west midlands": False,
            "east of england": False,
            "wales": True,
            "london": True,
            "south east england": False,
            "south west england": False,
            "northern ireland": False,
            "international (outside the uk)": False,
            "offsite": False,
        }
        q_inputs = document.xpath("//form[@method='get']//input[@name='q']")
        assert tuple(element.get("value") for element in q_inputs) == ("",)
        parsed_original_url = urlparse(original_url)
        parsed_prev_url = urlparse(document.xpath("//li[@class='previous']/a/@href")[0])
        parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0])
        assert parsed_original_url.path == parsed_prev_url.path == parsed_next_url.path
        assert self.normalize_qs(parsed_original_url.query) == \
            self.normalize_qs(parsed_next_url.query) == \
            self.normalize_qs(parsed_prev_url.query)
        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == \
            "864 results found in Digital outcomes"

    def test_catalogue_of_briefs_page_filtered_keyword_search(self):
        # As above, plus a keyword query ('q') that must survive the round trip.
        original_url = "/digital-outcomes-and-specialists/opportunities?page=2"\
                       "&statusOpenClosed=open&lot=digital-outcomes"\
                       "&location=offsite&q=Richie+Poldy"
        res = self.client.get(original_url)
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open',
            lot='digital-outcomes',
            location='offsite',
            page='2',
            q='Richie Poldy',
        )
        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
            "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        )
        all_categories_return_link = document.xpath("//form[@method='get']//div[@class='lot-filters']/ul/li/a")[0]
        assert all_categories_return_link.text == 'All categories'
        lot_filters = document.xpath("//form[@method='get']//div[@class='lot-filters']//ul//ul/li/*[1]")
        assert {
            element.text: element.tag for element in lot_filters
        } == {
            'Digital outcomes (629)': 'strong',
            'Digital specialists (827)': 'a',
            'User research participants (39)': 'a',
        }
        assert document.xpath(
            "//a[@id=$i][contains(@class, $c)][normalize-space(string())=normalize-space($t)][@href=$h]",
            i="dm-clear-all-filters",
            c="clear-filters-link",
            t="Clear filters",
            h="/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes&q=Richie+Poldy",
        )
        status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in status_inputs
        } == {
            "open": True,
            "closed": False,
        }
        location_inputs = document.xpath("//form[@method='get']//input[@name='location']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in location_inputs
        } == {
            "scotland": False,
            "north east england": False,
            "north west england": False,
            "yorkshire and the humber": False,
            "east midlands": False,
            "west midlands": False,
            "east of england": False,
            "wales": False,
            "london": False,
            "south east england": False,
            "south west england": False,
            "northern ireland": False,
            "international (outside the uk)": False,
            "offsite": True,
        }
        q_inputs = document.xpath("//form[@method='get']//input[@name='q']")
        assert tuple(element.get("value") for element in q_inputs) == ("Richie Poldy",)
        parsed_original_url = urlparse(original_url)
        parsed_prev_url = urlparse(document.xpath("//li[@class='previous']/a/@href")[0])
        parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0])
        assert parsed_original_url.path == parsed_prev_url.path == parsed_next_url.path
        assert self.normalize_qs(parsed_original_url.query) == \
            self.normalize_qs(parsed_next_url.query) == \
            self.normalize_qs(parsed_prev_url.query)
        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == \
            "864 results found containing Richie Poldy in Digital outcomes"

    def test_catalogue_of_briefs_page_filtered_all_lots_selected(self):
        # Selecting every lot collapses to a single-lot search call here and
        # renders as if one lot were chosen.
        original_url = "/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes&lot=digital-specialists"\
                       "&lot=user-research-participants"
        res = self.client.get(original_url)
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open,closed',
            lot='digital-outcomes',
        )
        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
            "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        )
        all_categories_return_link = document.xpath("//form[@method='get']//div[@class='lot-filters']/ul/li/a")[0]
        assert all_categories_return_link.text == 'All categories'
        lot_filters = document.xpath("//form[@method='get']//div[@class='lot-filters']//ul//ul/li/*[1]")
        assert {
            element.text: element.tag for element in lot_filters
        } == {
            'Digital outcomes (629)': 'strong',
            'Digital specialists (827)': 'a',
            'User research participants (39)': 'a',
        }
        status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in status_inputs
        } == {
            "open": False,
            "closed": False,
        }
        location_inputs = document.xpath("//form[@method='get']//input[@name='location']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in location_inputs
        } == {
            "scotland": False,
            "north east england": False,
            "north west england": False,
            "yorkshire and the humber": False,
            "east midlands": False,
            "west midlands": False,
            "east of england": False,
            "wales": False,
            "london": False,
            "south east england": False,
            "south west england": False,
            "northern ireland": False,
            "international (outside the uk)": False,
            "offsite": False,
        }
        q_inputs = document.xpath("//form[@method='get']//input[@name='q']")
        assert tuple(element.get("value") for element in q_inputs) == ("",)
        parsed_original_url = urlparse(original_url)
        parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0])
        assert parsed_original_url.path == parsed_next_url.path
        assert self.normalize_qs(parsed_next_url.query) == {'lot': {'digital-outcomes'}}
        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == \
            "864 results found in Digital outcomes"

    @pytest.mark.parametrize(
        ('dos_status', 'dos2_status', 'expected_url_slug_suffix'),
        (
            ('live', 'standstill', ''),
            ('expired', 'live', '-2'),
        )
    )
    @mock.patch('app.main.views.marketplace.content_loader')
    def test_opportunity_data_download_info_and_link_visible_on_catalogue_page(
        self, content_loader, dos_status, dos2_status, expected_url_slug_suffix
    ):
        # The CSV download link must point at the most recent live DOS framework.
        self.data_api_client.find_frameworks.return_value = {'frameworks': [
            FrameworkStub(
                id=3, slug='digital-outcomes-and-specialists-2', status=dos2_status, lots=dos_lots(),
                has_further_competition=True
            ).response(),
            FrameworkStub(
                id=1, slug='digital-outcomes-and-specialists', status=dos_status, lots=dos_lots(),
                has_further_competition=True
            ).response()
        ]}
        res = self.client.get('/digital-outcomes-and-specialists/opportunities')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        header = document.xpath("//h2[@id='opportunity-data-header']")[0].text
        description = document.xpath("//p[@id='opportunity-data-description']")[0].text
        expected_desc = "Download data buyers have provided about closed opportunities. Some data may be missing."
        link = document.xpath("//a[normalize-space(text())='Download data (CSV)']")[0].values()
        expected_link = (
            "https://assets.digitalmarketplace.service.gov.uk"
            + f"/digital-outcomes-and-specialists{expected_url_slug_suffix}/communications/data/opportunity-data.csv"
        )
        assert "Opportunity data" in header
        assert expected_desc in description
        assert expected_link in link

    def test_catalogue_of_briefs_page_shows_pagination_if_more_pages(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=2')
        assert res.status_code == 200
        page = res.get_data(as_text=True)
        document = html.fromstring(page)
        assert '<li class="previous">' in page
        assert '<li class="next">' in page
        prev_url = str(document.xpath('string(//li[@class="previous"]/a/@href)'))
        next_url = str(document.xpath('string(//li[@class="next"]/a/@href)'))
        assert prev_url.endswith('/opportunities?page=1')
        assert next_url.endswith('/opportunities?page=3')
        assert '<span class="page-numbers">1 of 9</span>' in res.get_data(as_text=True)
        # (assertion continues beyond this chunk)
        assert
'<span class="page-numbers">3 of 9</span>' in res.get_data(as_text=True)

    def test_no_pagination_if_no_more_pages(self):
        # With a page size larger than the fixture's total result count,
        # everything fits on one page, so neither pagination link is rendered.
        with self.app.app_context():
            current_app.config['DM_SEARCH_PAGE_SIZE'] = 1000
            res = self.client.get('/digital-outcomes-and-specialists/opportunities')
            assert res.status_code == 200
            page = res.get_data(as_text=True)
            assert '<li class="previous">' not in page
            assert '<li class="next">' not in page

    def test_catalogue_of_briefs_page_404_for_framework_that_does_not_exist(self):
        # Unknown framework slug in the URL -> 404, but the framework list is
        # still fetched (that lookup is what establishes the slug is unknown).
        res = self.client.get('/digital-giraffes-and-monkeys/opportunities')
        assert res.status_code == 404
        self.data_api_client.find_frameworks.assert_called_once_with()

    def test_briefs_search_has_js_hidden_filter_button(self):
        # The "Filter" submit button carries both js-hidden and
        # js-dm-live-search classes: live search submits automatically when
        # JS is available, so the button is only shown without JS.
        res = self.client.get('/digital-outcomes-and-specialists/opportunities')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        filter_button = document.xpath(
            '//button[contains(@class, "js-hidden")][contains(@class, "js-dm-live-search")]'
            '[normalize-space(text())="Filter"]'
        )
        assert len(filter_button) == 1

    def test_opportunity_status_and_published_date(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        # First search result in the fixture is a live opportunity; its last
        # three metadata items are published date / question deadline / closing.
        live_opportunity_published_at = document.xpath(
            '//div[@class="search-result"][1]//li[@class="search-result-metadata-item"]'
        )[-3].text_content().strip()
        assert live_opportunity_published_at == "Published: Friday 17 November 2017"
        live_opportunity_qs_closing_at = document.xpath(
            '//div[@class="search-result"][1]//li[@class="search-result-metadata-item"]'
        )[-2].text_content().strip()
        assert live_opportunity_qs_closing_at == "Deadline for asking questions: Sunday 26 November 2017"
        live_opportunity_closing_at = document.xpath(
            '//div[@class="search-result"][1]//li[@class="search-result-metadata-item"]'
        )[-1].text_content().strip()
        assert live_opportunity_closing_at == "Closing: Friday 1 December 2017"
closed_opportunity_status = document.xpath( '//div[@class="search-result"][2]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert closed_opportunity_status == "Closed: awaiting outcome" unsuccessful_opportunity_status = document.xpath( '//div[@class="search-result"][3]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert unsuccessful_opportunity_status == "Closed: no suitable suppliers" cancelled_opportunity_status = document.xpath( '//div[@class="search-result"][4]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert cancelled_opportunity_status == "Closed: cancelled" awarded_opportunity_status = document.xpath( '//div[@class="search-result"][6]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert awarded_opportunity_status == "Closed: awarded" def test_should_render_summary_for_0_results_in_all_lots(self): search_results = self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 0 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">0</span> results found in <em>All categories</em>' in summary def test_should_render_summary_for_0_results_in_particular_lot(self): search_results = self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 0 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">0</span> results found in <em>Digital outcomes</em>' in summary def test_should_render_summary_for_1_result_found_in_all_lots(self): search_results = 
self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 1 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">1</span> result found in <em>All categories</em>' in summary def test_should_render_summary_for_many_results_found_in_a_particular_lot(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?lot=digital-specialists') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">864</span> results found in <em>Digital specialists</em>' in summary def test_should_render_suggestions_for_0_results(self): search_results = self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 0 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 xpath = html.fromstring(res.get_data(as_text=True)).xpath assert xpath('boolean(//div[contains(@class, "search-suggestion")])') def test_should_not_render_suggestions_when_results(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 xpath = html.fromstring(res.get_data(as_text=True)).xpath assert not xpath('boolean(//div[contains(@class, "search-suggestion")])') def test_should_ignore_unknown_arguments(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?location=my-lovely-horse') assert res.status_code == 200 def test_should_404_on_invalid_page_param(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=1') assert res.status_code == 200 res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=-1') assert res.status_code 
== 404

        res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=potato')
        assert res.status_code == 404

    def test_search_results_with_invalid_lot_fall_back_to_all_categories(self):
        # An unrecognised ?lot= slug must not error: the filter panel falls
        # back to the "All categories" view listing every lot.
        res = self.client.get('/digital-outcomes-and-specialists/opportunities?lot=bad-lot-slug')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        lots = document.xpath('//div[@class="lot-filters"]//ul[@class="lot-filters--last-list"]//li/a')
        assert lots[0].text_content().startswith('Digital outcomes')
        assert lots[1].text_content().startswith('Digital specialists')
        assert lots[2].text_content().startswith('User research participants')

    def test_lot_links_retain_all_category_filters(self):
        # Switching lot must preserve the other active query filters
        # (here: location=london) in each lot link's href.
        res = self.client.get('/digital-outcomes-and-specialists/opportunities?location=london')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        lots = document.xpath('//div[@class="lot-filters"]//ul[@class="lot-filters--last-list"]//li/a')
        for lot in lots:
            assert 'location=london' in lot.get('href')

    def test_lot_with_no_briefs_is_not_a_link(self):
        # Force the digital-specialists aggregation to report zero briefs; the
        # lot should then render as plain text ("... (0)") instead of an anchor.
        specialists_aggregation = self._get_dos_brief_search_api_aggregations_response_specialists_fixture_data()
        specialists_aggregation['aggregations']['lot']['digital-specialists'] = 0
        self.search_api_client.aggregate.side_effect = [
            self._get_dos_brief_search_api_aggregations_response_outcomes_fixture_data(),
            specialists_aggregation,
            self._get_dos_brief_search_api_aggregations_response_user_research_fixture_data(),
        ]
        res = self.client.get('/digital-outcomes-and-specialists/opportunities')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        specialist_label = document.xpath("//ul[@class='lot-filters--last-list']//li")[-2]
        assert len(specialist_label.xpath('a')) == 0
        assert specialist_label.text_content() == 'Digital specialists (0)'

    def test_filter_form_given_filter_selection(self):
        res =
self.client.get('/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes&location=london') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) hidden_inputs = document.xpath('//form[@id="js-dm-live-search-form"]//input[@type="hidden"]') kv_pairs = {input_el.get('name'): input_el.get('value') for input_el in hidden_inputs} assert kv_pairs == {'lot': 'digital-outcomes'} class TestCatalogueOfBriefsFilterOnClick(APIClientMixin, BaseApplicationTest): def setup_method(self, method): super().setup_method(method) self.search_api_client.search.return_value = self._get_dos_brief_search_api_response_fixture_data() self.search_api_client.aggregate.side_effect = [ self._get_dos_brief_search_api_aggregations_response_outcomes_fixture_data(), self._get_dos_brief_search_api_aggregations_response_specialists_fixture_data(), self._get_dos_brief_search_api_aggregations_response_user_research_fixture_data(), ] self.data_api_client.find_frameworks.return_value = { 'frameworks': [ FrameworkStub( id=3, slug='digital-outcomes-and-specialists-2', status='live', lots=dos_lots(), has_further_competition=True ).response() ] } @pytest.mark.parametrize('query_string, content_type', (('', 'text/html; charset=utf-8'), ('?live-results=true', 'application/json'))) def test_endpoint_switches_on_live_results_request(self, query_string, content_type): res = self.client.get('/digital-outcomes-and-specialists/opportunities{}'.format(query_string)) assert res.status_code == 200 assert res.content_type == content_type def test_live_results_returns_valid_json_structure(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?live-results=true') data = json.loads(res.get_data(as_text=True)) assert sorted(data.keys()) == sorted(( 'results', 'summary', 'summary-accessible-hint', 'categories', 'filter-title' )) for k, v in data.items(): assert set(v.keys()) == {'selector', 'html'} # We want to enforce using css IDs to describe the nodes which 
should be replaced. assert v['selector'].startswith('#') live_results_expected_templates = ( "search/_results_wrapper.html", "search/_categories_wrapper.html", "search/_summary.html", "search/_summary_accessible_hint.html", "search/_filter_title.html" ) @pytest.mark.parametrize( ('query_string', 'urls'), ( ('', ('search/briefs.html',)), ('?live-results=true', live_results_expected_templates) ) ) @mock.patch('app.main.views.marketplace.render_template', autospec=True) def test_base_page_renders_search_services(self, render_template_patch, query_string, urls): render_template_patch.return_value = '<p>some html</p>' self.client.get('/digital-outcomes-and-specialists/opportunities{}'.format(query_string)) assert urls == tuple(x[0][0] for x in render_template_patch.call_args_list) def test_form_has_js_hidden_filter_button(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) filter_button = document.xpath( '//button[contains(@class, "js-hidden")][contains(@class, "js-dm-live-search")]' '[normalize-space(text())="Filter"]' ) assert len(filter_button) == 1 class TestGCloudHomepageLinks(APIClientMixin, BaseApplicationTest): mock_live_g_cloud_framework = { "framework": "g-cloud", "slug": "g-cloud-x", "status": "live", "id": 5 } @pytest.mark.parametrize('framework_slug, gcloud_content', (('g-cloud-8', 'Find cloud technology and support'), ('g-cloud-9', 'Find cloud hosting, software and support'))) def test_g_cloud_homepage_content_is_correct(self, framework_slug, gcloud_content): self.data_api_client.find_frameworks.return_value = { "frameworks": [self.mock_live_g_cloud_framework.copy()] } self.data_api_client.find_frameworks.return_value['frameworks'][0].update({'slug': framework_slug}) res = self.client.get("/") document = html.fromstring(res.get_data(as_text=True)) assert res.status_code == 200 link_texts = [item.text_content().strip() for item in 
document.cssselect('#app-buyer-nav a')] assert link_texts[-2] == gcloud_content assert link_texts[-1] == 'Find physical datacentre space' Refactor tests to use summary list # coding=utf-8 import json import re from urllib.parse import urlparse, parse_qs from flask import current_app from lxml import html import mock import pytest from ...helpers import BaseApplicationTest, BaseAPIClientMixin from dmtestutils.api_model_stubs import FrameworkStub from dmtestutils.api_model_stubs.lot import dos_lots, cloud_lots class APIClientMixin(BaseAPIClientMixin): data_api_client_patch_path = 'app.main.views.marketplace.data_api_client' search_api_client_patch_path = 'app.main.views.marketplace.search_api_client' class TestApplication(APIClientMixin, BaseApplicationTest): def test_analytics_code_should_be_in_javascript(self): res = self.client.get('/static/javascripts/application.js') assert res.status_code == 200 assert 'trackPageview' in res.get_data(as_text=True) def test_should_display_cookie_banner(self): res = self.client.get('/') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) cookie_banner = document.xpath('//div[@id="dm-cookie-banner"]') assert cookie_banner[0].xpath('//h2//text()')[0].strip() == "Can we store analytics cookies on your device?" 
assert len(self.data_api_client.find_frameworks.call_args_list) == 2

    def test_google_verification_code_shown_on_homepage(self):
        # The verification value comes from test config; assert it is embedded
        # in the rendered page's meta tag.
        res = self.client.get('/')
        assert res.status_code == 200
        assert 'name="google-site-verification" content="NotARealVerificationKey"' in res.get_data(as_text=True)
        assert len(self.data_api_client.find_frameworks.call_args_list) == 2


class TestHomepageAccountCreationVirtualPageViews(APIClientMixin, BaseApplicationTest):
    # Virtual page views are driven by a one-off "track-page-view" flash
    # message that the homepage analytics markup consumes — it must appear as
    # a data-analytics attribute, never as a visible flash banner.

    def test_data_analytics_track_page_view_is_shown_if_account_created_flash_message(self):
        with self.client.session_transaction() as session:
            session['_flashes'] = [('track-page-view', 'buyers?account-created=true')]
        res = self.client.get("/")
        data = res.get_data(as_text=True)

        assert 'data-analytics="trackPageView" data-url="buyers?account-created=true"' in data
        # however this should not be shown as a regular flash message
        flash_banner_match = re.search(r'<p class="banner-message">\s*(.*)', data, re.MULTILINE)
        assert flash_banner_match is None, "Unexpected flash banner message '{}'.".format(
            flash_banner_match.groups()[0])
        assert len(self.data_api_client.find_frameworks.call_args_list) == 2

    def test_data_analytics_track_page_view_not_shown_if_no_account_created_flash_message(self):
        res = self.client.get("/")
        data = res.get_data(as_text=True)

        assert 'data-analytics="trackPageView" data-url="buyers?account-created=true"' not in data
        assert len(self.data_api_client.find_frameworks.call_args_list) == 2


class TestHomepageBrowseList(APIClientMixin, BaseApplicationTest):
    # Minimal framework stubs used to exercise which buyer-nav links the
    # homepage renders for various live/standstill/expired combinations.
    mock_live_dos_1_framework = {
        "framework": "digital-outcomes-and-specialists",
        "slug": "digital-outcomes-and-specialists",
        "status": "live",
        "id": 5
    }
    mock_live_dos_2_framework = {
        "framework": "digital-outcomes-and-specialists",
        "slug": "digital-outcomes-and-specialists-2",
        "status": "live",
        "id": 7
    }
    mock_live_g_cloud_9_framework = {
        "framework": "g-cloud",
        "slug": "g-cloud-9",
        "status": "live",
        "id": 8
    }

    def test_dos_links_are_shown(self):
self.data_api_client.find_frameworks.return_value = { "frameworks": [ self.mock_live_dos_1_framework ] } res = self.client.get("/") document = html.fromstring(res.get_data(as_text=True)) assert res.status_code == 200 link_texts = [item.text_content().strip() for item in document.cssselect('#app-buyer-nav a')] assert link_texts[0] == "Find an individual specialist" assert link_texts[-1] == "Find physical datacentre space" assert "Find specialists to work on digital projects" not in link_texts def test_links_are_for_existing_dos_framework_when_a_new_dos_framework_in_standstill_exists(self): mock_standstill_dos_2_framework = self.mock_live_dos_2_framework.copy() mock_standstill_dos_2_framework.update({"status": "standstill"}) self.data_api_client.find_frameworks.return_value = { "frameworks": [ self.mock_live_dos_1_framework, mock_standstill_dos_2_framework, ] } res = self.client.get("/") document = html.fromstring(res.get_data(as_text=True)) assert res.status_code == 200 link_locations = [item.values()[1] for item in document.cssselect('#app-buyer-nav a')] lots = ['digital-specialists', 'digital-outcomes', 'user-research-participants', 'user-research-studios'] dos_base_path = '/buyers/frameworks/digital-outcomes-and-specialists/requirements/{}' for index, lot_slug in enumerate(lots): assert link_locations[index] == dos_base_path.format(lot_slug) def test_links_are_for_the_newest_live_dos_framework_when_multiple_live_dos_frameworks_exist(self): self.data_api_client.find_frameworks.return_value = { "frameworks": [ self.mock_live_dos_1_framework, self.mock_live_dos_2_framework, ] } res = self.client.get("/") document = html.fromstring(res.get_data(as_text=True)) assert res.status_code == 200 link_locations = [item.values()[1] for item in document.cssselect('#app-buyer-nav a')] lots = ['digital-specialists', 'digital-outcomes', 'user-research-participants', 'user-research-studios'] dos2_base_path = '/buyers/frameworks/digital-outcomes-and-specialists-2/requirements/{}' 
for index, lot_slug in enumerate(lots): assert link_locations[index] == dos2_base_path.format(lot_slug) def test_links_are_for_live_dos_framework_when_expired_dos_framework_exists(self): mock_expired_dos_1_framework = self.mock_live_dos_1_framework.copy() mock_expired_dos_1_framework.update({"status": "expired"}) self.data_api_client.find_frameworks.return_value = { "frameworks": [ mock_expired_dos_1_framework, self.mock_live_dos_2_framework, ] } res = self.client.get("/") document = html.fromstring(res.get_data(as_text=True)) assert res.status_code == 200 link_locations = [item.values()[1] for item in document.cssselect('#app-buyer-nav a')] lots = ['digital-specialists', 'digital-outcomes', 'user-research-participants', 'user-research-studios'] dos2_base_path = '/buyers/frameworks/digital-outcomes-and-specialists-2/requirements/{}' for index, lot_slug in enumerate(lots): assert link_locations[index] == dos2_base_path.format(lot_slug) def test_non_dos_links_are_shown_if_no_live_dos_framework(self): mock_expired_dos_1_framework = self.mock_live_dos_1_framework.copy() mock_expired_dos_1_framework.update({"status": "expired"}) mock_expired_dos_2_framework = self.mock_live_dos_2_framework.copy() mock_expired_dos_2_framework.update({"status": "expired"}) mock_g_cloud_9_framework = self.mock_live_g_cloud_9_framework.copy() self.data_api_client.find_frameworks.return_value = { "frameworks": [ mock_expired_dos_1_framework, mock_expired_dos_2_framework, mock_g_cloud_9_framework, ] } res = self.client.get("/") document = html.fromstring(res.get_data(as_text=True)) assert res.status_code == 200 link_texts = [item.text_content().strip() for item in document.cssselect('#app-buyer-nav a')] assert link_texts[0] == "Find cloud hosting, software and support" assert link_texts[1] == "Find physical datacentre space" assert len(link_texts) == 2 class TestHomepageSidebarMessage(APIClientMixin, BaseApplicationTest): @staticmethod def _find_frameworks(framework_slugs_and_statuses): 
_frameworks = [] for index, framework_slug_and_status in enumerate(framework_slugs_and_statuses): framework_slug, framework_status = framework_slug_and_status _frameworks.append({ 'framework': 'framework', 'slug': framework_slug, 'id': index + 1, 'status': framework_status, 'name': 'Framework' }) return { 'frameworks': _frameworks } @staticmethod def _assert_supplier_nav_is_empty(response_data): document = html.fromstring(response_data) supplier_nav_contents = document.xpath('//nav[@id="app-supplier-nav"]/*') assert len(supplier_nav_contents) == 0 @staticmethod def _assert_supplier_nav_is_not_empty(response_data): document = html.fromstring(response_data) supplier_nav_contents = document.xpath('//nav[@id="app-supplier-nav"]/*') assert len(supplier_nav_contents) > 0 assert supplier_nav_contents[0].xpath('text()')[0].strip() == "Sell services" def _load_homepage(self, framework_slugs_and_statuses, framework_messages): self.data_api_client.find_frameworks.return_value = self._find_frameworks(framework_slugs_and_statuses) res = self.client.get('/') assert res.status_code == 200 response_data = res.get_data(as_text=True) if framework_messages: self._assert_supplier_nav_is_not_empty(response_data) for message in framework_messages: assert message in response_data else: self._assert_supplier_nav_is_empty(response_data) def test_homepage_sidebar_message_exists_gcloud_8_coming(self): framework_slugs_and_statuses = [ ('g-cloud-8', 'coming'), ('digital-outcomes-and-specialists', 'live') ] framework_messages = [ u"Provide cloud software and support to the public sector.", u"You need an account to receive notifications about when you can apply." 
] self._load_homepage(framework_slugs_and_statuses, framework_messages) def test_homepage_sidebar_message_exists_gcloud_8_open(self): framework_slugs_and_statuses = [ ('g-cloud-8', 'open'), ('digital-outcomes-and-specialists', 'live') ] framework_messages = [ u"Provide cloud software and support to the public sector", u"You need an account to apply.", u"The application deadline is 5pm BST, 23 June 2016." ] self._load_homepage(framework_slugs_and_statuses, framework_messages) def test_homepage_sidebar_message_exists_g_cloud_7_pending(self): framework_slugs_and_statuses = [ ('g-cloud-7', 'pending'), ] framework_messages = [ u"G‑Cloud 7 is closed for applications", u"G‑Cloud 7 services will be available from 23 November 2015." ] self._load_homepage(framework_slugs_and_statuses, framework_messages) def test_homepage_sidebar_messages_when_logged_out(self): self.data_api_client.find_frameworks.return_value = self._find_frameworks([ ('digital-outcomes-and-specialists', 'live') ]) res = self.client.get('/') assert res.status_code == 200 response_data = res.get_data(as_text=True) document = html.fromstring(response_data) supplier_links = document.cssselect("#app-supplier-nav a") supplier_link_texts = [item.xpath("normalize-space(string())") for item in supplier_links] assert 'View Digital Outcomes and Specialists opportunities' in supplier_link_texts assert 'Become a supplier' in supplier_link_texts assert 'See Digital Marketplace sales figures' in supplier_link_texts def test_homepage_sidebar_messages_when_logged_in(self): self.data_api_client.find_frameworks.return_value = self._find_frameworks([ ('digital-outcomes-and-specialists', 'live') ]) self.login_as_supplier() res = self.client.get('/') assert res.status_code == 200 response_data = res.get_data(as_text=True) document = html.fromstring(response_data) supplier_links = document.cssselect("#app-supplier-nav a") supplier_link_texts = [item.xpath("normalize-space(string())") for item in supplier_links] assert 'View 
Digital Outcomes and Specialists opportunities' in supplier_link_texts assert 'Become a supplier' not in supplier_link_texts # here we've given an valid framework with a valid status but there is no message.yml file to read from def test_g_cloud_6_open_blows_up(self): framework_slugs_and_statuses = [ ('g-cloud-6', 'open') ] self.data_api_client.find_frameworks.return_value = self._find_frameworks(framework_slugs_and_statuses) res = self.client.get('/') assert res.status_code == 500 class TestStaticMarketplacePages(BaseApplicationTest): def test_cookie_page(self): res = self.client.get('/cookies') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) assert len(document.xpath('//h1[contains(text(), "Cookies on Digital Marketplace")]')) == 1 def test_terms_and_conditions_page(self): res = self.client.get('/terms-and-conditions') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) assert len(document.xpath('//h1[contains(text(), "Terms and conditions")]')) == 1 def test_external_404_makes_all_links_absolute(self): # Get the normal 404 page and a list of the relative URLs it contains links to response1 = self.client.get("/does-not-exist-404") assert response1.status_code == 404 regular_404_document = html.fromstring(response1.get_data(as_text=True)) regular_relative_links = regular_404_document.xpath('//a[starts-with(@href, "/")]') regular_relative_forms = regular_404_document.xpath('//form[starts-with(@action, "/")]') relative_urls = [link.get("href") for link in regular_relative_links] + \ [form.get("action") for form in regular_relative_forms] # Get the "external" 404 page and check it doesn't contain any relative URLs response2 = self.client.get("/404") assert response2.status_code == 404 external_404_document = html.fromstring(response2.get_data(as_text=True)) external_relative_links = external_404_document.xpath('//a[starts-with(@href, "/")]') external_relative_forms = 
external_404_document.xpath('//form[starts-with(@action, "/")]') assert len(external_relative_links) == len(external_relative_forms) == 0 # Check that there is an absolute URL in the external 404 page for every relative URL in the normal 404 page external_links = external_404_document.xpath('//a') external_forms = external_404_document.xpath('//form') external_urls = [link.get("href") for link in external_links] + [form.get("action") for form in external_forms] for relative_url in relative_urls: assert "http://localhost{}".format(relative_url) in external_urls class BaseBriefPageTest(APIClientMixin, BaseApplicationTest): def setup_method(self, method): super().setup_method(method) self.brief = self._get_dos_brief_fixture_data() self.brief_responses = self._get_dos_brief_responses_fixture_data() self.brief_id = self.brief['briefs']['id'] self.data_api_client.find_frameworks.return_value = self._get_frameworks_list_fixture_data() self.data_api_client.get_brief.return_value = self.brief self.data_api_client.find_brief_responses.return_value = self.brief_responses class TestBriefPage(BaseBriefPageTest): @pytest.mark.parametrize('framework_family, expected_status_code', ( ('digital-outcomes-and-specialists', 200), ('g-cloud', 404), )) def test_404_on_framework_that_does_not_support_further_competition(self, framework_family, expected_status_code): brief_id = self.brief['briefs']['id'] res = self.client.get(f'/{framework_family}/opportunities/{brief_id}') assert res.status_code == expected_status_code assert self.data_api_client.find_frameworks.mock_calls == [ mock.call(), ] def test_dos_brief_404s_if_brief_is_draft(self): self.brief['briefs']['status'] = 'draft' brief_id = self.brief['briefs']['id'] res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id)) assert res.status_code == 404 assert self.data_api_client.mock_calls == [ mock.call.find_frameworks(), mock.call.get_brief(str(brief_id)), ] def 
    # --- Interior of a brief-page test class; its header precedes this chunk. ---

    # NOTE(review): the leading 'def ' of this method was lost upstream in
    # extraction; fragment preserved verbatim. Checks the page heading is the
    # brief title, captioned by the buyer organisation.
    test_dos_brief_has_correct_title(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        page_heading = document.cssselect("span.govuk-caption-l + h1.govuk-heading-l")
        assert page_heading
        heading = page_heading[0]
        assert heading.text == self.brief["briefs"]["title"]
        caption = heading.getprevious()
        assert caption.text == self.brief["briefs"]["organisation"]

    def _assert_all_normal_api_calls(self):
        # Helper: rendering the page should hit the API exactly this way,
        # in this order, and nothing else.
        assert self.data_api_client.mock_calls == [
            mock.call.find_frameworks(),
            mock.call.get_brief(str(self.brief_id)),
            mock.call.find_brief_responses(
                brief_id=str(self.brief_id),
                status='draft,submitted,pending-awarded,awarded',
                with_data=False,
            ),
        ]

    @pytest.mark.parametrize('status', ['closed', 'unsuccessful', 'cancelled', 'awarded'])
    def test_only_one_banner_at_once_brief_page(self, status):
        # Whatever the (non-live) status, exactly one temporary-message banner
        # should be rendered at a time.
        self.brief['briefs']['status'] = status
        if self.brief['briefs']['status'] == 'awarded':
            self.brief['briefs']['awardedBriefResponseId'] = 14276
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        number_of_banners = len(document.xpath('//div[@class="banner-temporary-message-without-action"]'))
        assert number_of_banners == 1
        self._assert_all_normal_api_calls()

    def test_dos_brief_displays_application_stats(self):
        # Expected counts (3 incomplete / 5 completed, with SME/large split)
        # come from the default brief-responses fixture set up elsewhere.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        incomplete_responses_section = document.xpath('//div[@id="incomplete-applications"]')[0]
        completed_responses_section = document.xpath('//div[@id="completed-applications"]')[0]

        assert incomplete_responses_section.xpath('h2//span[1]/text()')[0] == '3'
        assert incomplete_responses_section.xpath('h2//span[2]/text()')[0] == "Incomplete applications"
        assert incomplete_responses_section.xpath('p[1]/text()')[0] == "3 SME, 0 large"
        assert completed_responses_section.xpath('h2//span[1]/text()')[0] == '5'
        assert completed_responses_section.xpath('h2//span[2]/text()')[0] == "Completed applications"
        assert completed_responses_section.xpath('p[1]/text()')[0] == "4 SME, 1 large"
        self._assert_all_normal_api_calls()

    def test_application_stats_pluralised_correctly(self):
        # One draft and one submitted response -> singular headings
        # ("Incomplete application", not "applications").
        brief_id = self.brief['briefs']['id']
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "id": 14275,
                    "briefId": brief_id,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "submitted",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 1234,
                    "supplierOrganisationSize": 'large'
                },
                {
                    "id": 14276,
                    "briefId": brief_id,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "draft",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 706033,
                    "supplierOrganisationSize": 'micro'
                }
            ]
        }
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        incomplete_responses_section = document.xpath('//div[@id="incomplete-applications"]')[0]
        completed_responses_section = document.xpath('//div[@id="completed-applications"]')[0]

        assert incomplete_responses_section.xpath('h2//span[1]/text()')[0] == '1'
        assert incomplete_responses_section.xpath('h2//span[2]/text()')[0] == "Incomplete application"
        assert incomplete_responses_section.xpath('p[1]/text()')[0] == "1 SME, 0 large"
        assert completed_responses_section.xpath('h2//span[1]/text()')[0] == '1'
        assert completed_responses_section.xpath('h2//span[2]/text()')[0] == "Completed application"
        assert completed_responses_section.xpath('p[1]/text()')[0] == "0 SME, 1 large"

    def test_dos_brief_displays_application_stats_correctly_when_no_applications(self):
        # Zero responses: counts read '0' and the per-size breakdown
        # paragraphs are omitted entirely.
        brief_id = self.brief['briefs']['id']
        self.data_api_client.find_brief_responses.return_value = {"briefResponses": []}
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        incomplete_responses_section = document.xpath('//div[@id="incomplete-applications"]')[0]
        completed_responses_section = document.xpath('//div[@id="completed-applications"]')[0]

        assert incomplete_responses_section.xpath('h2//span[1]/text()')[0] == '0'
        assert completed_responses_section.xpath('h2//span[1]/text()')[0] == '0'
        assert incomplete_responses_section.xpath('h2//span[2]/text()')[0] == "Incomplete applications"
        assert completed_responses_section.xpath('h2//span[2]/text()')[0] == "Completed applications"
        assert len(incomplete_responses_section.xpath('p[1]/text()')) == 0
        assert len(completed_responses_section.xpath('p[1]/text()')) == 0

    def test_dos_brief_has_lot_analytics_string(self):
        # Analytics hook: an empty span tagged with the brief's lot slug.
        brief = self.brief['briefs']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief['id']))
        assert res.status_code == 200

        data = res.get_data(as_text=True)
        analytics_string = '<span data-lot="{lot_slug}"></span>'.format(lot_slug=brief['lotSlug'])
        assert analytics_string in data

    def test_dos_brief_has_important_dates(self):
        # "Important dates" list: published / question deadline / closing
        # date, with deadline times rendered in GMT.
        brief_id = self.brief['briefs']['id']
        self.brief['briefs']['clarificationQuestionsClosedAt'] = "2016-12-14T11:08:28.054129Z"
        self.brief['briefs']['applicationsClosedAt'] = "2016-12-15T11:08:28.054129Z"
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        brief_important_dates = document.xpath(
            '(//dl[@id="opportunity-important-dates"]//div)')
        assert 3 == len(brief_important_dates)
        assert brief_important_dates[0].xpath('dt')[0].text_content().strip() \
            == "Published"
        assert brief_important_dates[0].xpath('dd')[0].text_content().strip() \
            == "Thursday 1 December 2016"
        assert brief_important_dates[1].xpath('dt')[0].text_content().strip() \
            == "Deadline for asking questions"
        assert brief_important_dates[1].xpath('dd')[0].text_content().strip() \
            == "Wednesday 14 December 2016 at 11:08am GMT"
        assert brief_important_dates[2].xpath('dt')[0].text_content().strip() \
            == "Closing date for applications"
        assert brief_important_dates[2].xpath('dd')[0].text_content().strip() \
            == "Thursday 15 December 2016 at 11:08am GMT"

    def test_dos_brief_with_daylight_savings_has_question_deadline_closing_date_forced_to_utc(self):
        # BST-period timestamps must not shift the displayed day/time.
        brief_id = self.brief['briefs']['id']
        self.brief['briefs']['publishedAt'] = "2016-08-01T23:59:00.000000Z"
        self.brief['briefs']['clarificationQuestionsClosedAt'] = "2016-08-14T23:59:00.000000Z"
        self.brief['briefs']['applicationsClosedAt'] = "2016-08-15T23:59:00.000000Z"
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        brief_important_dates = document.xpath(
            '(//dl[@id="opportunity-important-dates"]//div)')
        assert 3 == len(brief_important_dates)
        # Publish date does not have UTC filter applied
        assert brief_important_dates[0].xpath('dd')[0].text_content().strip() \
            == "Monday 1 August 2016"
        # Question deadline and closing date are forced to 11.59pm (UTC+00) on the correct day
        assert brief_important_dates[1].xpath('dd')[0].text_content().strip() \
            == "Sunday 14 August 2016 at 11:59pm GMT"
        assert brief_important_dates[2].xpath('dd')[0].text_content().strip() \
            == "Monday 15 August 2016 at 11:59pm GMT"

    def test_dos_brief_has_at_least_one_section(self):
        # Spot-checks the "Overview" attributes section (start date and
        # contract length rows).
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        section_heading = document.xpath('//h2[@id="opportunity-attributes-1"]')[0]
        section_attributes = section_heading.xpath('following-sibling::dl[1]/div')
        start_date_key = section_attributes[2].xpath('dt/text()')
        start_date_value = section_attributes[2].xpath('dd/text()')
        contract_length_key = section_attributes[3].xpath('dt/text()')
        contract_length_value = section_attributes[3].xpath('dd/text()')

        assert section_heading.text.strip() == 'Overview'
        assert start_date_key[0].strip() == 'Latest start date'
        assert start_date_value[0].strip() == 'Wednesday 1 March 2017'
        assert contract_length_key[0].strip() == 'Expected contract length'
        assert contract_length_value[0].strip() == '4 weeks'

    @pytest.mark.parametrize(
        'lot_slug, assessment_type',
        [
            ('digital-outcomes', 'written proposal'),
            ('digital-specialists', 'work history'),
            ('user-research-participants', 'written proposal'),
        ]
    )
    def test_dos_brief_displays_mandatory_evaluation_method_for_lot(self, lot_slug, assessment_type):
        # DOS 4 briefs state the lot's mandatory assessment method in the
        # "How suppliers will be evaluated" section.
        brief = self.brief.copy()
        brief['briefs']['lot'] = lot_slug
        brief['briefs']['lotSlug'] = lot_slug
        brief['briefs']['status'] = 'live'
        brief['briefs']['publishedAt'] = '2019-01-02T00:00:00.000000Z'
        brief['briefs']['frameworkSlug'] = 'digital-outcomes-and-specialists-4'
        self.data_api_client.get_brief.return_value = brief

        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief['briefs']['id']))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        section_heading = document.xpath(
            '//h2[contains(text(), "How suppliers will be evaluated")]'
        )[0]
        section_description = section_heading.xpath('following-sibling::p')[0]
        assert section_description.text.strip() == f'All suppliers will be asked to provide a {assessment_type}.'
    def test_dos_brief_has_questions_and_answers(self):
        # Clarification Q&A list: numbered questions with their answers.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        xpath = '//h2[@id="clarification-questions"]/following-sibling::dl/div'
        clarification_questions = document.xpath(xpath)
        question = clarification_questions[0].xpath('dt')[0].text_content().strip()
        answer = clarification_questions[0].xpath('dd')[0].text_content().strip()

        assert question.startswith("1.")
        assert question.endswith("Why?")
        assert answer == "Because"

    def test_can_apply_to_live_brief(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_apply_button_visible_if_status_is_draft(self):
        # An existing draft response should not hide the apply button.
        self.brief_responses['briefResponses'][0]['status'] = 'draft'
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    @pytest.mark.parametrize('status', ['closed', 'unsuccessful', 'cancelled'])
    def test_cannot_apply_to_closed_cancelled_or_unsuccessful_brief(self, status):
        self.brief['briefs']['status'] = status
        self.brief['briefs']['applicationsClosedAt'] = "2016-12-15T11:08:28.054129Z"
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        apply_links = document.xpath('//a[@href="/suppliers/opportunities/{}/responses/start"]'.format(brief_id))
        assert len(apply_links) == 0

    def test_cannot_apply_to_awarded_brief(self):
        self.brief['briefs']['status'] = "awarded"
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"awardedContractStartDate": "2017-08-21", "awardedContractValue": "20000.00"},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 123456,
                    "supplierName": "Another, Better, Company Limited",
                    "supplierOrganisationSize": "large"
                }
            ]
        }
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        apply_links = document.xpath('//a[@href="/suppliers/opportunities/{}/responses/start"]'.format(brief_id))
        assert len(apply_links) == 0

    def test_dos_brief_specialist_role_displays_label(self):
        # Specialist role keys (camelCase) must be rendered as their
        # human-readable labels, never the raw key.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))

        assert 'qualityAssurance' not in res.get_data(as_text=True)
        assert 'Quality assurance analyst' in res.get_data(as_text=True)

    def _assert_start_application(self, document, brief_id):
        # Helper: a GET form whose "Apply for this opportunity" button
        # targets the responses/start URL.
        assert document.xpath(
            "//form[@method='get'][normalize-space(string(.//button))=$t]/@action",
            t="Apply for this opportunity",
        ) == ["/suppliers/opportunities/{}/responses/start".format(brief_id)]

    def _assert_view_application(self, document, brief_id):
        # Helper: exactly one "View your application" link to the
        # responses/result page.
        assert len(document.xpath(
            '//a[@href="{0}"][contains(normalize-space(text()), normalize-space("{1}"))]'.format(
                "/suppliers/opportunities/{}/responses/result".format(brief_id),
                "View your application",
            )
        )) == 1

    def test_unauthenticated_start_application(self):
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_buyer_start_application(self):
        self.login_as_buyer()
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_supplier_start_application(self):
        self.login_as_supplier()
        # mocking that we haven't applied
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": []
        }
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_start_application(document, brief_id)

    def test_supplier_applied_view_application_for_live_opportunity(self):
        self.login_as_supplier()
        # fixtures for brief responses have been set up so one of them has the supplier_id we are logged in as.
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)

    @pytest.mark.parametrize('status', ['closed', 'unsuccessful', 'cancelled'])
    def test_supplier_applied_view_application_for_closed_unsuccessful_or_cancelled_opportunity(self, status):
        self.login_as_supplier()
        self.brief['briefs']['status'] = status
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()

    def test_supplier_applied_view_application_for_opportunity_awarded_to_logged_in_supplier(self):
        # supplierId 1234 matches the logged-in supplier.
        self.login_as_supplier()
        self.brief['briefs']['status'] = 'awarded'
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"awardedContractStartDate": "2017-08-21", "awardedContractValue": "20000.00"},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 1234,
                    "supplierName": "Example Company Limited",
                    "supplierOrganisationSize": "small"
                }
            ]
        }
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()

    def test_supplier_applied_view_application_for_opportunity_pending_awarded_to_logged_in_supplier(self):
        self.login_as_supplier()
        self.brief['briefs']['status'] = 'closed'
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"pending": True},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "pending-awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 1234,
                    "supplierName": "Example Company Limited",
                    "supplierOrganisationSize": "small"
                }
            ]
        }
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()

    def test_supplier_applied_view_application_for_opportunity_awarded_to_other_supplier(self):
        # Award went to supplier 123456; the logged-in supplier (1234) only
        # submitted — they should still see their own application link.
        self.login_as_supplier()
        self.data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {
                    "awardDetails": {"awardedContractStartDate": "2017-08-21", "awardedContractValue": "20000.00"},
                    "id": 14276,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "awarded",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 123456,
                    "supplierName": "Another, Better, Company Limited",
                    "supplierOrganisationSize": "large"
                },
                {
                    "id": 14277,
                    "briefId": 1,
                    "createdAt": "2016-12-02T11:09:28.054129Z",
                    "status": "submitted",
                    "submittedAt": "2016-12-05T11:09:28.054129Z",
                    "supplierId": 1234,
                    "supplierName": "Example Company Limited",
                    "supplierOrganisationSize": "small"
                }
            ]
        }
        self.brief['briefs']['status'] = 'awarded'
        self.brief['briefs']['awardedBriefResponseId'] = 14276
        brief_id = self.brief['briefs']['id']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(brief_id))
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))
        self._assert_view_application(document, brief_id)
        self._assert_all_normal_api_calls()


class TestBriefPageQandASectionViewQandASessionDetails(BaseBriefPageTest):
    # Visibility of the "question and answer session details" links.

    def setup_method(self, method):
        super().setup_method(method)
        # NOTE(review): this setup_method continues past this chunk boundary
        # (two further fixture assignments follow below).
        # NOTE(review): continuation of the setup_method begun just above this
        # chunk boundary.
        self.brief['briefs']['questionAndAnswerSessionDetails'] = {'many': 'details'}
        self.brief['briefs']['clarificationQuestionsAreClosed'] = False

    def test_live_brief_q_and_a_session(self):
        """
        As long as a:
            A user is not logged in
            The brief is live
            Clarification questions are open
            The brief has Q and A session details
        We should show the:
            link to login and view the QAS details
        """
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        expected_text = "Log in to view question and answer session details"
        expected_link = '/suppliers/opportunities/{}/question-and-answer-session'.format(self.brief_id)

        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    def test_live_brief_q_and_a_session_logged_in(self):
        """
        As long as a:
            Supplier user is logged in
            The brief is live
            Clarification questions are open
            The brief has Q and A session details
        We should show the:
            Link to view the QAS details
        """
        self.login_as_supplier()
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        expected_text = "View question and answer session details"
        expected_link = '/suppliers/opportunities/{}/question-and-answer-session'.format(self.brief_id)

        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    @pytest.mark.parametrize(
        'brief_data',
        [
            {'status': 'withdrawn'},
            {'status': 'closed'},
            {'questionAndAnswerSessionDetails': None},
            {'clarificationQuestionsAreClosed': True}
        ]
    )
    def test_brief_q_and_a_session_link_not_shown(self, brief_data):
        """
        On viewing briefs with data like the above the page should load
        but we should not get the link.
        """
        self.brief['briefs'].update(brief_data)
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        unexpected_texts = [
            "Log in to view question and answer session details",
            "View question and answer session details"
        ]
        for unexpected_text in unexpected_texts:
            assert len(document.xpath('.//a[contains(text(),"{}")]'.format(unexpected_text))) == 0


class TestBriefPageQandASectionAskAQuestion(BaseBriefPageTest):
    # Visibility of the "ask a question" links.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['clarificationQuestionsAreClosed'] = False

    def test_live_brief_ask_a_question(self):
        """
        As long as a:
            A user is not logged in
            The brief is live
            Clarification questions are open
        We should show the:
            link to login and ask a question
        """
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        expected_text = "Log in to ask a question"
        expected_link = '/suppliers/opportunities/{}/ask-a-question'.format(self.brief_id)

        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    def test_live_brief_ask_a_question_logged_in(self):
        """
        As long as a:
            Supplier user is logged in
            The brief is live
            Clarification questions are open
        We should show the:
            Link to ask a question
        """
        self.login_as_supplier()
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        expected_text = "Ask a question"
        expected_link = '/suppliers/opportunities/{}/ask-a-question'.format(self.brief_id)

        assert expected_text in document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].text
        assert document.xpath('.//a[contains(text(),"{}")]'.format(expected_text))[0].attrib['href'] == expected_link

    @pytest.mark.parametrize(
        'brief_data',
        [
            {'status': 'withdrawn'},
            {'status': 'closed'},
            {'clarificationQuestionsAreClosed': True}
        ]
    )
    def test_brief_ask_a_question_link_not_shown(self, brief_data):
        """
        On viewing briefs with data like the above the page should load but we
        should not get either the log in to ask a question or ask a question links.
        """
        self.brief['briefs'].update(brief_data)
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        assert res.status_code == 200

        unexpected_texts = ["Log in to ask a question", "Ask a question"]
        for unexpected_text in unexpected_texts:
            assert len(document.xpath('.//a[contains(text(),"{}")]'.format(unexpected_text))) == 0


class TestAwardedBriefPage(BaseBriefPageTest):
    # Banner content on a brief that has been awarded (response 14276 in the
    # default fixture is marked as the winner).

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = 'awarded'
        self.brief['briefs']['awardedBriefResponseId'] = 14276

    def test_award_banner_with_winning_supplier_shown_on_awarded_brief_page(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]

        assert 'Awarded to Example Company Limited' in awarded_banner.xpath('h2/text()')[0]

    def test_contract_start_date_visible_on_award_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]

        assert 'Start date: Monday 21 August 2017' in awarded_banner.xpath('p/text()')[0]

    # NOTE(review): the next method's body falls past this chunk boundary;
    # only its 'def' keyword sits here (fragment preserved verbatim).
    def
    # NOTE(review): the 'def' keyword for this method sits at the end of the
    # previous chunk line; fragment preserved verbatim.
    # Whole-pound award values are shown without trailing '.00'.
    test_contract_value_visible_on_award_banner_does_not_include_zero_pence(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]

        assert u'Value: £20,000' in awarded_banner.xpath('p/text()')[1]

    def test_contract_value_visible_on_award_banner_includes_non_zero_pence(self):
        # Non-zero pence must be preserved in the displayed value.
        self.brief_responses["briefResponses"][1]["awardDetails"]["awardedContractValue"] = "20000.10"
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]

        assert u'Value: £20,000.10' in awarded_banner.xpath('p/text()')[1]

    def test_supplier_size_visible_on_award_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        awarded_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]

        assert 'Company size: SME' in awarded_banner.xpath('p/text()')[2]


class TestCancelledBriefPage(BaseBriefPageTest):
    # Banner content on a cancelled brief.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = 'cancelled'

    def test_cancelled_banner_shown_on_cancelled_brief_page(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        cancelled_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]

        assert 'This opportunity was cancelled' in cancelled_banner.xpath('h2/text()')[0]

    def test_explanation_message_shown_on_cancelled_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        cancelled_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        expected_message = ("The buyer cancelled this opportunity, for example because they no longer have the budget. "
                            "They may publish an updated version later."
                            )

        assert expected_message in cancelled_banner.xpath('p/text()')[0]


class TestUnsuccessfulBriefPage(BaseBriefPageTest):
    # Banner content on an unsuccessful (no suitable suppliers) brief.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = 'unsuccessful'

    def test_unsuccessful_banner_shown_on_unsuccessful_brief_page(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        unsuccessful_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]

        assert 'No suitable suppliers applied' in unsuccessful_banner.xpath('h2/text()')[0]

    def test_explanation_message_shown_on_unsuccessful_banner(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))

        # NOTE(review): variable name carried over from the cancelled-page
        # test above; it holds the unsuccessful-page banner here.
        cancelled_banner = document.xpath('//div[@class="banner-temporary-message-without-action"]')[0]
        expected_message = ("The buyer didn't award this contract because no suppliers met their requirements. "
                            "They may publish an updated version later."
                            )

        assert expected_message in cancelled_banner.xpath('p/text()')[0]


class TestWithdrawnSpecificBriefPage(BaseBriefPageTest):
    # A withdrawn brief stays visible but loses the apply route and gains a
    # withdrawal banner.

    def setup_method(self, method):
        super().setup_method(method)
        self.brief['briefs']['status'] = "withdrawn"
        self.brief['briefs']['withdrawnAt'] = "2016-11-25T10:47:23.126761Z"

    def test_dos_brief_visible_when_withdrawn(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        assert res.status_code == 200

    def test_apply_button_not_visible_for_withdrawn_briefs(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        document = html.fromstring(res.get_data(as_text=True))
        apply_links = document.xpath('//a[@href="/suppliers/opportunities/{}/responses/start"]'.format(self.brief_id))
        assert len(apply_links) == 0

    def test_deadline_text_not_shown(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        assert 'The deadline for asking questions about this opportunity was ' not in page

    def test_withdrawn_banner_shown_on_withdrawn_brief(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        # Banner copy is asserted against the raw (HTML-escaped) page source.
        assert 'This opportunity was withdrawn on' in page
        assert (
            "You can&#39;t apply for this opportunity now. "
            "The buyer may publish an updated&nbsp;version on the Digital&nbsp;Marketplace"
        ) in page

    @pytest.mark.parametrize('status', ['live', 'closed'])
    def test_withdrawn_banner_not_shown_on_live_and_closed_brief(self, status):
        self.brief['briefs']['status'] = status
        del self.brief['briefs']['withdrawnAt']
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        assert 'This opportunity was withdrawn on' not in page

    def test_dateformat_in_withdrawn_banner_displayed_correctly(self):
        res = self.client.get('/digital-outcomes-and-specialists/opportunities/{}'.format(self.brief_id))
        page = res.get_data(as_text=True)
        assert 'This opportunity was withdrawn on Friday&nbsp;25&nbsp;November&nbsp;2016' in page


class TestCatalogueOfBriefsPage(APIClientMixin, BaseApplicationTest):
    # Opportunity catalogue (search/filter) page. NOTE(review): this class
    # continues past the end of this chunk.

    def setup_method(self, method):
        super().setup_method(method)

        self.search_api_client.search.return_value = self._get_dos_brief_search_api_response_fixture_data()
        # One aggregation response per DOS lot, consumed in this order.
        self.search_api_client.aggregate.side_effect = [
            self._get_dos_brief_search_api_aggregations_response_outcomes_fixture_data(),
            self._get_dos_brief_search_api_aggregations_response_specialists_fixture_data(),
            self._get_dos_brief_search_api_aggregations_response_user_research_fixture_data(),
        ]
        # Mixed framework statuses/families: only DOS frameworks with
        # further competition should drive the opportunities pages.
        self.data_api_client.find_frameworks.return_value = {'frameworks': [
            FrameworkStub(
                id=3, slug='digital-outcomes-and-specialists-2', status='live', lots=dos_lots(),
                has_further_competition=True
            ).response(),
            FrameworkStub(
                id=1, slug='digital-outcomes-and-specialists', status='expired', lots=dos_lots(),
                has_further_competition=True
            ).response(),
            FrameworkStub(
                id=2, slug='foobar', status='expired', lots=cloud_lots()
            ).response(),
            FrameworkStub(
                id=4, slug='g-cloud-9', status='live', lots=cloud_lots()
            ).response()
        ]}

    def normalize_qs(self, qs):
        # Compare query strings ignoring value ordering and the 'page' param.
        return {k: set(v) for k, v in parse_qs(qs).items() if k != "page"}

    # NOTE(review): this decorator's parameter tuple continues on the next
    # chunk line.
    @pytest.mark.parametrize('framework_family, expected_status_code', (
        # NOTE(review): continuation of the parametrize begun just above this
        # chunk boundary.
        ('digital-outcomes-and-specialists', 200),
        ('g-cloud', 404),
    ))
    def test_404_on_framework_that_does_not_support_further_competition(self, framework_family, expected_status_code):
        res = self.client.get(f'/{framework_family}/opportunities')
        assert res.status_code == expected_status_code

    def test_catalogue_of_briefs_page(self):
        # Unfiltered catalogue: default search, all filters unchecked,
        # aggregated lot counts shown as links.
        res = self.client.get('/digital-outcomes-and-specialists/opportunities')
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open,closed'
        )

        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
            "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        )

        lot_filters = document.xpath("//form[@method='get']//ul[@class='lot-filters--last-list']//a")
        assert set(element.text for element in lot_filters) == {
            "Digital outcomes (629)",
            "Digital specialists (827)",
            "User research participants (39)",
        }
        # No lot selected -> no <strong> (current-selection) entry.
        assert len(document.xpath("//form[@method='get']//ul[@class='lot-filters--last-list']//strong")) == 0

        status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']")
        assert set(element.get("value") for element in status_inputs) == {"open", "closed"}
        assert not any(element.get("checked") for element in status_inputs)

        location_inputs = document.xpath("//form[@method='get']//input[@name='location']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in location_inputs
        } == {
            "scotland": False,
            "north east england": False,
            "north west england": False,
            "yorkshire and the humber": False,
            "east midlands": False,
            "west midlands": False,
            "east of england": False,
            "wales": False,
            "london": False,
            "south east england": False,
            "south west england": False,
            "northern ireland": False,
            "international (outside the uk)": False,
            "offsite": False,
        }

        q_inputs = document.xpath("//form[@method='get']//input[@name='q']")
        assert tuple(element.get("value") for element in q_inputs) == ("",)

        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == "864 results found in All categories"

        specialist_role_labels = document.xpath("//div[@class='search-result']/ul[2]/li[2]/text()")
        assert len(specialist_role_labels) == 2  # only two briefs has a specialist role so only one label should exist
        assert specialist_role_labels[0].strip() == "Developer"
        assert specialist_role_labels[1].strip() == "Technical architect"

    def test_catalogue_of_briefs_page_filtered(self):
        # Filtered catalogue: filters echoed into the search call, checkbox
        # state, clear-filters link, and pagination links preserving the
        # query string.
        original_url = "/digital-outcomes-and-specialists/opportunities?page=2"\
            "&statusOpenClosed=open&lot=digital-outcomes&location=wales&location=london"
        res = self.client.get(original_url)
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open',
            lot='digital-outcomes',
            location='wales,london',
            page='2',
        )

        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
            "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        )

        all_categories_return_link = document.xpath("//form[@method='get']//div[@class='lot-filters']/ul/li/a")[0]
        assert all_categories_return_link.text == 'All categories'
        # The selected lot is rendered as <strong>, the others as links.
        lot_filters = document.xpath("//form[@method='get']//div[@class='lot-filters']//ul//ul/li/*[1]")
        assert {
            element.text: element.tag for element in lot_filters
        } == {
            'Digital outcomes (629)': 'strong',
            'Digital specialists (827)': 'a',
            'User research participants (39)': 'a',
        }

        assert document.xpath(
            "//a[@id=$i][contains(@class, $c)][normalize-space(string())=normalize-space($t)][@href=$h]",
            i="dm-clear-all-filters",
            c="clear-filters-link",
            t="Clear filters",
            h="/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes",
        )

        status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in status_inputs
        } == {
            "open": True,
            "closed": False,
        }

        location_inputs = document.xpath("//form[@method='get']//input[@name='location']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in location_inputs
        } == {
            "scotland": False,
            "north east england": False,
            "north west england": False,
            "yorkshire and the humber": False,
            "east midlands": False,
            "west midlands": False,
            "east of england": False,
            "wales": True,
            "london": True,
            "south east england": False,
            "south west england": False,
            "northern ireland": False,
            "international (outside the uk)": False,
            "offsite": False,
        }

        q_inputs = document.xpath("//form[@method='get']//input[@name='q']")
        assert tuple(element.get("value") for element in q_inputs) == ("",)

        # Prev/next pagination keeps path and (page-stripped) query intact.
        parsed_original_url = urlparse(original_url)
        parsed_prev_url = urlparse(document.xpath("//li[@class='previous']/a/@href")[0])
        parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0])
        assert parsed_original_url.path == parsed_prev_url.path == parsed_next_url.path
        assert self.normalize_qs(parsed_original_url.query) == \
            self.normalize_qs(parsed_next_url.query) == \
            self.normalize_qs(parsed_prev_url.query)

        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == \
            "864 results found in Digital outcomes"

    def test_catalogue_of_briefs_page_filtered_keyword_search(self):
        # Same as the filtered test, plus a keyword query ('q') which must
        # survive into the search call, the input box, the clear-filters
        # link and the summary.
        original_url = "/digital-outcomes-and-specialists/opportunities?page=2"\
            "&statusOpenClosed=open&lot=digital-outcomes"\
            "&location=offsite&q=Richie+Poldy"
        res = self.client.get(original_url)
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open',
            lot='digital-outcomes',
            location='offsite',
            page='2',
            q='Richie Poldy',
        )

        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
            "normalize-space(//div[@class='marketplace-paragraph']/p/text())"
        )

        all_categories_return_link = document.xpath("//form[@method='get']//div[@class='lot-filters']/ul/li/a")[0]
        assert all_categories_return_link.text == 'All categories'
        lot_filters = document.xpath("//form[@method='get']//div[@class='lot-filters']//ul//ul/li/*[1]")
        assert {
            element.text: element.tag for element in lot_filters
        } == {
            'Digital outcomes (629)': 'strong',
            'Digital specialists (827)': 'a',
            'User research participants (39)': 'a',
        }

        assert document.xpath(
            "//a[@id=$i][contains(@class, $c)][normalize-space(string())=normalize-space($t)][@href=$h]",
            i="dm-clear-all-filters",
            c="clear-filters-link",
            t="Clear filters",
            h="/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes&q=Richie+Poldy",
        )

        status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in status_inputs
        } == {
            "open": True,
            "closed": False,
        }

        location_inputs = document.xpath("//form[@method='get']//input[@name='location']")
        assert {
            element.get("value"): bool(element.get("checked")) for element in location_inputs
        } == {
            "scotland": False,
            "north east england": False,
            "north west england": False,
            "yorkshire and the humber": False,
            "east midlands": False,
            "west midlands": False,
            "east of england": False,
            "wales": False,
            "london": False,
            "south east england": False,
            "south west england": False,
            "northern ireland": False,
            "international (outside the uk)": False,
            "offsite": True,
        }

        q_inputs = document.xpath("//form[@method='get']//input[@name='q']")
        assert tuple(element.get("value") for element in q_inputs) == ("Richie Poldy",)

        parsed_original_url = urlparse(original_url)
        parsed_prev_url = urlparse(document.xpath("//li[@class='previous']/a/@href")[0])
        parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0])
        assert parsed_original_url.path == parsed_prev_url.path == parsed_next_url.path
        assert self.normalize_qs(parsed_original_url.query) == \
            self.normalize_qs(parsed_next_url.query) == \
            self.normalize_qs(parsed_prev_url.query)

        ss_elem = document.xpath("//p[@class='search-summary']")[0]
        assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == \
            "864 results found containing Richie Poldy in Digital outcomes"

    def test_catalogue_of_briefs_page_filtered_all_lots_selected(self):
        # NOTE(review): all three lots are requested yet the search call is
        # asserted with lot='digital-outcomes' only — presumably the view
        # collapses/normalises the lot filter; verify against the view code.
        original_url = "/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes&lot=digital-specialists"\
            "&lot=user-research-participants"
        res = self.client.get(original_url)
        assert res.status_code == 200
        document = html.fromstring(res.get_data(as_text=True))

        self.data_api_client.find_frameworks.assert_called_once_with()
        self.search_api_client.search.assert_called_once_with(
            index='briefs-digital-outcomes-and-specialists',
            doc_type='briefs',
            statusOpenClosed='open,closed',
            lot='digital-outcomes',
        )

        heading = document.xpath('normalize-space(//h1/text())')
        assert heading == "Digital Outcomes and Specialists opportunities"
        # NOTE(review): this assertion is cut off at the end of this chunk;
        # it continues past the visible source.
        assert ('View buyer requirements for digital outcomes, '
                'digital specialists and user research participants') in document.xpath(
"normalize-space(//div[@class='marketplace-paragraph']/p/text())" ) all_categories_return_link = document.xpath("//form[@method='get']//div[@class='lot-filters']/ul/li/a")[0] assert all_categories_return_link.text == 'All categories' lot_filters = document.xpath("//form[@method='get']//div[@class='lot-filters']//ul//ul/li/*[1]") assert { element.text: element.tag for element in lot_filters } == { 'Digital outcomes (629)': 'strong', 'Digital specialists (827)': 'a', 'User research participants (39)': 'a', } status_inputs = document.xpath("//form[@method='get']//input[@name='statusOpenClosed']") assert { element.get("value"): bool(element.get("checked")) for element in status_inputs } == { "open": False, "closed": False, } location_inputs = document.xpath("//form[@method='get']//input[@name='location']") assert { element.get("value"): bool(element.get("checked")) for element in location_inputs } == { "scotland": False, "north east england": False, "north west england": False, "yorkshire and the humber": False, "east midlands": False, "west midlands": False, "east of england": False, "wales": False, "london": False, "south east england": False, "south west england": False, "northern ireland": False, "international (outside the uk)": False, "offsite": False, } q_inputs = document.xpath("//form[@method='get']//input[@name='q']") assert tuple(element.get("value") for element in q_inputs) == ("",) parsed_original_url = urlparse(original_url) parsed_next_url = urlparse(document.xpath("//li[@class='next']/a/@href")[0]) assert parsed_original_url.path == parsed_next_url.path assert self.normalize_qs(parsed_next_url.query) == {'lot': {'digital-outcomes'}} ss_elem = document.xpath("//p[@class='search-summary']")[0] assert self._normalize_whitespace(self._squashed_element_text(ss_elem)) == \ "864 results found in Digital outcomes" @pytest.mark.parametrize( ('dos_status', 'dos2_status', 'expected_url_slug_suffix'), ( ('live', 'standstill', ''), ('expired', 'live', '-2'), ) ) 
@mock.patch('app.main.views.marketplace.content_loader') def test_opportunity_data_download_info_and_link_visible_on_catalogue_page( self, content_loader, dos_status, dos2_status, expected_url_slug_suffix ): self.data_api_client.find_frameworks.return_value = {'frameworks': [ FrameworkStub( id=3, slug='digital-outcomes-and-specialists-2', status=dos2_status, lots=dos_lots(), has_further_competition=True ).response(), FrameworkStub( id=1, slug='digital-outcomes-and-specialists', status=dos_status, lots=dos_lots(), has_further_competition=True ).response() ]} res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) header = document.xpath("//h2[@id='opportunity-data-header']")[0].text description = document.xpath("//p[@id='opportunity-data-description']")[0].text expected_desc = "Download data buyers have provided about closed opportunities. Some data may be missing." link = document.xpath("//a[normalize-space(text())='Download data (CSV)']")[0].values() expected_link = ( "https://assets.digitalmarketplace.service.gov.uk" + f"/digital-outcomes-and-specialists{expected_url_slug_suffix}/communications/data/opportunity-data.csv" ) assert "Opportunity data" in header assert expected_desc in description assert expected_link in link def test_catalogue_of_briefs_page_shows_pagination_if_more_pages(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=2') assert res.status_code == 200 page = res.get_data(as_text=True) document = html.fromstring(page) assert '<li class="previous">' in page assert '<li class="next">' in page prev_url = str(document.xpath('string(//li[@class="previous"]/a/@href)')) next_url = str(document.xpath('string(//li[@class="next"]/a/@href)')) assert prev_url.endswith('/opportunities?page=1') assert next_url.endswith('/opportunities?page=3') assert '<span class="page-numbers">1 of 9</span>' in res.get_data(as_text=True) assert 
'<span class="page-numbers">3 of 9</span>' in res.get_data(as_text=True) def test_no_pagination_if_no_more_pages(self): with self.app.app_context(): current_app.config['DM_SEARCH_PAGE_SIZE'] = 1000 res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 page = res.get_data(as_text=True) assert '<li class="previous">' not in page assert '<li class="next">' not in page def test_catalogue_of_briefs_page_404_for_framework_that_does_not_exist(self): res = self.client.get('/digital-giraffes-and-monkeys/opportunities') assert res.status_code == 404 self.data_api_client.find_frameworks.assert_called_once_with() def test_briefs_search_has_js_hidden_filter_button(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) filter_button = document.xpath( '//button[contains(@class, "js-hidden")][contains(@class, "js-dm-live-search")]' '[normalize-space(text())="Filter"]' ) assert len(filter_button) == 1 def test_opportunity_status_and_published_date(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) live_opportunity_published_at = document.xpath( '//div[@class="search-result"][1]//li[@class="search-result-metadata-item"]' )[-3].text_content().strip() assert live_opportunity_published_at == "Published: Friday 17 November 2017" live_opportunity_qs_closing_at = document.xpath( '//div[@class="search-result"][1]//li[@class="search-result-metadata-item"]' )[-2].text_content().strip() assert live_opportunity_qs_closing_at == "Deadline for asking questions: Sunday 26 November 2017" live_opportunity_closing_at = document.xpath( '//div[@class="search-result"][1]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert live_opportunity_closing_at == "Closing: Friday 1 December 2017" 
closed_opportunity_status = document.xpath( '//div[@class="search-result"][2]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert closed_opportunity_status == "Closed: awaiting outcome" unsuccessful_opportunity_status = document.xpath( '//div[@class="search-result"][3]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert unsuccessful_opportunity_status == "Closed: no suitable suppliers" cancelled_opportunity_status = document.xpath( '//div[@class="search-result"][4]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert cancelled_opportunity_status == "Closed: cancelled" awarded_opportunity_status = document.xpath( '//div[@class="search-result"][6]//li[@class="search-result-metadata-item"]' )[-1].text_content().strip() assert awarded_opportunity_status == "Closed: awarded" def test_should_render_summary_for_0_results_in_all_lots(self): search_results = self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 0 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">0</span> results found in <em>All categories</em>' in summary def test_should_render_summary_for_0_results_in_particular_lot(self): search_results = self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 0 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">0</span> results found in <em>Digital outcomes</em>' in summary def test_should_render_summary_for_1_result_found_in_all_lots(self): search_results = 
self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 1 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">1</span> result found in <em>All categories</em>' in summary def test_should_render_summary_for_many_results_found_in_a_particular_lot(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?lot=digital-specialists') assert res.status_code == 200 summary = self.find_search_summary(res.get_data(as_text=True))[0] assert '<span class="search-summary-count">864</span> results found in <em>Digital specialists</em>' in summary def test_should_render_suggestions_for_0_results(self): search_results = self._get_dos_brief_search_api_response_fixture_data() search_results['meta']['total'] = 0 self.search_api_client.search.return_value = search_results res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 xpath = html.fromstring(res.get_data(as_text=True)).xpath assert xpath('boolean(//div[contains(@class, "search-suggestion")])') def test_should_not_render_suggestions_when_results(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 xpath = html.fromstring(res.get_data(as_text=True)).xpath assert not xpath('boolean(//div[contains(@class, "search-suggestion")])') def test_should_ignore_unknown_arguments(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?location=my-lovely-horse') assert res.status_code == 200 def test_should_404_on_invalid_page_param(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=1') assert res.status_code == 200 res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=-1') assert res.status_code 
== 404 res = self.client.get('/digital-outcomes-and-specialists/opportunities?page=potato') assert res.status_code == 404 def test_search_results_with_invalid_lot_fall_back_to_all_categories(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?lot=bad-lot-slug') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) lots = document.xpath('//div[@class="lot-filters"]//ul[@class="lot-filters--last-list"]//li/a') assert lots[0].text_content().startswith('Digital outcomes') assert lots[1].text_content().startswith('Digital specialists') assert lots[2].text_content().startswith('User research participants') def test_lot_links_retain_all_category_filters(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?location=london') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) lots = document.xpath('//div[@class="lot-filters"]//ul[@class="lot-filters--last-list"]//li/a') for lot in lots: assert 'location=london' in lot.get('href') def test_lot_with_no_briefs_is_not_a_link(self): specialists_aggregation = self._get_dos_brief_search_api_aggregations_response_specialists_fixture_data() specialists_aggregation['aggregations']['lot']['digital-specialists'] = 0 self.search_api_client.aggregate.side_effect = [ self._get_dos_brief_search_api_aggregations_response_outcomes_fixture_data(), specialists_aggregation, self._get_dos_brief_search_api_aggregations_response_user_research_fixture_data(), ] res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) specialist_label = document.xpath("//ul[@class='lot-filters--last-list']//li")[-2] assert len(specialist_label.xpath('a')) == 0 assert specialist_label.text_content() == 'Digital specialists (0)' def test_filter_form_given_filter_selection(self): res = 
self.client.get('/digital-outcomes-and-specialists/opportunities?lot=digital-outcomes&location=london') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) hidden_inputs = document.xpath('//form[@id="js-dm-live-search-form"]//input[@type="hidden"]') kv_pairs = {input_el.get('name'): input_el.get('value') for input_el in hidden_inputs} assert kv_pairs == {'lot': 'digital-outcomes'} class TestCatalogueOfBriefsFilterOnClick(APIClientMixin, BaseApplicationTest): def setup_method(self, method): super().setup_method(method) self.search_api_client.search.return_value = self._get_dos_brief_search_api_response_fixture_data() self.search_api_client.aggregate.side_effect = [ self._get_dos_brief_search_api_aggregations_response_outcomes_fixture_data(), self._get_dos_brief_search_api_aggregations_response_specialists_fixture_data(), self._get_dos_brief_search_api_aggregations_response_user_research_fixture_data(), ] self.data_api_client.find_frameworks.return_value = { 'frameworks': [ FrameworkStub( id=3, slug='digital-outcomes-and-specialists-2', status='live', lots=dos_lots(), has_further_competition=True ).response() ] } @pytest.mark.parametrize('query_string, content_type', (('', 'text/html; charset=utf-8'), ('?live-results=true', 'application/json'))) def test_endpoint_switches_on_live_results_request(self, query_string, content_type): res = self.client.get('/digital-outcomes-and-specialists/opportunities{}'.format(query_string)) assert res.status_code == 200 assert res.content_type == content_type def test_live_results_returns_valid_json_structure(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities?live-results=true') data = json.loads(res.get_data(as_text=True)) assert sorted(data.keys()) == sorted(( 'results', 'summary', 'summary-accessible-hint', 'categories', 'filter-title' )) for k, v in data.items(): assert set(v.keys()) == {'selector', 'html'} # We want to enforce using css IDs to describe the nodes which 
should be replaced. assert v['selector'].startswith('#') live_results_expected_templates = ( "search/_results_wrapper.html", "search/_categories_wrapper.html", "search/_summary.html", "search/_summary_accessible_hint.html", "search/_filter_title.html" ) @pytest.mark.parametrize( ('query_string', 'urls'), ( ('', ('search/briefs.html',)), ('?live-results=true', live_results_expected_templates) ) ) @mock.patch('app.main.views.marketplace.render_template', autospec=True) def test_base_page_renders_search_services(self, render_template_patch, query_string, urls): render_template_patch.return_value = '<p>some html</p>' self.client.get('/digital-outcomes-and-specialists/opportunities{}'.format(query_string)) assert urls == tuple(x[0][0] for x in render_template_patch.call_args_list) def test_form_has_js_hidden_filter_button(self): res = self.client.get('/digital-outcomes-and-specialists/opportunities') assert res.status_code == 200 document = html.fromstring(res.get_data(as_text=True)) filter_button = document.xpath( '//button[contains(@class, "js-hidden")][contains(@class, "js-dm-live-search")]' '[normalize-space(text())="Filter"]' ) assert len(filter_button) == 1 class TestGCloudHomepageLinks(APIClientMixin, BaseApplicationTest): mock_live_g_cloud_framework = { "framework": "g-cloud", "slug": "g-cloud-x", "status": "live", "id": 5 } @pytest.mark.parametrize('framework_slug, gcloud_content', (('g-cloud-8', 'Find cloud technology and support'), ('g-cloud-9', 'Find cloud hosting, software and support'))) def test_g_cloud_homepage_content_is_correct(self, framework_slug, gcloud_content): self.data_api_client.find_frameworks.return_value = { "frameworks": [self.mock_live_g_cloud_framework.copy()] } self.data_api_client.find_frameworks.return_value['frameworks'][0].update({'slug': framework_slug}) res = self.client.get("/") document = html.fromstring(res.get_data(as_text=True)) assert res.status_code == 200 link_texts = [item.text_content().strip() for item in 
document.cssselect('#app-buyer-nav a')] assert link_texts[-2] == gcloud_content assert link_texts[-1] == 'Find physical datacentre space'
#!/usr/bin/env python
# CommandLine.py
# Copyright (C) 2006 CCLRC, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# 12th June 2006
#
# A handler for all of the information which may be passed in on the command
# line. This singleton object should be able to handle the input, structure
# it and make it available in a useful fashion.
#
# This is a hook into a global data repository, should mostly be replaced with
# a Phil interface.

import sys
import os
import exceptions
import copy
import traceback

from xia2.Experts.FindImages import image2template_directory
from xia2.Schema.XProject import XProject
from xia2.Handlers.Flags import Flags
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import Chatter, Debug
from xia2.Handlers.PipelineSelection import add_preference, get_preferences
from xia2.Handlers.Executables import Executables

from libtbx.utils import Sorry

def which(pgm):
  # python equivalent to the 'which' command - returns the full path of the
  # first executable called pgm found on $PATH, or None (implicitly) if absent
  # http://stackoverflow.com/questions/9877462/is-there-a-python-equivalent-to-the-which-command
  path = os.getenv('PATH')
  for p in path.split(os.path.pathsep):
    p = os.path.join(p, pgm)
    if os.path.exists(p) and os.access(p, os.X_OK):
      return p

def load_datablock(filename):
  # Load a dxtbx datablock JSON file and register every imageset it contains
  # in the global imageset_cache, keyed by template then by first image number.
  from xia2.Schema import imageset_cache, update_with_reference_geometry
  from dxtbx.serialize import load
  from libtbx.containers import OrderedDict

  datablocks = load.datablock(filename, check_format=False)

  for datablock in datablocks:
    imagesets = datablock.extract_imagesets()
    params = PhilIndex.get_python_object()
    reference_geometry = params.xia2.settings.input.reference_geometry
    # apply any user-supplied reference geometry before caching
    if reference_geometry is not None and len(reference_geometry) > 0:
      update_with_reference_geometry(imagesets, reference_geometry)
    for imageset in imagesets:
      template = imageset.get_template()
      if template not in imageset_cache:
        imageset_cache[template] = OrderedDict()
      imageset_cache[template][
        imageset.get_scan().get_image_range()[0]] = imageset

class _CommandLine(object):
  '''A class to represent the command line input.'''

  def __init__(self):
    '''Initialise all of the information from the command line.'''

    self._argv = []
    # indices into self._argv which were recognised; used at the end of
    # setup() to report any unconsumed (nonsense) options
    self._understood = []

    self._default_template = []
    self._default_directory = []
    self._hdf5_master_files = []
    self._default_start_end = { }

    return

  def get_argv(self):
    return self._argv

  def print_command_line(self):
    # echo the reconstructed command line to the user-facing stream
    cl = self.get_command_line()
    Chatter.write('Command line: %s' % cl)
    return

  def get_command_line(self):
    '''Reconstruct a quoted, human-readable version of the command line,
    normalising the dispatcher name to plain "xia2".'''
    import libtbx.load_env
    cl = libtbx.env.dispatcher_name
    if cl:
      if 'xia2' not in cl or 'python' in cl:
        cl = 'xia2'
    else:
      cl = 'xia2'

    for arg in sys.argv[1:]:
      if ' ' in arg:
        # re-quote arguments containing spaces so the line can be replayed
        arg = '"%s"' % arg
      cl += ' %s' % arg

    return cl

  def setup(self):
    '''Set everything up...'''

    # check arguments are all ascii

    for token in sys.argv:
      try:
        token.encode('ascii')
      except UnicodeDecodeError, e:
        raise RuntimeError, 'non-ascii characters in input'

    self._argv = copy.deepcopy(sys.argv)

    # first of all try to interpret arguments as phil parameters/files;
    # anything PHIL does not recognise is collected back into self._argv
    # for the legacy -flag parsers below

    from xia2.Handlers.Phil import master_phil
    from libtbx.phil import command_line
    cmd_line = command_line.argument_interpreter(master_phil=master_phil)
    working_phil, self._argv = cmd_line.process_and_fetch(
      args=self._argv, custom_processor="collect_remaining")
    PhilIndex.merge_phil(working_phil)
    try:
      PhilIndex.get_python_object()
    except RuntimeError, e:
      # report PHIL parse/validation problems as a user-level error
      raise Sorry(e)

    #PhilIndex.get_diff().show()

    # things which are single token flags...
    self._read_debug()
    self._read_interactive()
    self._read_ice()
    self._read_egg()
    self._read_uniform_sd()
    self._read_trust_timestamps()
    self._read_batch_scale()
    self._read_small_molecule()
    self._read_quick()
    self._read_chef()
    self._read_mask()
    self._read_reversephi()
    self._read_no_lattice_test()
    self._read_no_relax()
    self._read_no_profile()
    self._read_norefine()
    self._read_noremove()

    # pipeline options
    self._read_2d()
    self._read_2di()
    self._read_dials()
    self._read_3d()
    self._read_3di()
    self._read_3dii()
    self._read_3dd()

    self._read_migrate_data()
    self._read_zero_dose()
    self._read_free_fraction()
    self._read_free_total()

    # finer grained control over the selection of indexer, integrater
    # and scaler to use.
    self._read_indexer()
    self._read_integrater()
    self._read_scaler()
    self._read_executables()

    # flags relating to unfixed bugs...
    self._read_fixed_628()

    # options taking a value: each parser raises on malformed input and the
    # exception is re-raised here with the matching usage/help string attached

    try:
      self._read_beam()
    except:
      raise RuntimeError, self._help_beam()

    try:
      self._read_cell()
    except:
      raise RuntimeError, self._help_cell()

    try:
      self._read_image()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_image(), str(e))

    try:
      self._read_project_name()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_project_name(), str(e))

    try:
      self._read_atom_name()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_atom_name(), str(e))

    try:
      self._read_phil()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_phil(), str(e))

    try:
      self._read_crystal_name()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_crystal_name(), str(e))

    try:
      self._read_ispyb_xml_out()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_ispyb_xml_out(), str(e))

    try:
      self._read_hdr_in()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_hdr_in(), str(e))

    try:
      self._read_hdr_out()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_hdr_out(), str(e))

    try:
      self._read_pickle()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_pickle(), str(e))

    try:
      self._read_parallel()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_parallel(), str(e))

    try:
      self._read_serial()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_serial(), str(e))

    try:
      self._read_xparm()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_xparm(), str(e))

    try:
      self._read_xparm_ub()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_xparm_ub(), str(e))

    try:
      self._read_min_images()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_min_images(), str(e))

    try:
      self._read_start_end()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_start_end(), str(e))

    try:
      self._read_xparallel()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_xparallel(), str(e))

    try:
      self._read_spacegroup()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_spacegroup(), str(e))

    try:
      self._read_resolution()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_resolution(), str(e))

    try:
      self._read_z_min()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_z_min(), str(e))

    try:
      self._read_aimless_secondary()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_aimless_secondary(), str(e))

    try:
      self._read_freer_file()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_freer_file(), str(e))

    try:
      self._read_reference_reflection_file()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_reference_reflection_file(), str(e))

    try:
      self._read_rejection_threshold()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_rejection_threshold(), str(e))

    try:
      self._read_isigma()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_isigma(), str(e))

    try:
      self._read_misigma()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_misigma(), str(e))

    try:
      self._read_rmerge()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_rmerge(), str(e))

    try:
      self._read_cc_half()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_cc_half(), str(e))

    try:
      self._read_microcrystal()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_microcrystal(), str(e))

    try:
      self._read_failover()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_failover(), str(e))

    try:
      self._read_blend()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_blend(), str(e))

    try:
      self._read_completeness()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_completeness(), str(e))

    try:
      self._read_scale_model()
    except exceptions.Exception, e:
      raise RuntimeError, '%s (%s)' % \
            (self._help_scale_model(), str(e))

    # FIXME add some consistency checks in here e.g. that there are
    # images assigned, there is a lattice assigned if cell constants
    # are given and so on

    # resolve Auto values for the multiprocessing settings and mirror the
    # final njob/nproc numbers back into the PHIL index
    params = PhilIndex.get_python_object()
    mp_params = params.xia2.settings.multiprocessing
    from libtbx import Auto
    if mp_params.mode == 'parallel':
      if mp_params.type == 'qsub':
        if which('qsub') is None:
          raise Sorry('qsub not available')
      # NOTE(review): branch nesting below reconstructed from collapsed
      # source - confirm against upstream xia2 before relying on it
      if mp_params.njob is Auto:
        from xia2.Handlers.Environment import get_number_cpus
        mp_params.njob = get_number_cpus()
        if mp_params.nproc is Auto:
          mp_params.nproc = 1
      elif mp_params.nproc is Auto:
        from xia2.Handlers.Environment import get_number_cpus
        mp_params.nproc = get_number_cpus()
        Flags.set_parallel(mp_params.nproc)
      else:
        Flags.set_parallel(mp_params.nproc)
    elif mp_params.mode == 'serial':
      mp_params.njob = 1
      if mp_params.nproc is Auto:
        from xia2.Handlers.Environment import get_number_cpus
        mp_params.nproc = get_number_cpus()
      Flags.set_parallel(mp_params.nproc)
    PhilIndex.update("xia2.settings.multiprocessing.njob=%d" %mp_params.njob)
    PhilIndex.update("xia2.settings.multiprocessing.nproc=%d" %mp_params.nproc)
    params = PhilIndex.get_python_object()
    mp_params = params.xia2.settings.multiprocessing

    # register any explicit pipeline-component choices
    if params.xia2.settings.indexer is not None:
      add_preference("indexer", params.xia2.settings.indexer)
    if params.xia2.settings.multi_sweep_indexing is Auto:
      # multi-sweep indexing defaults on only for small-molecule + dials
      params.xia2.settings.multi_sweep_indexing = \
        Flags.get_small_molecule() and 'dials' == params.xia2.settings.indexer
    if params.xia2.settings.refiner is not None:
      add_preference("refiner", params.xia2.settings.refiner)
    if params.xia2.settings.integrater is not None:
      add_preference("integrater", params.xia2.settings.integrater)
    if params.xia2.settings.scaler is not None:
      add_preference("scaler", params.xia2.settings.scaler)

    if params.xia2.settings.resolution.d_min is not None:
      Flags.set_resolution_high(params.xia2.settings.resolution.d_min)
    if params.xia2.settings.resolution.d_max is not None:
      Flags.set_resolution_low(params.xia2.settings.resolution.d_max)

    Flags.set_reversephi(params.xia2.settings.input.reverse_phi)

    # pre-load any dxtbx datablock JSON inputs
    input_json = params.xia2.settings.input.json
    if (input_json is not None and len(input_json)):
      for json_file in input_json:
        assert os.path.isfile(json_file)
        load_datablock(json_file)

    # a user-supplied reference geometry implies the beam centre is trusted
    reference_geometry = params.xia2.settings.input.reference_geometry
    if reference_geometry is not None and len(reference_geometry) > 0:
      reference_geometries = "\n".join(
        ["xia2.settings.input.reference_geometry=%s" % os.path.abspath(g)
         for g in params.xia2.settings.input.reference_geometry])
      Debug.write(reference_geometries)
      PhilIndex.update(reference_geometries)
      Debug.write("xia2.settings.trust_beam_centre=true")
      PhilIndex.update("xia2.settings.trust_beam_centre=true")
      params = PhilIndex.get_python_object()

    try:
      self._read_xinfo()
    except exceptions.Exception, e:
      # keep the full traceback for debugging but surface the usage string
      traceback.print_exc(file = open('xia2-xinfo.error', 'w'))
      raise RuntimeError, '%s (%s)' % \
            (self._help_xinfo(), str(e))

    # either use the user's .xinfo file, or fall back to one generated
    # automatically in the current directory
    params = PhilIndex.get_python_object()
    if params.xia2.settings.input.xinfo is not None:
      xinfo_file = os.path.abspath(params.xia2.settings.input.xinfo)
      PhilIndex.update("xia2.settings.input.xinfo=%s" %xinfo_file)
      params = PhilIndex.get_python_object()
      self.set_xinfo(xinfo_file)

      Debug.write(60 * '-')
      Debug.write('XINFO file: %s' % xinfo_file)
      for record in open(xinfo_file, 'r').readlines():
        # don't want \n on the end...
        Debug.write(record[:-1])
      Debug.write(60 * '-')
    else:
      xinfo_file = '%s/automatic.xinfo' %os.path.abspath(
        os.curdir)
      PhilIndex.update("xia2.settings.input.xinfo=%s" %xinfo_file)
      params = PhilIndex.get_python_object()

    # absolutise any per-step dials PHIL file paths
    if params.dials.find_spots.phil_file is not None:
      PhilIndex.update("dials.find_spots.phil_file=%s" %os.path.abspath(
        params.dials.find_spots.phil_file))
    if params.dials.index.phil_file is not None:
      PhilIndex.update("dials.index.phil_file=%s" %os.path.abspath(
        params.dials.index.phil_file))
    if params.dials.refine.phil_file is not None:
      PhilIndex.update("dials.refine.phil_file=%s" %os.path.abspath(
        params.dials.refine.phil_file))
    if params.dials.integrate.phil_file is not None:
      PhilIndex.update("dials.integrate.phil_file=%s" %os.path.abspath(
        params.dials.integrate.phil_file))
    params = PhilIndex.get_python_object()

    # interpret image= inputs, which may carry an optional :start:end suffix
    datasets = PhilIndex.params.xia2.settings.input.image
    for dataset in datasets:
      start_end = None
      if ':' in dataset:
        tokens = dataset.split(':')
        # cope with windows drives i.e. C:\data\blah\thing_0001.cbf:1:100
        if len(tokens[0]) == 1:
          tokens = ['%s:%s' % (tokens[0], tokens[1])] + tokens[2:]
        if len(tokens) != 3:
          raise RuntimeError, '/path/to/image_0001.cbf:start:end'
        dataset = tokens[0]
        start_end = int(tokens[1]), int(tokens[2])

      from xia2.Applications.xia2setup import is_hd5f_name
      if is_hd5f_name(dataset):
        # HDF5 master files are recorded directly, not via template/directory
        self._hdf5_master_files.append(os.path.abspath(dataset))
        if start_end:
          Debug.write('Image range: %d %d' % start_end)
          self._default_start_end[dataset] = start_end
        else:
          Debug.write('No image range specified')
      else:
        template, directory = image2template_directory(os.path.abspath(dataset))

        self._default_template.append(template)
        self._default_directory.append(directory)

        Debug.write('Interpreted from image %s:' % dataset)
        Debug.write('Template %s' % template)
        Debug.write('Directory %s' % directory)

        if start_end:
          Debug.write('Image range: %d %d' % start_end)
          self._default_start_end[os.path.join(directory, template)] = start_end
        else:
          Debug.write('No image range specified')

    # finally, check that all arguments were read and raise an exception
    # if any of them were nonsense.

    with open('xia2-working.phil', 'wb') as f:
      print >> f, PhilIndex.working_phil.as_str()
    with open('xia2-diff.phil', 'wb') as f:
      print >> f, PhilIndex.get_diff().as_str()

    Debug.write('\nDifference PHIL:')
    Debug.write(PhilIndex.get_diff().as_str(), strip=False)

    Debug.write('Working PHIL:')
    Debug.write(PhilIndex.working_phil.as_str(), strip=False)

    # any option-looking token whose index was never recorded in
    # self._understood is reported as unknown
    nonsense = 'Unknown command-line options:'
    was_nonsense = False

    for j, argv in enumerate(self._argv):
      if j == 0:
        continue
      if argv[0] != '-' and '=' not in argv:
        continue
      if not j in self._understood:
        nonsense += ' %s' % argv
        was_nonsense = True

    if was_nonsense:
      raise RuntimeError, nonsense

    return

  # command line parsers, getters and help functions.
def _read_beam(self): '''Read the beam centre from the command line.''' index = -1 try: index = self._argv.index('-beam') except ValueError, e: self._beam = None return if index < 0: raise RuntimeError, 'negative index' #try: #beam = self._argv[index + 1].split(',') #except IndexError, e: #raise RuntimeError, '-beam correct use "-beam x,y"' #if len(beam) != 2: #raise RuntimeError, '-beam correct use "-beam x,y"' #self._beam = (float(beam[0]), float(beam[1])) self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -beam option deprecated: please use beam_centre='%s' instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.beam_centre=%s" %self._argv[index +1 ]) PhilIndex.get_python_object() Debug.write('Beam read from command line as %f %f' % tuple( PhilIndex.params.xia2.settings.beam_centre)) self._beam = tuple(PhilIndex.params.xia2.settings.beam_centre) return def _help_beam(self): '''Return a help string for the read beam method.''' return '-beam x,y' def get_beam(self): return self._beam def _help_image(self): '''Return a string for explaining the -image method.''' return '-image /path/to/an/image_001.img' def _read_image(self): '''Read image information from the command line.''' index = -1 try: index = self._argv.index('-image') except ValueError, e: # the token is not on the command line self._default_template = [] self._default_directory = [] return image = self._argv[index + 1] # check if there is a space in the image name - this happens and it # looks like the python input parser will split it even if the # space is escaped or it is quoted if image[-1] == '\\': try: image = '%s %s' % (self._argv[index + 1][:-1], self._argv[index + 2]) except: raise RuntimeError, 'image name ends in \\' # XXX Warning added 2015-04-23 Chatter.write( "Warning: -image option deprecated: please use image='%s' instead" %( image)) PhilIndex.update("xia2.settings.input.image=%s" %image) 
PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_atom_name(self): try: index = self._argv.index('-atom') except ValueError, e: self._default_atom_name = None return self._default_atom_name = self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) Debug.write('Heavy atom: %s' % \ self._default_atom_name) return def _help_atom_name(self): return '-atom se' def get_atom_name(self): return self._default_atom_name def _read_phil(self): try: index = self._argv.index('-phil') except ValueError, e: return Chatter.bigbanner('-phil option now no longer needed: ' 'please just place file on command-line', size=80) self._understood.append(index) if True: return PhilIndex.merge_param_file(self._argv[index + 1]) PhilIndex.get_python_object() self._understood.append(index + 1) Debug.write('Phil file: %s' % self._argv[index + 1]) return def _help_phil(self): return '-phil parameters.phil' def _read_project_name(self): try: index = self._argv.index('-project') except ValueError, e: self._default_project_name = None return self._default_project_name = self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) Debug.write('Project: %s' % self._default_project_name) return def _help_project_name(self): return '-project foo' def get_project_name(self): return self._default_project_name def _read_crystal_name(self): try: index = self._argv.index('-crystal') except ValueError, e: self._default_crystal_name = None return self._default_crystal_name = self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) Debug.write('Crystal: %s' % self._default_crystal_name) return def _help_crystal_name(self): return '-crystal foo' def get_crystal_name(self): return self._default_crystal_name def _read_xinfo(self): try: index = self._argv.index('-xinfo') except ValueError, e: self._xinfo = None return if index < 0: raise RuntimeError, 'negative index 
(no xinfo file name given)' self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2015-03-02 Chatter.write( "Warning: -xinfo option deprecated: please use xinfo='%s' instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.input.xinfo=%s" %self._argv[index + 1]) PhilIndex.get_python_object() return def _help_xinfo(self): return '-xinfo example.xinfo' def set_xinfo(self, xinfo): with open(xinfo, 'rb') as f: Debug.write('\n' + xinfo) Debug.write(f.read()) self._xinfo = XProject(xinfo) def get_xinfo(self): '''Return the XProject.''' return self._xinfo def _read_xparm(self): try: index = self._argv.index('-xparm') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_xparm(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('Rotation axis: %.6f %.6f %.6f' % \ Flags.get_xparm_rotation_axis()) Debug.write('Beam vector: %.6f %.6f %.6f' % \ Flags.get_xparm_beam_vector()) Debug.write('Origin: %.2f %.2f' % \ Flags.get_xparm_origin()) return def _help_xparm(self): return '-xparm GXPARM.XDS' def _read_xparm_ub(self): try: index = self._argv.index('-xparm_ub') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_xparm_ub(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('Real Space A: %.2f %.2f %.2f' % \ tuple(Flags.get_xparm_a())) Debug.write('Real Space B: %.2f %.2f %.2f' % \ tuple(Flags.get_xparm_b())) Debug.write('Real Space C: %.2f %.2f %.2f' % \ tuple(Flags.get_xparm_c())) return def _help_xparm_ub(self): return '-xparm_ub GXPARM.XDS' def _read_parallel(self): try: index = self._argv.index('-parallel') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' if int(self._argv[index + 1]) < 0: raise RuntimeError, 'negative number of processors: %s' % \ self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) 
# XXX Warning added 2014-11-10 Chatter.write( "Warning: -parallel option deprecated: please use nproc=%s instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.multiprocessing.nproc=%s" %( self._argv[index + 1])) PhilIndex.get_python_object() return def _help_parallel(self): return '-parallel N' def _read_serial(self): try: index = self._argv.index('-serial') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) Flags.set_parallel(1) PhilIndex.update("xia2.settings.multiprocessing.nproc=1") PhilIndex.get_python_object() Debug.write('Serial set (i.e. 1 CPU)') return def _help_serial(self): return '-serial' def _read_min_images(self): try: index = self._argv.index('-min_images') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2015-05-01 Chatter.write( "Warning: -min_images option deprecated: please use min_images=%s instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.input.min_images=%i" %( int(self._argv[index + 1]))) PhilIndex.get_python_object() return def _help_min_images(self): return '-min_images N' def _read_start_end(self): try: index = self._argv.index('-start_end') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' if not '-image' in self._argv: raise RuntimeError, 'do not use start_end without -image' self._understood.append(index) self._understood.append(index + 1) start, end = tuple(map(int, self._argv[index + 1].split(','))) Flags.set_start_end(start, end) Debug.write('Start, end set to %d %d' % Flags.get_start_end()) return def _help_start_end(self): return '-start_end start,end' def _read_xparallel(self): try: index = self._argv.index('-xparallel') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) 
Flags.set_xparallel(int(self._argv[index + 1])) Debug.write('XParallel set to %d' % Flags.get_xparallel()) return def _help_xparallel(self): return '-xparallel N' def _read_spacegroup(self): try: index = self._argv.index('-spacegroup') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -spacegroup option deprecated: please use space_group='%s' instead" %( self._argv[index + 1])) Flags.set_spacegroup(self._argv[index + 1]) # XXX this line should go PhilIndex.update("xia2.settings.space_group=%s" %self._argv[index + 1]) PhilIndex.get_python_object() Debug.write('Spacegroup set to %s' % self._argv[index + 1]) return def _help_spacegroup(self): return '-spacegroup P43212' def _read_resolution(self): try: index = self._argv.index('-resolution') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' resolution = self._argv[index + 1] if ',' in resolution: a, b = map(float, resolution.split(',')) dmin = min(a, b) dmax = max(a, b) else: dmin = float(resolution) dmax = None self._understood.append(index) self._understood.append(index + 1) PhilIndex.update("xia2.settings.resolution.d_min=%s" %dmin) if dmax is not None: PhilIndex.update("xia2.settings.resolution.d_max=%s" %dmax) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -resolution option deprecated: please use d_min=%s and d_max=%s instead" %( dmin, dmax)) else: # XXX Warning added 2014-11-10 Chatter.write( "Warning: -resolution option deprecated: please use d_min=%s instead" %( dmin)) PhilIndex.get_python_object() if dmax: Debug.write('Resolution set to %.3f %.3f' % (dmin, dmax)) else: Debug.write('Resolution set to %.3f' % dmin) return def _help_resolution(self): return '-resolution high[,low]' def _read_z_min(self): try: index = self._argv.index('-z_min') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' 
self._understood.append(index) self._understood.append(index + 1) Flags.set_z_min(float(self._argv[index + 1])) Debug.write('Z min set to %f' % Flags.get_z_min()) return def _help_z_min(self): return '-z_min N' def _read_aimless_secondary(self): try: index = self._argv.index('-aimless_secondary') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_aimless_secondary(float(self._argv[index + 1])) Debug.write('Aimless secondary set to %f' % Flags.get_aimless_secondary()) return def _help_aimless_secondary(self): return '-aimless_secondary N' def _read_freer_file(self): try: index = self._argv.index('-freer_file') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_freer_file(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('FreeR_flag column taken from %s' % Flags.get_freer_file()) # this should also be used as an indexing reference to make # sense... Flags.set_reference_reflection_file(self._argv[index + 1]) Debug.write('Reference reflection file: %s' % Flags.get_reference_reflection_file()) # and also the spacegroup copied out?! ok - this is done # "by magic" in the scaler. 
return def _help_freer_file(self): return '-freer_file my_freer_file.mtz' def _read_reference_reflection_file(self): try: index = self._argv.index('-reference_reflection_file') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_reference_reflection_file(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('Reference reflection file: %s' % Flags.get_reference_reflection_file()) return def _help_reference_reflection_file(self): return '-reference_reflection_file my_reference_reflection_file.mtz' def _read_rejection_threshold(self): try: index = self._argv.index('-rejection_threshold') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_rejection_threshold(float(self._argv[index + 1])) Debug.write('Rejection threshold set to %f' % \ Flags.get_rejection_threshold()) return def _help_rejection_threshold(self): return '-rejection_threshold N' def _read_isigma(self): try: index = self._argv.index('-isigma') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.isigma=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -isigma option deprecated: please use isigma=%s instead" %self._argv[index + 1]) return def _help_isigma(self): return '-isigma N' def _read_misigma(self): try: index = self._argv.index('-misigma') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.misigma=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -misigma option deprecated: please use misigma=%s instead" %self._argv[index + 1]) return def _help_misigma(self): return '-misigma 
N' def _read_completeness(self): try: index = self._argv.index('-completeness') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.completeness=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -completeness option deprecated: please use completeness=%s instead" %self._argv[index + 1]) return def _help_completeness(self): return '-completeness N' def _read_rmerge(self): try: index = self._argv.index('-rmerge') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.rmerge=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -rmerge option deprecated: please use rmerge=%s instead" %self._argv[index + 1]) return def _help_rmerge(self): return '-rmerge N' def _read_cc_half(self): try: index = self._argv.index('-cc_half') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.cc_half=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -cc_half option deprecated: please use cc_half=%s instead" %self._argv[index + 1]) return def _help_cc_half(self): return '-cc_half N' def _read_microcrystal(self): if '-microcrystal' in self._argv: Flags.set_microcrystal() Debug.write('Microcrystal mode on') self._understood.append(self._argv.index('-microcrystal')) return def _read_failover(self): if '-failover' in self._argv: Flags.set_failover() Debug.write('Failover mode on') self._understood.append(self._argv.index('-failover')) return def _read_blend(self): if '-blend' in self._argv: Flags.set_blend() Debug.write('Blend mode on') self._understood.append(self._argv.index('-blend')) 
return def _read_ispyb_xml_out(self): try: index = self._argv.index('-ispyb_xml_out') except ValueError, e: self._ispyb_xml_out = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_ispyb_xml_out(self._argv[index + 1]) Debug.write('ISPyB XML output set to %s' % self._argv[index + 1]) return def _help_ispyb_xml_out(self): return '-ispyb_xml_out project.xml' def _read_hdr_in(self): try: index = self._argv.index('-hdr_in') except ValueError, e: self._hdr_in = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_hdr_in(self._argv[index + 1]) return def _help_hdr_in(self): return '-hdr_in project.hdr' def _read_hdr_out(self): try: index = self._argv.index('-hdr_out') except ValueError, e: self._hdr_out = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_hdr_out(self._argv[index + 1]) Debug.write('Output header file set to %s' % self._argv[index + 1]) return def _help_hdr_out(self): return '-hdr_out project.hdr' def _read_pickle(self): try: index = self._argv.index('-pickle') except ValueError, e: self._pickle = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_pickle(self._argv[index + 1]) return def _help_pickle(self): return '-pickle name.pkl' def get_template(self): return self._default_template def get_start_end(self, full_template): return self._default_start_end.get(full_template, None) def get_directory(self): return self._default_directory def get_hdf5_master_files(self): return self._hdf5_master_files def _read_trust_timestamps(self): if '-trust_timestamps' in self._argv: Flags.set_trust_timestamps(True) Debug.write('Trust timestamps on') self._understood.append(self._argv.index('-trust_timestamps')) return def 
_read_batch_scale(self): if '-batch_scale' in self._argv: Flags.set_batch_scale(True) Debug.write('Batch scaling mode on') self._understood.append(self._argv.index('-batch_scale')) return def _read_small_molecule(self): if '-small_molecule' in self._argv: Flags.set_small_molecule(True) Debug.write('Small molecule selected') self._understood.append(self._argv.index('-small_molecule')) settings = PhilIndex.get_python_object().xia2.settings PhilIndex.update("xia2.settings.unify_setting=true") return def _read_scale_model(self): try: index = self._argv.index('-scale_model') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_scale_model(self._argv[index + 1]) Debug.write('Scaling model set to: %s' % Flags.get_scale_model()) return def _read_quick(self): if '-quick' in self._argv: Flags.set_quick(True) Debug.write('Quick mode selected') self._understood.append(self._argv.index('-quick')) return def _read_chef(self): if '-chef' in self._argv: Flags.set_chef(True) self._understood.append(self._argv.index('-chef')) Debug.write('Chef mode selected') if '-nochef' in self._argv: Flags.set_chef(False) self._understood.append(self._argv.index('-nochef')) Debug.write('Chef mode deselected') return def _read_reversephi(self): if '-reversephi' in self._argv: self._understood.append(self._argv.index('-reversephi')) # XXX Warning added 2015-11-18 Chatter.write( "Warning: -reversephi option deprecated: please use reverse_phi=True instead") PhilIndex.update("xia2.settings.input.reverse_phi=True") PhilIndex.get_python_object() return def _read_no_lattice_test(self): if '-no_lattice_test' in self._argv: Flags.set_no_lattice_test(True) self._understood.append(self._argv.index('-no_lattice_test')) Debug.write('No lattice test mode selected') return def _read_no_relax(self): if '-no_relax' in self._argv: Flags.set_relax(False) self._understood.append(self._argv.index('-no_relax')) 
Debug.write('XDS relax about indexing selected') return def _read_no_profile(self): if '-no_profile' in self._argv: # XXX Warning added 2016-02-24 Chatter.write( "Warning: -no_profile option deprecated: please use xds.integrate.profile_fitting=False instead") PhilIndex.update("xds.integrate.profile_fitting=False") PhilIndex.get_python_object() self._understood.append(self._argv.index('-no_profile')) return def _read_zero_dose(self): if '-zero_dose' in self._argv: Flags.set_zero_dose(True) self._understood.append(self._argv.index('-zero_dose')) Debug.write('Zero-dose mode (XDS/XSCALE) selected') return def _read_norefine(self): if '-norefine' in self._argv: Flags.set_refine(False) self._understood.append(self._argv.index('-norefine')) # FIXME what does this do??? - switch off orientation refinement # in integration return def _read_noremove(self): if '-noremove' in self._argv: self._understood.append(self._argv.index('-noremove')) Flags.set_remove(False) return def _read_2d(self): if '-2d' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=mosflm") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=mosflm") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=mosflmr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=ccp4a") PhilIndex.get_python_object() self._understood.append(self._argv.index('-2d')) Debug.write('2DA pipeline selected') return def _read_2di(self): if '-2di' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=mosflm") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=mosflm") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=mosflmr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=ccp4a") PhilIndex.get_python_object() 
self._understood.append(self._argv.index('-2di')) Debug.write('2DA pipeline; mosflm indexing selected') return def _read_dials(self): if '-dials' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=dials") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=dials") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=dials") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=ccp4a") PhilIndex.get_python_object() self._understood.append(self._argv.index('-dials')) Debug.write('DIALS pipeline selected') return def _read_3d(self): if '-3d' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=xds") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3d')) Debug.write('3DR pipeline selected') return def _read_3di(self): if '-3di' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=xds") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3di')) Debug.write('3DR pipeline; XDS indexing selected') return def _read_3dii(self): if '-3dii' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=xdsii") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if 
settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3dii')) Debug.write('3D II R pipeline (XDS IDXREF all images) selected') return def _read_3dd(self): if '-3dd' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=dials") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3dd')) Debug.write('3DD pipeline (DIALS indexing) selected') return def _read_debug(self): if '-debug' in self._argv: # join the debug stream to the main output Debug.join(Chatter) self._understood.append(self._argv.index('-debug')) Debug.write('Debugging output switched on') return def _read_interactive(self): if '-interactive' in self._argv: Flags.set_interactive(True) self._understood.append(self._argv.index('-interactive')) Debug.write('Interactive indexing ON') return def _read_ice(self): if '-ice' in self._argv: Flags.set_ice(True) self._understood.append(self._argv.index('-ice')) Debug.write('Ice ring exclusion ON') return def _read_egg(self): if '-egg' in self._argv: self._understood.append(self._argv.index('-egg')) Flags.set_egg(True) return def _read_uniform_sd(self): if '-no_uniform_sd' in self._argv: Flags.set_uniform_sd(False) self._understood.append(self._argv.index('-no_uniform_sd')) Debug.write('Uniform SD OFF') return def _read_migrate_data(self): if '-migrate_data' in self._argv: Flags.set_migrate_data(True) self._understood.append(self._argv.index('-migrate_data')) Debug.write('Data migration switched on') return def _read_cell(self): '''Read the cell constants from the 
command line.''' index = -1 try: index = self._argv.index('-cell') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' try: cell = self._argv[index + 1].split(',') except IndexError, e: raise RuntimeError, \ '-cell correct use "-cell a,b,c,alpha,beta,gamma"' if len(cell) != 6: raise RuntimeError, \ '-cell correct use "-cell a,b,c,alpha,beta,gamma"' _cell = tuple(map(float, cell)) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -cell option deprecated: please use unit_cell='%s' instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.unit_cell=%s,%s,%s,%s,%s,%s" %_cell) PhilIndex.get_python_object() format = 6 * ' %7.2f' self._understood.append(index) self._understood.append(index + 1) Debug.write('Cell read from command line:' + \ format % _cell) return def _help_cell(self): '''Return a help string for the read cell method.''' return '-cell a,b,c,alpha,beta,gamma' def _read_free_fraction(self): try: index = self._argv.index('-free_fraction') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_free_fraction(float(self._argv[index + 1])) Debug.write('Free fraction set to %f' % Flags.get_free_fraction()) return def _help_free_fraction(self): return '-free_fraction N' def _read_free_total(self): try: index = self._argv.index('-free_total') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_free_total(int(self._argv[index + 1])) Debug.write('Free total set to %f' % Flags.get_free_total()) return def _help_free_total(self): return '-free_total N' def _read_mask(self): try: index = self._argv.index('-mask') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_mask(self._argv[index + 1]) return def get_mask(self): return 
self._mask def _help_mask(self): return '-mask mask.dat' def get_mask(self): return self._mask def _read_fixed_628(self): try: index = self._argv.index('-fixed_628') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) Flags.set_fixed_628() return def _help_fixed_628(self): return '-fixed_628' def _read_indexer(self): try: index = self._argv.index('-indexer') except ValueError, e: return indexer = self._argv[index + 1] # XXX Warning added 2014-11-10 Chatter.write( "Warning: -indexer option deprecated: please use indexer='%s' instead" %( indexer)) PhilIndex.update("xia2.settings.indexer=%s" %indexer) PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_integrater(self): try: index = self._argv.index('-integrater') except ValueError, e: return integrater = self._argv[index + 1] # XXX Warning added 2014-11-10 Chatter.write( "Warning: -integrater option deprecated: please use integrater='%s' instead" %( integrater)) PhilIndex.update("xia2.settings.integrater=%s" %integrater) PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_scaler(self): try: index = self._argv.index('-scaler') except ValueError, e: return scaler = self._argv[index + 1] # XXX Warning added 2014-11-10 Chatter.write( "Warning: -scaler option deprecated: please use scaler='%s' instead" %( scaler)) PhilIndex.update("xia2.settings.scaler=%s" %scaler) PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_executables(self): try: index = self._argv.index('-executable') except ValueError, e: return executable_string = self._argv[index + 1] assert('=' in executable_string) executable, path = executable_string.split('=') Executables.add(executable, path) self._understood.append(index) self._understood.append(index + 1) return CommandLine = _CommandLine() CommandLine.setup() if 
__name__ == '__main__': print CommandLine.get_beam() print CommandLine.get_xinfo() Clean up as we go #!/usr/bin/env python # CommandLine.py # Copyright (C) 2006 CCLRC, Graeme Winter # # This code is distributed under the BSD license, a copy of which is # included in the root directory of this package. # # 12th June 2006 # # A handler for all of the information which may be passed in on the command # line. This singleton object should be able to handle the input, structure # it and make it available in a useful fashion. # # This is a hook into a global data repository, should mostly be replaced with # a Phil interface. import sys import os import exceptions import copy import traceback from xia2.Experts.FindImages import image2template_directory from xia2.Schema.XProject import XProject from xia2.Handlers.Flags import Flags from xia2.Handlers.Phil import PhilIndex from xia2.Handlers.Streams import Chatter, Debug from xia2.Handlers.PipelineSelection import add_preference, get_preferences from xia2.Handlers.Executables import Executables from libtbx.utils import Sorry def which(pgm): # python equivalent to the 'which' command # http://stackoverflow.com/questions/9877462/is-there-a-python-equivalent-to-the-which-command path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p,pgm) if os.path.exists(p) and os.access(p,os.X_OK): return p def load_datablock(filename): from xia2.Schema import imageset_cache, update_with_reference_geometry from dxtbx.serialize import load from libtbx.containers import OrderedDict datablocks = load.datablock(filename, check_format=False) for datablock in datablocks: imagesets = datablock.extract_imagesets() params = PhilIndex.get_python_object() reference_geometry = params.xia2.settings.input.reference_geometry if reference_geometry is not None and len(reference_geometry) > 0: update_with_reference_geometry(imagesets, reference_geometry) for imageset in imagesets: template = imageset.get_template() if template not in 
imageset_cache: imageset_cache[template] = OrderedDict() imageset_cache[template][ imageset.get_scan().get_image_range()[0]] = imageset class _CommandLine(object): '''A class to represent the command line input.''' def __init__(self): '''Initialise all of the information from the command line.''' self._argv = [] self._understood = [] self._default_template = [] self._default_directory = [] self._hdf5_master_files = [] self._default_start_end = { } return def get_argv(self): return self._argv def print_command_line(self): cl = self.get_command_line() Chatter.write('Command line: %s' % cl) return def get_command_line(self): import libtbx.load_env cl = libtbx.env.dispatcher_name if cl: if 'xia2' not in cl or 'python' in cl: cl = 'xia2' else: cl = 'xia2' for arg in sys.argv[1:]: if ' ' in arg: arg = '"%s"' %arg cl += ' %s' % arg return cl def setup(self): '''Set everything up...''' # check arguments are all ascii for token in sys.argv: try: token.encode('ascii') except UnicodeDecodeError, e: raise RuntimeError, 'non-ascii characters in input' self._argv = copy.deepcopy(sys.argv) # first of all try to interpret arguments as phil parameters/files from xia2.Handlers.Phil import master_phil from libtbx.phil import command_line cmd_line = command_line.argument_interpreter(master_phil=master_phil) working_phil, self._argv = cmd_line.process_and_fetch( args=self._argv, custom_processor="collect_remaining") PhilIndex.merge_phil(working_phil) try: PhilIndex.get_python_object() except RuntimeError, e: raise Sorry(e) #PhilIndex.get_diff().show() # things which are single token flags... 
self._read_debug() self._read_interactive() self._read_ice() self._read_egg() self._read_uniform_sd() self._read_trust_timestamps() self._read_batch_scale() self._read_small_molecule() self._read_quick() self._read_chef() self._read_mask() self._read_reversephi() self._read_no_lattice_test() self._read_no_relax() self._read_no_profile() self._read_norefine() self._read_noremove() # pipeline options self._read_2d() self._read_2di() self._read_dials() self._read_3d() self._read_3di() self._read_3dii() self._read_3dd() self._read_migrate_data() self._read_zero_dose() self._read_free_fraction() self._read_free_total() # finer grained control over the selection of indexer, integrater # and scaler to use. self._read_indexer() self._read_integrater() self._read_scaler() self._read_executables() # flags relating to unfixed bugs... self._read_fixed_628() try: self._read_beam() except: raise RuntimeError, self._help_beam() try: self._read_cell() except: raise RuntimeError, self._help_cell() try: self._read_image() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_image(), str(e)) try: self._read_project_name() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_project_name(), str(e)) try: self._read_atom_name() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_atom_name(), str(e)) try: self._read_phil() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_phil(), str(e)) try: self._read_crystal_name() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_crystal_name(), str(e)) try: self._read_ispyb_xml_out() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_ispyb_xml_out(), str(e)) try: self._read_hdr_in() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_hdr_in(), str(e)) try: self._read_hdr_out() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_hdr_out(), 
str(e)) try: self._read_pickle() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_pickle(), str(e)) try: self._read_parallel() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_parallel(), str(e)) try: self._read_serial() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_serial(), str(e)) try: self._read_xparm() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_xparm(), str(e)) try: self._read_xparm_ub() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_xparm_ub(), str(e)) try: self._read_min_images() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_min_images(), str(e)) try: self._read_start_end() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_start_end(), str(e)) try: self._read_xparallel() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_xparallel(), str(e)) try: self._read_spacegroup() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_spacegroup(), str(e)) try: self._read_resolution() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_resolution(), str(e)) try: self._read_z_min() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_z_min(), str(e)) try: self._read_aimless_secondary() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_aimless_secondary(), str(e)) try: self._read_freer_file() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_freer_file(), str(e)) try: self._read_reference_reflection_file() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_reference_reflection_file(), str(e)) try: self._read_rejection_threshold() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_rejection_threshold(), str(e)) try: self._read_isigma() except 
exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_isigma(), str(e)) try: self._read_misigma() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_misigma(), str(e)) try: self._read_rmerge() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_rmerge(), str(e)) try: self._read_cc_half() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_cc_half(), str(e)) try: self._read_microcrystal() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_microcrystal(), str(e)) try: self._read_failover() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_failover(), str(e)) try: self._read_blend() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_blend(), str(e)) try: self._read_completeness() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_completeness(), str(e)) try: self._read_scale_model() except exceptions.Exception, e: raise RuntimeError, '%s (%s)' % \ (self._help_scale_model(), str(e)) # FIXME add some consistency checks in here e.g. 
that there are # images assigned, there is a lattice assigned if cell constants # are given and so on params = PhilIndex.get_python_object() mp_params = params.xia2.settings.multiprocessing from libtbx import Auto if mp_params.mode == 'parallel': if mp_params.type == 'qsub': if which('qsub') is None: raise Sorry('qsub not available') if mp_params.njob is Auto: from xia2.Handlers.Environment import get_number_cpus mp_params.njob = get_number_cpus() if mp_params.nproc is Auto: mp_params.nproc = 1 elif mp_params.nproc is Auto: from xia2.Handlers.Environment import get_number_cpus mp_params.nproc = get_number_cpus() Flags.set_parallel(mp_params.nproc) else: Flags.set_parallel(mp_params.nproc) elif mp_params.mode == 'serial': mp_params.njob = 1 if mp_params.nproc is Auto: from xia2.Handlers.Environment import get_number_cpus mp_params.nproc = get_number_cpus() Flags.set_parallel(mp_params.nproc) PhilIndex.update("xia2.settings.multiprocessing.njob=%d" %mp_params.njob) PhilIndex.update("xia2.settings.multiprocessing.nproc=%d" %mp_params.nproc) params = PhilIndex.get_python_object() mp_params = params.xia2.settings.multiprocessing if params.xia2.settings.indexer is not None: add_preference("indexer", params.xia2.settings.indexer) if params.xia2.settings.multi_sweep_indexing is Auto: params.xia2.settings.multi_sweep_indexing = \ Flags.get_small_molecule() and 'dials' == params.xia2.settings.indexer if params.xia2.settings.refiner is not None: add_preference("refiner", params.xia2.settings.refiner) if params.xia2.settings.integrater is not None: add_preference("integrater", params.xia2.settings.integrater) if params.xia2.settings.scaler is not None: add_preference("scaler", params.xia2.settings.scaler) if params.xia2.settings.resolution.d_min is not None: Flags.set_resolution_high(params.xia2.settings.resolution.d_min) if params.xia2.settings.resolution.d_max is not None: Flags.set_resolution_low(params.xia2.settings.resolution.d_max) 
Flags.set_reversephi(params.xia2.settings.input.reverse_phi) input_json = params.xia2.settings.input.json if (input_json is not None and len(input_json)): for json_file in input_json: assert os.path.isfile(json_file) load_datablock(json_file) reference_geometry = params.xia2.settings.input.reference_geometry if reference_geometry is not None and len(reference_geometry) > 0: reference_geometries = "\n".join( ["xia2.settings.input.reference_geometry=%s" % os.path.abspath(g) for g in params.xia2.settings.input.reference_geometry]) Debug.write(reference_geometries) PhilIndex.update(reference_geometries) Debug.write("xia2.settings.trust_beam_centre=true") PhilIndex.update("xia2.settings.trust_beam_centre=true") params = PhilIndex.get_python_object() try: self._read_xinfo() except exceptions.Exception, e: traceback.print_exc(file = open('xia2-xinfo.error', 'w')) raise RuntimeError, '%s (%s)' % \ (self._help_xinfo(), str(e)) params = PhilIndex.get_python_object() if params.xia2.settings.input.xinfo is not None: xinfo_file = os.path.abspath(params.xia2.settings.input.xinfo) PhilIndex.update("xia2.settings.input.xinfo=%s" %xinfo_file) params = PhilIndex.get_python_object() self.set_xinfo(xinfo_file) Debug.write(60 * '-') Debug.write('XINFO file: %s' % xinfo_file) for record in open(xinfo_file, 'r').readlines(): # don't want \n on the end... 
Debug.write(record[:-1]) Debug.write(60 * '-') else: xinfo_file = '%s/automatic.xinfo' %os.path.abspath( os.curdir) PhilIndex.update("xia2.settings.input.xinfo=%s" %xinfo_file) params = PhilIndex.get_python_object() if params.dials.find_spots.phil_file is not None: PhilIndex.update("dials.find_spots.phil_file=%s" %os.path.abspath( params.dials.find_spots.phil_file)) if params.dials.index.phil_file is not None: PhilIndex.update("dials.index.phil_file=%s" %os.path.abspath( params.dials.index.phil_file)) if params.dials.refine.phil_file is not None: PhilIndex.update("dials.refine.phil_file=%s" %os.path.abspath( params.dials.refine.phil_file)) if params.dials.integrate.phil_file is not None: PhilIndex.update("dials.integrate.phil_file=%s" %os.path.abspath( params.dials.integrate.phil_file)) params = PhilIndex.get_python_object() datasets = PhilIndex.params.xia2.settings.input.image for dataset in datasets: start_end = None if ':' in dataset: tokens = dataset.split(':') # cope with windows drives i.e. 
C:\data\blah\thing_0001.cbf:1:100 if len(tokens[0]) == 1: tokens = ['%s:%s' % (tokens[0], tokens[1])] + tokens[2:] if len(tokens) != 3: raise RuntimeError, '/path/to/image_0001.cbf:start:end' dataset = tokens[0] start_end = int(tokens[1]), int(tokens[2]) from xia2.Applications.xia2setup import is_hd5f_name if is_hd5f_name(dataset): self._hdf5_master_files.append(os.path.abspath(dataset)) if start_end: Debug.write('Image range: %d %d' % start_end) self._default_start_end[dataset] = start_end else: Debug.write('No image range specified') else: template, directory = image2template_directory(os.path.abspath(dataset)) self._default_template.append(template) self._default_directory.append(directory) Debug.write('Interpreted from image %s:' % dataset) Debug.write('Template %s' % template) Debug.write('Directory %s' % directory) if start_end: Debug.write('Image range: %d %d' % start_end) self._default_start_end[os.path.join(directory, template)] = start_end else: Debug.write('No image range specified') # finally, check that all arguments were read and raise an exception # if any of them were nonsense. with open('xia2-working.phil', 'wb') as f: print >> f, PhilIndex.working_phil.as_str() with open('xia2-diff.phil', 'wb') as f: print >> f, PhilIndex.get_diff().as_str() Debug.write('\nDifference PHIL:') Debug.write(PhilIndex.get_diff().as_str(), strip=False) Debug.write('Working PHIL:') Debug.write(PhilIndex.working_phil.as_str(), strip=False) nonsense = 'Unknown command-line options:' was_nonsense = False for j, argv in enumerate(self._argv): if j == 0: continue if argv[0] != '-' and '=' not in argv: continue if not j in self._understood: nonsense += ' %s' % argv was_nonsense = True if was_nonsense: raise RuntimeError, nonsense return # command line parsers, getters and help functions. 
def _read_beam(self): '''Read the beam centre from the command line.''' index = -1 try: index = self._argv.index('-beam') except ValueError, e: self._beam = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -beam option deprecated: please use beam_centre='%s' instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.beam_centre=%s" %self._argv[index +1 ]) PhilIndex.get_python_object() Debug.write('Beam read from command line as %f %f' % tuple( PhilIndex.params.xia2.settings.beam_centre)) self._beam = tuple(PhilIndex.params.xia2.settings.beam_centre) return def _help_beam(self): '''Return a help string for the read beam method.''' return '-beam x,y' def get_beam(self): return self._beam def _help_image(self): '''Return a string for explaining the -image method.''' return '-image /path/to/an/image_001.img' def _read_image(self): '''Read image information from the command line.''' index = -1 try: index = self._argv.index('-image') except ValueError, e: # the token is not on the command line self._default_template = [] self._default_directory = [] return image = self._argv[index + 1] # check if there is a space in the image name - this happens and it # looks like the python input parser will split it even if the # space is escaped or it is quoted if image[-1] == '\\': try: image = '%s %s' % (self._argv[index + 1][:-1], self._argv[index + 2]) except: raise RuntimeError, 'image name ends in \\' # XXX Warning added 2015-04-23 Chatter.write( "Warning: -image option deprecated: please use image='%s' instead" %( image)) PhilIndex.update("xia2.settings.input.image=%s" %image) PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_atom_name(self): try: index = self._argv.index('-atom') except ValueError, e: self._default_atom_name = None return self._default_atom_name = 
self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) Debug.write('Heavy atom: %s' % \ self._default_atom_name) return def _help_atom_name(self): return '-atom se' def get_atom_name(self): return self._default_atom_name def _read_phil(self): try: index = self._argv.index('-phil') except ValueError, e: return Chatter.bigbanner('-phil option now no longer needed: ' 'please just place file on command-line', size=80) self._understood.append(index) if True: return PhilIndex.merge_param_file(self._argv[index + 1]) PhilIndex.get_python_object() self._understood.append(index + 1) Debug.write('Phil file: %s' % self._argv[index + 1]) return def _help_phil(self): return '-phil parameters.phil' def _read_project_name(self): try: index = self._argv.index('-project') except ValueError, e: self._default_project_name = None return self._default_project_name = self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) Debug.write('Project: %s' % self._default_project_name) return def _help_project_name(self): return '-project foo' def get_project_name(self): return self._default_project_name def _read_crystal_name(self): try: index = self._argv.index('-crystal') except ValueError, e: self._default_crystal_name = None return self._default_crystal_name = self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) Debug.write('Crystal: %s' % self._default_crystal_name) return def _help_crystal_name(self): return '-crystal foo' def get_crystal_name(self): return self._default_crystal_name def _read_xinfo(self): try: index = self._argv.index('-xinfo') except ValueError, e: self._xinfo = None return if index < 0: raise RuntimeError, 'negative index (no xinfo file name given)' self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2015-03-02 Chatter.write( "Warning: -xinfo option deprecated: please use xinfo='%s' instead" %( self._argv[index + 1])) 
PhilIndex.update("xia2.settings.input.xinfo=%s" %self._argv[index + 1]) PhilIndex.get_python_object() return def _help_xinfo(self): return '-xinfo example.xinfo' def set_xinfo(self, xinfo): with open(xinfo, 'rb') as f: Debug.write('\n' + xinfo) Debug.write(f.read()) self._xinfo = XProject(xinfo) def get_xinfo(self): '''Return the XProject.''' return self._xinfo def _read_xparm(self): try: index = self._argv.index('-xparm') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_xparm(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('Rotation axis: %.6f %.6f %.6f' % \ Flags.get_xparm_rotation_axis()) Debug.write('Beam vector: %.6f %.6f %.6f' % \ Flags.get_xparm_beam_vector()) Debug.write('Origin: %.2f %.2f' % \ Flags.get_xparm_origin()) return def _help_xparm(self): return '-xparm GXPARM.XDS' def _read_xparm_ub(self): try: index = self._argv.index('-xparm_ub') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_xparm_ub(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('Real Space A: %.2f %.2f %.2f' % \ tuple(Flags.get_xparm_a())) Debug.write('Real Space B: %.2f %.2f %.2f' % \ tuple(Flags.get_xparm_b())) Debug.write('Real Space C: %.2f %.2f %.2f' % \ tuple(Flags.get_xparm_c())) return def _help_xparm_ub(self): return '-xparm_ub GXPARM.XDS' def _read_parallel(self): try: index = self._argv.index('-parallel') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' if int(self._argv[index + 1]) < 0: raise RuntimeError, 'negative number of processors: %s' % \ self._argv[index + 1] self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -parallel option deprecated: please use nproc=%s instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.multiprocessing.nproc=%s" %( self._argv[index + 1])) 
PhilIndex.get_python_object() return def _help_parallel(self): return '-parallel N' def _read_serial(self): try: index = self._argv.index('-serial') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) Flags.set_parallel(1) PhilIndex.update("xia2.settings.multiprocessing.nproc=1") PhilIndex.get_python_object() Debug.write('Serial set (i.e. 1 CPU)') return def _help_serial(self): return '-serial' def _read_min_images(self): try: index = self._argv.index('-min_images') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2015-05-01 Chatter.write( "Warning: -min_images option deprecated: please use min_images=%s instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.input.min_images=%i" %( int(self._argv[index + 1]))) PhilIndex.get_python_object() return def _help_min_images(self): return '-min_images N' def _read_start_end(self): try: index = self._argv.index('-start_end') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' if not '-image' in self._argv: raise RuntimeError, 'do not use start_end without -image' self._understood.append(index) self._understood.append(index + 1) start, end = tuple(map(int, self._argv[index + 1].split(','))) Flags.set_start_end(start, end) Debug.write('Start, end set to %d %d' % Flags.get_start_end()) return def _help_start_end(self): return '-start_end start,end' def _read_xparallel(self): try: index = self._argv.index('-xparallel') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_xparallel(int(self._argv[index + 1])) Debug.write('XParallel set to %d' % Flags.get_xparallel()) return def _help_xparallel(self): return '-xparallel N' def _read_spacegroup(self): try: index = self._argv.index('-spacegroup') except ValueError, e: 
return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -spacegroup option deprecated: please use space_group='%s' instead" %( self._argv[index + 1])) Flags.set_spacegroup(self._argv[index + 1]) # XXX this line should go PhilIndex.update("xia2.settings.space_group=%s" %self._argv[index + 1]) PhilIndex.get_python_object() Debug.write('Spacegroup set to %s' % self._argv[index + 1]) return def _help_spacegroup(self): return '-spacegroup P43212' def _read_resolution(self): try: index = self._argv.index('-resolution') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' resolution = self._argv[index + 1] if ',' in resolution: a, b = map(float, resolution.split(',')) dmin = min(a, b) dmax = max(a, b) else: dmin = float(resolution) dmax = None self._understood.append(index) self._understood.append(index + 1) PhilIndex.update("xia2.settings.resolution.d_min=%s" %dmin) if dmax is not None: PhilIndex.update("xia2.settings.resolution.d_max=%s" %dmax) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -resolution option deprecated: please use d_min=%s and d_max=%s instead" %( dmin, dmax)) else: # XXX Warning added 2014-11-10 Chatter.write( "Warning: -resolution option deprecated: please use d_min=%s instead" %( dmin)) PhilIndex.get_python_object() if dmax: Debug.write('Resolution set to %.3f %.3f' % (dmin, dmax)) else: Debug.write('Resolution set to %.3f' % dmin) return def _help_resolution(self): return '-resolution high[,low]' def _read_z_min(self): try: index = self._argv.index('-z_min') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_z_min(float(self._argv[index + 1])) Debug.write('Z min set to %f' % Flags.get_z_min()) return def _help_z_min(self): return '-z_min N' def _read_aimless_secondary(self): try: index = 
self._argv.index('-aimless_secondary') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_aimless_secondary(float(self._argv[index + 1])) Debug.write('Aimless secondary set to %f' % Flags.get_aimless_secondary()) return def _help_aimless_secondary(self): return '-aimless_secondary N' def _read_freer_file(self): try: index = self._argv.index('-freer_file') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_freer_file(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('FreeR_flag column taken from %s' % Flags.get_freer_file()) # this should also be used as an indexing reference to make # sense... Flags.set_reference_reflection_file(self._argv[index + 1]) Debug.write('Reference reflection file: %s' % Flags.get_reference_reflection_file()) # and also the spacegroup copied out?! ok - this is done # "by magic" in the scaler. 
return def _help_freer_file(self): return '-freer_file my_freer_file.mtz' def _read_reference_reflection_file(self): try: index = self._argv.index('-reference_reflection_file') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' Flags.set_reference_reflection_file(self._argv[index + 1]) self._understood.append(index) self._understood.append(index + 1) Debug.write('Reference reflection file: %s' % Flags.get_reference_reflection_file()) return def _help_reference_reflection_file(self): return '-reference_reflection_file my_reference_reflection_file.mtz' def _read_rejection_threshold(self): try: index = self._argv.index('-rejection_threshold') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_rejection_threshold(float(self._argv[index + 1])) Debug.write('Rejection threshold set to %f' % \ Flags.get_rejection_threshold()) return def _help_rejection_threshold(self): return '-rejection_threshold N' def _read_isigma(self): try: index = self._argv.index('-isigma') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.isigma=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -isigma option deprecated: please use isigma=%s instead" %self._argv[index + 1]) return def _help_isigma(self): return '-isigma N' def _read_misigma(self): try: index = self._argv.index('-misigma') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.misigma=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -misigma option deprecated: please use misigma=%s instead" %self._argv[index + 1]) return def _help_misigma(self): return '-misigma 
N' def _read_completeness(self): try: index = self._argv.index('-completeness') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.completeness=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -completeness option deprecated: please use completeness=%s instead" %self._argv[index + 1]) return def _help_completeness(self): return '-completeness N' def _read_rmerge(self): try: index = self._argv.index('-rmerge') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.rmerge=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -rmerge option deprecated: please use rmerge=%s instead" %self._argv[index + 1]) return def _help_rmerge(self): return '-rmerge N' def _read_cc_half(self): try: index = self._argv.index('-cc_half') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) PhilIndex.update( "xia2.settings.resolution.cc_half=%s" %self._argv[index + 1]) # XXX Warning added 2015-12-01 Chatter.write( "Warning: -cc_half option deprecated: please use cc_half=%s instead" %self._argv[index + 1]) return def _help_cc_half(self): return '-cc_half N' def _read_microcrystal(self): if '-microcrystal' in self._argv: Flags.set_microcrystal() Debug.write('Microcrystal mode on') self._understood.append(self._argv.index('-microcrystal')) return def _read_failover(self): if '-failover' in self._argv: Flags.set_failover() Debug.write('Failover mode on') self._understood.append(self._argv.index('-failover')) return def _read_blend(self): if '-blend' in self._argv: Flags.set_blend() Debug.write('Blend mode on') self._understood.append(self._argv.index('-blend')) 
return def _read_ispyb_xml_out(self): try: index = self._argv.index('-ispyb_xml_out') except ValueError, e: self._ispyb_xml_out = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_ispyb_xml_out(self._argv[index + 1]) Debug.write('ISPyB XML output set to %s' % self._argv[index + 1]) return def _help_ispyb_xml_out(self): return '-ispyb_xml_out project.xml' def _read_hdr_in(self): try: index = self._argv.index('-hdr_in') except ValueError, e: self._hdr_in = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_hdr_in(self._argv[index + 1]) return def _help_hdr_in(self): return '-hdr_in project.hdr' def _read_hdr_out(self): try: index = self._argv.index('-hdr_out') except ValueError, e: self._hdr_out = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_hdr_out(self._argv[index + 1]) Debug.write('Output header file set to %s' % self._argv[index + 1]) return def _help_hdr_out(self): return '-hdr_out project.hdr' def _read_pickle(self): try: index = self._argv.index('-pickle') except ValueError, e: self._pickle = None return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_pickle(self._argv[index + 1]) return def _help_pickle(self): return '-pickle name.pkl' def get_template(self): return self._default_template def get_start_end(self, full_template): return self._default_start_end.get(full_template, None) def get_directory(self): return self._default_directory def get_hdf5_master_files(self): return self._hdf5_master_files def _read_trust_timestamps(self): if '-trust_timestamps' in self._argv: Flags.set_trust_timestamps(True) Debug.write('Trust timestamps on') self._understood.append(self._argv.index('-trust_timestamps')) return def 
_read_batch_scale(self): if '-batch_scale' in self._argv: Flags.set_batch_scale(True) Debug.write('Batch scaling mode on') self._understood.append(self._argv.index('-batch_scale')) return def _read_small_molecule(self): if '-small_molecule' in self._argv: Flags.set_small_molecule(True) Debug.write('Small molecule selected') self._understood.append(self._argv.index('-small_molecule')) settings = PhilIndex.get_python_object().xia2.settings PhilIndex.update("xia2.settings.unify_setting=true") return def _read_scale_model(self): try: index = self._argv.index('-scale_model') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_scale_model(self._argv[index + 1]) Debug.write('Scaling model set to: %s' % Flags.get_scale_model()) return def _read_quick(self): if '-quick' in self._argv: Flags.set_quick(True) Debug.write('Quick mode selected') self._understood.append(self._argv.index('-quick')) return def _read_chef(self): if '-chef' in self._argv: Flags.set_chef(True) self._understood.append(self._argv.index('-chef')) Debug.write('Chef mode selected') if '-nochef' in self._argv: Flags.set_chef(False) self._understood.append(self._argv.index('-nochef')) Debug.write('Chef mode deselected') return def _read_reversephi(self): if '-reversephi' in self._argv: self._understood.append(self._argv.index('-reversephi')) # XXX Warning added 2015-11-18 Chatter.write( "Warning: -reversephi option deprecated: please use reverse_phi=True instead") PhilIndex.update("xia2.settings.input.reverse_phi=True") PhilIndex.get_python_object() return def _read_no_lattice_test(self): if '-no_lattice_test' in self._argv: Flags.set_no_lattice_test(True) self._understood.append(self._argv.index('-no_lattice_test')) Debug.write('No lattice test mode selected') return def _read_no_relax(self): if '-no_relax' in self._argv: Flags.set_relax(False) self._understood.append(self._argv.index('-no_relax')) 
Debug.write('XDS relax about indexing selected') return def _read_no_profile(self): if '-no_profile' in self._argv: # XXX Warning added 2016-02-24 Chatter.write( "Warning: -no_profile option deprecated: please use xds.integrate.profile_fitting=False instead") PhilIndex.update("xds.integrate.profile_fitting=False") PhilIndex.get_python_object() self._understood.append(self._argv.index('-no_profile')) return def _read_zero_dose(self): if '-zero_dose' in self._argv: Flags.set_zero_dose(True) self._understood.append(self._argv.index('-zero_dose')) Debug.write('Zero-dose mode (XDS/XSCALE) selected') return def _read_norefine(self): if '-norefine' in self._argv: Flags.set_refine(False) self._understood.append(self._argv.index('-norefine')) # FIXME what does this do??? - switch off orientation refinement # in integration return def _read_noremove(self): if '-noremove' in self._argv: self._understood.append(self._argv.index('-noremove')) Flags.set_remove(False) return def _read_2d(self): if '-2d' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=mosflm") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=mosflm") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=mosflmr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=ccp4a") PhilIndex.get_python_object() self._understood.append(self._argv.index('-2d')) Debug.write('2DA pipeline selected') return def _read_2di(self): if '-2di' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=mosflm") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=mosflm") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=mosflmr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=ccp4a") PhilIndex.get_python_object() 
self._understood.append(self._argv.index('-2di')) Debug.write('2DA pipeline; mosflm indexing selected') return def _read_dials(self): if '-dials' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=dials") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=dials") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=dials") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=ccp4a") PhilIndex.get_python_object() self._understood.append(self._argv.index('-dials')) Debug.write('DIALS pipeline selected') return def _read_3d(self): if '-3d' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=xds") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3d')) Debug.write('3DR pipeline selected') return def _read_3di(self): if '-3di' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=xds") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3di')) Debug.write('3DR pipeline; XDS indexing selected') return def _read_3dii(self): if '-3dii' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=xdsii") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if 
settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3dii')) Debug.write('3D II R pipeline (XDS IDXREF all images) selected') return def _read_3dd(self): if '-3dd' in self._argv: settings = PhilIndex.get_python_object().xia2.settings if settings.indexer is None: PhilIndex.update("xia2.settings.indexer=dials") if settings.refiner is None: PhilIndex.update("xia2.settings.refiner=xds") if settings.integrater is None: PhilIndex.update("xia2.settings.integrater=xdsr") if settings.scaler is None: PhilIndex.update("xia2.settings.scaler=xdsa") PhilIndex.get_python_object() self._understood.append(self._argv.index('-3dd')) Debug.write('3DD pipeline (DIALS indexing) selected') return def _read_debug(self): if '-debug' in self._argv: # join the debug stream to the main output Debug.join(Chatter) self._understood.append(self._argv.index('-debug')) Debug.write('Debugging output switched on') return def _read_interactive(self): if '-interactive' in self._argv: Flags.set_interactive(True) self._understood.append(self._argv.index('-interactive')) Debug.write('Interactive indexing ON') return def _read_ice(self): if '-ice' in self._argv: Flags.set_ice(True) self._understood.append(self._argv.index('-ice')) Debug.write('Ice ring exclusion ON') return def _read_egg(self): if '-egg' in self._argv: self._understood.append(self._argv.index('-egg')) Flags.set_egg(True) return def _read_uniform_sd(self): if '-no_uniform_sd' in self._argv: Flags.set_uniform_sd(False) self._understood.append(self._argv.index('-no_uniform_sd')) Debug.write('Uniform SD OFF') return def _read_migrate_data(self): if '-migrate_data' in self._argv: Flags.set_migrate_data(True) self._understood.append(self._argv.index('-migrate_data')) Debug.write('Data migration switched on') return def _read_cell(self): '''Read the cell constants from the 
command line.''' index = -1 try: index = self._argv.index('-cell') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' try: cell = self._argv[index + 1].split(',') except IndexError, e: raise RuntimeError, \ '-cell correct use "-cell a,b,c,alpha,beta,gamma"' if len(cell) != 6: raise RuntimeError, \ '-cell correct use "-cell a,b,c,alpha,beta,gamma"' _cell = tuple(map(float, cell)) # XXX Warning added 2014-11-10 Chatter.write( "Warning: -cell option deprecated: please use unit_cell='%s' instead" %( self._argv[index + 1])) PhilIndex.update("xia2.settings.unit_cell=%s,%s,%s,%s,%s,%s" %_cell) PhilIndex.get_python_object() format = 6 * ' %7.2f' self._understood.append(index) self._understood.append(index + 1) Debug.write('Cell read from command line:' + \ format % _cell) return def _help_cell(self): '''Return a help string for the read cell method.''' return '-cell a,b,c,alpha,beta,gamma' def _read_free_fraction(self): try: index = self._argv.index('-free_fraction') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_free_fraction(float(self._argv[index + 1])) Debug.write('Free fraction set to %f' % Flags.get_free_fraction()) return def _help_free_fraction(self): return '-free_fraction N' def _read_free_total(self): try: index = self._argv.index('-free_total') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_free_total(int(self._argv[index + 1])) Debug.write('Free total set to %f' % Flags.get_free_total()) return def _help_free_total(self): return '-free_total N' def _read_mask(self): try: index = self._argv.index('-mask') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) self._understood.append(index + 1) Flags.set_mask(self._argv[index + 1]) return def get_mask(self): return 
self._mask def _help_mask(self): return '-mask mask.dat' def get_mask(self): return self._mask def _read_fixed_628(self): try: index = self._argv.index('-fixed_628') except ValueError, e: return if index < 0: raise RuntimeError, 'negative index' self._understood.append(index) Flags.set_fixed_628() return def _help_fixed_628(self): return '-fixed_628' def _read_indexer(self): try: index = self._argv.index('-indexer') except ValueError, e: return indexer = self._argv[index + 1] # XXX Warning added 2014-11-10 Chatter.write( "Warning: -indexer option deprecated: please use indexer='%s' instead" %( indexer)) PhilIndex.update("xia2.settings.indexer=%s" %indexer) PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_integrater(self): try: index = self._argv.index('-integrater') except ValueError, e: return integrater = self._argv[index + 1] # XXX Warning added 2014-11-10 Chatter.write( "Warning: -integrater option deprecated: please use integrater='%s' instead" %( integrater)) PhilIndex.update("xia2.settings.integrater=%s" %integrater) PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_scaler(self): try: index = self._argv.index('-scaler') except ValueError, e: return scaler = self._argv[index + 1] # XXX Warning added 2014-11-10 Chatter.write( "Warning: -scaler option deprecated: please use scaler='%s' instead" %( scaler)) PhilIndex.update("xia2.settings.scaler=%s" %scaler) PhilIndex.get_python_object() self._understood.append(index) self._understood.append(index + 1) return def _read_executables(self): try: index = self._argv.index('-executable') except ValueError, e: return executable_string = self._argv[index + 1] assert('=' in executable_string) executable, path = executable_string.split('=') Executables.add(executable, path) self._understood.append(index) self._understood.append(index + 1) return CommandLine = _CommandLine() CommandLine.setup() if 
__name__ == '__main__': print CommandLine.get_beam() print CommandLine.get_xinfo()
#SPDX-License-Identifier: MIT
# ghdata REST server: exposes GHTorrent / GitHub / PublicWWW metrics over
# HTTP via Flask.
#
# NOTE(review): the source contained two concatenated copies of this module
# separated by the commit message "Fix Windows error"; only the fixed copy
# (plain Flask(__name__), no absolute static_url_path, which breaks on
# Windows drive-letter paths) is kept. The earlier copy was dead anyway:
# later definitions shadow earlier ones.
import ghdata

import os
import sys
if (sys.version_info > (3, 0)):
    import configparser as configparser
else:
    import ConfigParser as configparser

from flask import Flask, request, Response, send_from_directory
from flask_cors import CORS

GHDATA_API_VERSION = 'unstable'


def serialize(func, **args):
    """Call *func* with *args* and serialize the result.

    If the result looks like a pandas DataFrame (has ``to_json``) it is
    rendered as a record-oriented JSON array with ISO dates; anything else
    is returned unchanged (assumed already serializable).
    """
    data = func(**args)
    if hasattr(data, 'to_json'):
        return data.to_json(orient='records', date_format='iso',
                            date_unit='ms')
    else:
        return data


def flaskify_ghtorrent(ghtorrent, func):
    """
    Simplifies API endpoints that just accept owner and repo,
    serializes them and spits them out
    """
    def generated_function(owner, repo):
        repoid = ghtorrent.repoid(owner=owner, repo=repo)
        return Response(response=serialize(func, repoid=repoid),
                        status=200,
                        mimetype="application/json")
    # preserve the wrapped function's name so Flask endpoints stay unique
    generated_function.__name__ = func.__name__
    return generated_function


def flaskify(func):
    """
    Simplifies API endpoints that just accept owner and repo,
    serializes them and spits them out
    """
    def generated_function(owner, repo):
        return Response(response=serialize(func, owner=owner, repo=repo),
                        status=200,
                        mimetype="application/json")
    generated_function.__name__ = func.__name__
    return generated_function


def read_config(parser, section, name, environment_variable, default):
    """Read a setting, preferring *environment_variable* over the file.

    If the section/option is missing, the default is written back to
    ghdata.cfg (so the file becomes self-documenting) and returned.
    """
    try:
        value = os.getenv(environment_variable, parser.get(section, name))
        return value
    except:
        if not parser.has_section(section):
            parser.add_section(section)
        parser.set(section, name, default)
        with open('ghdata.cfg', 'w') as configfile:
            parser.write(configfile)
        return default


def run():
    """Configure and start the Flask application serving the ghdata API."""
    app = Flask(__name__)
    CORS(app)
    # Try to open the config file and parse it
    parser = configparser.RawConfigParser()
    parser.read('ghdata.cfg')

    try:
        dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(
            read_config(parser, 'Database', 'user', 'GHDATA_DB_USER', 'root'),
            read_config(parser, 'Database', 'pass', 'GHDATA_DB_PASS', 'password'),
            read_config(parser, 'Database', 'host', 'GHDATA_DB_HOST', '127.0.0.1'),
            read_config(parser, 'Database', 'port', 'GHDATA_DB_PORT', '3306'),
            read_config(parser, 'Database', 'name', 'GHDATA_DB_NAME', 'msr14')
        )
        print("Connecting with " + dbstr)
        ghtorrent = ghdata.GHTorrent(dbstr=dbstr)
    except Exception as e:
        print("Failed to connect to database (" + str(e) + ")")

    host = read_config(parser, 'Server', 'host', 'GHDATA_HOST', '0.0.0.0')
    port = read_config(parser, 'Server', 'port', 'GHDATA_PORT', '5000')

    publicwww = ghdata.PublicWWW(api_key=read_config(
        parser, 'PublicWWW', 'APIKey', 'GHDATA_PUBLIC_WWW_API_KEY', 'None'))
    github = ghdata.GitHubAPI(api_key=read_config(
        parser, 'GitHub', 'APIKey', 'GHDATA_GITHUB_API_KEY', 'None'))

    if (read_config(parser, 'Development', 'developer',
                    'GHDATA_DEBUG', '0') == '1'):
        debugmode = True
    else:
        debugmode = False

    """
    @api {get} / API Status
    @apiName Status
    @apiGroup Misc
    """
    @app.route('/{}/'.format(GHDATA_API_VERSION))
    def api_root():
        """API status"""
        # @todo: When we support multiple data sources this should keep
        #        track of their status
        # @todo: Add GHTorrent test to determine status
        ghtorrent_status = "good"
        # @todo: Add GitHub API status
        # @todo: Add PublicWWW API status
        return """{"status": "healthy", "ghtorrent": "{}"}""".format(
            ghtorrent_status)

    #######################
    #     Timeseries      #
    #######################

    # @todo: Link to LF Metrics

    """
    @api {get} /:owner/:repo/commits Commits by Week
    @apiName CommitsByWeek
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"date": "2015-01-01T00:00:00.000Z", "commits": 153},
         {"date": "2015-01-08T00:00:00.000Z", "commits": 192}]
    """
    app.route('/{}/<owner>/<repo>/timeseries/commits'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.commits))

    """
    @api {get} /:owner/:repo/forks Forks by Week
    @apiName ForksByWeek
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"date": "2015-01-01T00:00:00.000Z", "forks": 13},
         {"date": "2015-01-08T00:00:00.000Z", "forks": 12}]
    """
    app.route('/{}/<owner>/<repo>/timeseries/forks'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.forks))

    """
    @api {get} /:owner/:repo/issues Issues by Week
    @apiName IssuesByWeek
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"date": "2015-01-01T00:00:00.000Z", "issues":13},
         {"date": "2015-01-08T00:00:00.000Z", "issues":15}]
    """
    app.route('/{}/<owner>/<repo>/timeseries/issues'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.issues))

    """
    @api {get} /:owner/:repo/issues/response_time Issue Response Time
    @apiName IssueResponseTime
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"created_at": "2013-09-16T17:00:54.000Z",
          "responded_at": "2013-09-16T17:20:58.000Z"},
         {"created_at": "2013-09-16T09:31:34.000Z",
          "responded_at": "2013-09-16T09:43:03.000Z"}]
    """
    app.route('/{}/<owner>/<repo>/timeseries/issues/response_time'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.issue_response_time))

    """
    @api {get} /:owner/:repo/pulls Pull Requests by Week
    @apiName PullRequestsByWeek
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"date": "2015-01-01T00:00:00.000Z", "pull_requests": 1, "comments": 11},
         {"date": "2015-01-08T00:00:00.000Z", "pull_requests": 2, "comments": 31}]
    """
    app.route('/{}/<owner>/<repo>/timeseries/pulls'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.pulls))

    """
    @api {get} /:owner/:repo/stargazers Stargazers by Week
    @apiName StargazersByWeek
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"date": "2015-01-01T00:00:00.000Z", "watchers": 133},
         {"date": "2015-01-08T00:00:00.000Z", "watchers": 54}]
    """
    app.route('/{}/<owner>/<repo>/timeseries/stargazers'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.stargazers))

    """
    @api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week
    @apiDescription For each week, the rate is calculated as (pull requests
        merged that week) / (pull requests opened that week)
    @apiName Stargazers
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"date": "2015-01-01T00:00:00.000Z", "rate": 0.5},
         {"date": "2015-01-08T00:00:00.000Z", "rate": 0.33}]
    """
    app.route('/{}/<owner>/<repo>/pulls/acceptance_rate'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.pull_acceptance_rate))

    # Contribution Trends
    """
    @api {get} /:owner/:repo/contributors Total Contributions by User
    @apiName TotalContributions
    @apiGroup Users
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"login": "foo", "location": "Springfield", "commits": 1337.0,
          "pull_requests": 60.0, "issues": null, "commit_comments": 158.0,
          "pull_request_comments": 718.0, "issue_comments": 1668.0},
         {"login": "bar", "location": null, "commits": 3968.0,
          "pull_requests": null, "issues": 12.0, "commit_comments": 158.0,
          "pull_request_comments": 718.0, "issue_comments": 1568.0}]
    """
    app.route('/{}/<owner>/<repo>/contributors'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.contributors))

    #######################
    # Contribution Trends #
    #######################

    """
    @api {get} /:owner/:repo/contributions Contributions by Week
    @apiName ContributionsByWeek
    @apiGroup Timeseries
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiParam (String) user Limit results to the given user's contributions
    @apiSuccessExample {json} Success-Response:
        [{"date": "2015-01-01T00:00:00.000Z", "commits": 37.0,
          "pull_requests": null, "issues": null, "commit_comments": 7.0,
          "pull_request_comments": 8.0, "issue_comments": 17.0},
         {"date": "2015-01-08T00:00:00.000Z", "commits": 68.0,
          "pull_requests": null, "issues": 12.0, "commit_comments": 18.0,
          "pull_request_comments": 13.0, "issue_comments": 28.0}]
    """
    @app.route('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION))
    def contributions(owner, repo):
        repoid = ghtorrent.repoid(owner=owner, repo=repo)
        user = request.args.get('user')
        if (user):
            userid = ghtorrent.userid(username=user)
            contribs = ghtorrent.contributions(repoid=repoid, userid=userid)
        else:
            contribs = ghtorrent.contributions(repoid=repoid)
        return Response(response=contribs,
                        status=200,
                        mimetype="application/json")

    # Diversity
    """
    @api {get} /:owner/:repo/commits/locations Commits and Location by User
    @apiName Stargazers
    @apiGroup Diversity
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"login": "bonnie", "location": "Rowena, TX", "commits": 12},
         {"login":"clyde", "location":"Ellis County, TX", "commits": 12}]
    """
    app.route('/{}/<owner>/<repo>/commits/locations'.format(GHDATA_API_VERSION))(
        flaskify_ghtorrent(ghtorrent, ghtorrent.committer_locations))

    # Popularity
    """
    @api {get} /:owner/:repo/linking_websites Linking Websites
    @apiDescription Returns an array of websites and their rank according
        to http://publicwww.com/
    @apiName LinkingWebsites
    @apiGroup Popularity
    @apiParam {String} owner Username of the owner of the GitHub repository
    @apiParam {String} repo Name of the GitHub repository
    @apiSuccessExample {json} Success-Response:
        [{"url": "missouri.edu", "rank": "1"},
         {"url": "unomaha.edu", "rank": "2"}]
    """
    app.route('/{}/<owner>/<repo>/linking_websites'.format(GHDATA_API_VERSION))(
        flaskify(publicwww.linking_websites))

    if (debugmode):
        print(" * Serving static routes")
        # Serve the front-end files in debug mode to make it easier for
        # developers to work on the interface
        # @todo: Figure out why this isn't working.
        @app.route('/')
        def index():
            root_dir = os.path.dirname(os.getcwd())
            print(root_dir + '/ghdata/static')
            return send_from_directory(root_dir + '/ghdata/ghdata/static',
                                       'index.html')

        @app.route('/scripts/<path>')
        def send_scripts(path):
            root_dir = os.path.dirname(os.getcwd())
            return send_from_directory(
                root_dir + '/ghdata/ghdata/static/scripts', path)

        @app.route('/styles/<path>')
        def send_styles(path):
            root_dir = os.path.dirname(os.getcwd())
            return send_from_directory(
                root_dir + '/ghdata/ghdata/static/styles', path)

        app.debug = True

    app.run(host=host, port=int(port), debug=debugmode)


if __name__ == '__main__':
    run()
import openpathsampling as paths
import numpy as np


class SingleTrajectoryAnalysis(object):
    """Analyze a trajectory or set of trajectories for transition properties.

    Attributes
    ----------
    dt : float
        time step between frames
    continuous_frames : dict
        maps each state to an array of the number of frames of each
        continuous visit to that state in the analyzed trajectories
    lifetime_frames : dict
        maps each state to an array of frame counts used for the lifetime
        calculation; see Notes
    transition_duration_frames : dict
        maps the transition tuple (initial_state, final_state) to an array
        of the number of frames spent outside both states during each
        transition
    continuous_times : dict
        as continuous_frames, with durations multiplied by self.dt
    lifetimes : dict
        as lifetime_frames, with durations multiplied by self.dt
    transition_duration : dict
        as transition_duration_frames, with durations multiplied by self.dt

    Notes
    -----
    LIFETIME: the lifetime segments run from a first frame in the state up
    to (but excluding) the first frame in the other state, as selected by
    the ensembles in :meth:`analyze_lifetime`.
    """
    def __init__(self, transition, dt=None):
        self.transition = transition
        self.dt = dt
        self.stateA = transition.stateA
        self.stateB = transition.stateB
        self.reset_analysis()

    def reset_analysis(self):
        """Reset the analysis by emptying all saved segments."""
        self.continuous_segments = {self.stateA: [], self.stateB: []}
        self.lifetime_segments = {self.stateA: [], self.stateB: []}
        self.transition_segments = {(self.stateA, self.stateB): [],
                                    (self.stateB, self.stateA): []}
        self.flux_segments = {self.stateA: {'in': [], 'out': []},
                              self.stateB: {'in': [], 'out': []}}

    @property
    def continuous_frames(self):
        """Segment lengths (in frames) of continuous visits, per state."""
        return {k: np.array([len(seg)
                             for seg in self.continuous_segments[k]])
                for k in self.continuous_segments.keys()}

    @property
    def continuous_times(self):
        """Continuous visit durations in time units (requires self.dt)."""
        if self.dt is None:  # pragma: no cover
            raise RuntimeError("No time delta set")
            # TODO: this might become a logger.warn
        continuous_frames = self.continuous_frames
        return {k: continuous_frames[k] * self.dt
                for k in continuous_frames.keys()}

    @property
    def lifetime_frames(self):
        """Lifetime segment lengths (in frames), per state."""
        return {k: np.array([len(seg)
                             for seg in self.lifetime_segments[k]])
                for k in self.lifetime_segments.keys()}

    @property
    def lifetimes(self):
        """Lifetimes in time units (requires self.dt)."""
        if self.dt is None:  # pragma: no cover
            raise RuntimeError("No time delta set")
            # TODO: this might become a logger.warn; use dt=1 otherwise
        lifetime_frames = self.lifetime_frames
        return {k: lifetime_frames[k] * self.dt
                for k in lifetime_frames.keys()}

    @property
    def transition_duration_frames(self):
        """Transition durations (in frames), per (initial, final) pair."""
        return {k: np.array([len(seg)
                             for seg in self.transition_segments[k]])
                for k in self.transition_segments.keys()}

    @property
    def transition_duration(self):
        """Transition durations in time units (requires self.dt)."""
        if self.dt is None:  # pragma: no cover
            raise RuntimeError("No time delta set")
            # TODO: this might become a logger.warn; use dt=1 otherwise
        transition_duration_frames = self.transition_duration_frames
        return {k: transition_duration_frames[k] * self.dt
                for k in transition_duration_frames.keys()}

    def analyze_continuous_time(self, trajectory, state):
        """Collect all continuous-visit segments for *state*."""
        ensemble = paths.AllInXEnsemble(state)
        self.continuous_segments[state] += ensemble.split(trajectory,
                                                          overlap=0)

    def analyze_lifetime(self, trajectory, state):
        """Collect lifetime segments for *state* (see class Notes)."""
        other_state = list(set([self.stateA, self.stateB])
                           - set([state]))[0]
        # B-to-A-to-B style sub-trajectories bracket each visit
        ensemble_BAB = paths.SequentialEnsemble([
            paths.AllInXEnsemble(other_state) & paths.LengthEnsemble(1),
            paths.PartInXEnsemble(state)
            & paths.AllOutXEnsemble(other_state),
            paths.AllInXEnsemble(other_state) & paths.LengthEnsemble(1)
        ])
        # within each bracket, take the A-to-B part starting in the state
        ensemble_AB = paths.SequentialEnsemble([
            paths.AllInXEnsemble(state) & paths.LengthEnsemble(1),
            paths.OptionalEnsemble(paths.AllOutXEnsemble(other_state)),
            paths.AllInXEnsemble(other_state) & paths.LengthEnsemble(1)
        ])
        BAB_split = ensemble_BAB.split(trajectory)
        AB_split = [ensemble_AB.split(part)[0] for part in BAB_split]
        # drop the final frame (the one already in the other state)
        self.lifetime_segments[state] += [subtraj[0:-1]
                                          for subtraj in AB_split]

    def analyze_transition_duration(self, trajectory, stateA, stateB):
        """Collect frames spent in transit from *stateA* to *stateB*."""
        # we define the transitions ensemble just in case the transition
        # is, e.g., fixed path length TPS. We want flexible path length
        # ensemble
        transition_ensemble = paths.SequentialEnsemble([
            paths.AllInXEnsemble(stateA) & paths.LengthEnsemble(1),
            paths.OptionalEnsemble(  # optional to allow instantaneous hops
                paths.AllOutXEnsemble(stateA)
                & paths.AllOutXEnsemble(stateB)
            ),
            paths.AllInXEnsemble(stateB) & paths.LengthEnsemble(1)
        ])
        # strip the bracketing in-state frames from each segment
        self.transition_segments[(stateA, stateB)] += [
            seg[1:-1] for seg in transition_ensemble.split(trajectory)
        ]

    def analyze_flux(self, trajectory, state):
        """Collect the in/out segments used for the flux through *state*."""
        other = list(set([self.stateA, self.stateB]) - set([state]))[0]
        counts_out = paths.SequentialEnsemble([
            paths.AllInXEnsemble(state) & paths.LengthEnsemble(1),
            paths.AllOutXEnsemble(state | other),
            paths.AllInXEnsemble(state) & paths.LengthEnsemble(1)
        ])
        counts_in = paths.SequentialEnsemble([
            paths.AllOutXEnsemble(state | other) & paths.LengthEnsemble(1),
            paths.AllInXEnsemble(state),
            paths.AllOutXEnsemble(state | other) & paths.LengthEnsemble(1)
        ])
        flux_out_segments = counts_out.split(trajectory)
        flux_in_segments = counts_in.split(trajectory)
        for seg in flux_in_segments:
            self.flux_segments[state]['in'] += [seg[1:-1]]
        for seg in flux_out_segments:
            self.flux_segments[state]['out'] += [seg[1:-1]]

    def analyze(self, trajectories):
        """Run all analyses on one trajectory or a list of trajectories."""
        # TODO: I hate using isinstance, but I don't see another way
        if isinstance(trajectories, paths.Trajectory):
            trajectories = [trajectories]
        for traj in trajectories:
            for state in [self.stateA, self.stateB]:
                self.analyze_continuous_time(traj, state)
                self.analyze_lifetime(traj, state)
                self.analyze_flux(traj, state)
            self.analyze_transition_duration(traj, self.stateA, self.stateB)
            self.analyze_transition_duration(traj, self.stateB, self.stateA)
        # return self so we can init and analyze in one line
        return self

    # TODO: add a `summary` function to output a nice pandas frame or
    # something
Attributes ---------- dt : float time step between frames continuous_frames : dict dictionary mapping state to a list of number of frames continuously in that state from the analyzed trajectories. lifetime_frames : dict dictionary mapping state to a list of the number of frames to the trajectory lengths for calculating the lifetime. See Notes for more. transition_frames : dict dictionary mapping the transition tuple (initial_state, final_state) to a list of the number of frames involves in the transition and not in either state. flux_frames : dict dictionary mapping each state to a dictionary of with keys 'in' (for frames in the state) and 'out' (for frames outside the state) continuous_times : dict As with continuous frames, but durations multiplied by self.dt lifetimes : dict As with lifetime_frames, but durations multiplied by self.dt transitions_durations : dict As with transition_frames, but durations multiplied by self.dt """ def __init__(self, transition, dt=None): self.transition = transition self.dt = dt self.stateA = transition.stateA self.stateB = transition.stateB self.reset_analysis() def reset_analysis(self): """Reset the analysis by emptying all saved segments.""" self.continuous_segments = {self.stateA: [], self.stateB: []} self.lifetime_segments = {self.stateA: [], self.stateB: []} self.transition_segments = {(self.stateA, self.stateB): [], (self.stateB, self.stateA): []} self.flux_segments = {self.stateA: {'in': [], 'out': []}, self.stateB: {'in': [], 'out': []}} @property def continuous_frames(self): return {k: np.array([len(seg) for seg in self.continuous_segments[k]]) for k in self.continuous_segments.keys()} @property def continuous_times(self): if self.dt is None: # pragma: no cover raise RuntimeError("No time delta set") # TODO: this might become a logger.warn continuous_frames = self.continuous_frames return {k : continuous_frames[k]*self.dt for k in continuous_frames.keys()} @property def lifetime_frames(self): return {k: 
np.array([len(seg) for seg in self.lifetime_segments[k]]) for k in self.lifetime_segments.keys()} @property def lifetimes(self): if self.dt is None: # pragma: no cover raise RuntimeError("No time delta set") # TODO: this might become a logger.warn; use dt=1 otherwise lifetime_frames = self.lifetime_frames return {k : lifetime_frames[k]*self.dt for k in lifetime_frames.keys()} @property def transition_duration_frames(self): return {k: np.array([len(seg) for seg in self.transition_segments[k]]) for k in self.transition_segments.keys()} @property def transition_duration(self): if self.dt is None: # pragma: no cover raise RuntimeError("No time delta set") # TODO: this might become a logger.warn; use dt=1 otherwise transition_duration_frames = self.transition_duration_frames return {k : transition_duration_frames[k]*self.dt for k in transition_duration_frames.keys()} def analyze_continuous_time(self, trajectory, state): """Analysis to obtain continuous times for given state. Parameters ---------- trajectory : :class:`.Trajectory` trajectory to analyze state : :class:`.Volume` state volume to characterize. Must be one of the states in the transition """ ensemble = paths.AllInXEnsemble(state) self.continuous_segments[state] += ensemble.split(trajectory, overlap=0) def analyze_lifetime(self, trajectory, state): """Analysis to obtain lifetimes for given state. Parameters ---------- trajectory : :class:`.Trajectory` trajectory to analyze state : :class:`.Volume` state volume to characterize. 
Must be one of the states in the transition """ other_state = list(set([self.stateA, self.stateB]) - set([state]))[0] ensemble_BAB = paths.SequentialEnsemble([ paths.AllInXEnsemble(other_state) & paths.LengthEnsemble(1), paths.PartInXEnsemble(state) & paths.AllOutXEnsemble(other_state), paths.AllInXEnsemble(other_state) & paths.LengthEnsemble(1) ]) ensemble_AB = paths.SequentialEnsemble([ paths.AllInXEnsemble(state) & paths.LengthEnsemble(1), paths.OptionalEnsemble(paths.AllOutXEnsemble(other_state)), paths.AllInXEnsemble(other_state) & paths.LengthEnsemble(1) ]) BAB_split = ensemble_BAB.split(trajectory) AB_split = [ensemble_AB.split(part)[0] for part in BAB_split] self.lifetime_segments[state] += [subtraj[0:-1] for subtraj in AB_split] def analyze_transition_duration(self, trajectory, stateA, stateB): """Analysis to obtain transition durations for given state. Parameters ---------- trajectory : :class:`.Trajectory` trajectory to analyze stateA : :class:`.Volume` initial state volume for the transition stateB : :class:`.Volume` final state volume for the transition """ # we define the transitions ensemble just in case the transition is, # e.g., fixed path length TPS. We want flexible path length ensemble transition_ensemble = paths.SequentialEnsemble([ paths.AllInXEnsemble(stateA) & paths.LengthEnsemble(1), paths.OptionalEnsemble( # optional to allow instantaneous hops paths.AllOutXEnsemble(stateA) & paths.AllOutXEnsemble(stateB) ), paths.AllInXEnsemble(stateB) & paths.LengthEnsemble(1) ]) self.transition_segments[(stateA, stateB)] += [ seg[1:-1] for seg in transition_ensemble.split(trajectory) ] def analyze_flux(self, trajectory, state): """Analysis to obtain flux segments for given state. Parameters ---------- trajectory : :class:`.Trajectory` trajectory to analyze state : :class:`.Volume` state volume to characterize. 
Must be one of the states in the transition """ other = list(set([self.stateA, self.stateB]) - set([state]))[0] counts_out = paths.SequentialEnsemble([ paths.AllInXEnsemble(state) & paths.LengthEnsemble(1), paths.AllOutXEnsemble(state | other), paths.AllInXEnsemble(state) & paths.LengthEnsemble(1) ]) counts_in = paths.SequentialEnsemble([ paths.AllOutXEnsemble(state | other) & paths.LengthEnsemble(1), paths.AllInXEnsemble(state), paths.AllOutXEnsemble(state | other) & paths.LengthEnsemble(1) ]) flux_out_segments = counts_out.split(trajectory) flux_in_segments = counts_in.split(trajectory) for seg in flux_in_segments: self.flux_segments[state]['in'] += [seg[1:-1]] for seg in flux_out_segments: self.flux_segments[state]['out'] += [seg[1:-1]] def analyze(self, trajectories): """Full analysis of a trajectory or trajectories. Parameters ---------- trajectories : :class:`.Trajectory` or list of :class:`.Trajectory` """ # TODO: I hate using isinstance, but I don't see another way if isinstance(trajectories, paths.Trajectory): trajectories = [trajectories] for traj in trajectories: for state in [self.stateA, self.stateB]: self.analyze_continuous_time(traj, state) self.analyze_lifetime(traj, state) self.analyze_flux(traj, state) self.analyze_transition_duration(traj, self.stateA, self.stateB) self.analyze_transition_duration(traj, self.stateB, self.stateA) # return self so we can init and analyze in one line return self # TODO: add a `summary` function to output a nice pandas frame or # something
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, time
from threading import Thread
from PIL import Image
from flask import current_app
from app import db, imgs
from app.models import Image as Img
from app.models import Post
from helpers.text import get_all_imgs

_image_thread = None


def jpeg_convert(infile):
    """Try to convert and compress an image to jpeg.

    Images wider than 932px are scaled down, preserving aspect ratio.
    Returns the basename of the converted file, or of the original file
    when conversion fails.
    """
    f, e = os.path.splitext(infile)
    outfile = f + '.jpg'
    try:
        img = Image.open(infile)
        base_width = 932
        w, h = img.size
        if w > base_width:
            ratio = base_width / w
            new_height = int(h * ratio)
            img = img.resize((base_width, new_height), Image.ANTIALIAS)
        img.save(outfile, dpi=[100, 100], quality=80)
    except IOError:
        current_app.logger.exception('Could not save file: ')
        return os.path.basename(infile)
    return os.path.basename(outfile)


def crop_image(infile):
    """Crop an image.

    Check width and height, and crop according to the lower parameter
    from the center; the result is shrunk to at most 152x152 and saved
    next to the input as ``<name>_crop<ext>``.
    """
    f, e = os.path.splitext(infile)
    outfile = f + '_crop' + e
    original = Image.open(infile)
    w, h = original.size
    max_width = 152
    if w < h:
        l = 0
        r = w
        t = (h // 2) - (w // 2)
        b = (h // 2) + (w // 2)
    elif h < w:
        l = (w // 2) - (h // 2)
        r = (w // 2) + (h // 2)
        t = 0
        b = h
    else:
        # BUG FIX: a square image previously left l/t/r/b unbound and
        # raised NameError; keep the full image in that case.
        l = t = 0
        r = w
        b = h
    cropped = original.crop((l, t, r, b))
    w, h = cropped.size
    if w > max_width and h > max_width:
        cropped = cropped.resize((max_width, max_width), Image.ANTIALIAS)
    cropped.save(outfile)


def remove_images(app):
    """Background worker: delete images no longer referenced by any post.

    Runs forever; at the configured times of day/weekdays it diffs the
    images recorded in the database against those referenced in post
    bodies and removes the orphans (files and db rows).
    """
    from datetime import datetime
    while True:
        time.sleep(10)
        conf = app.config['IMAGE_DELETE']
        with app.app_context():
            if (datetime.utcnow().hour in conf['TIME_OF_DAY'] and
                    datetime.utcnow().weekday() in conf['WEEKDAY']):
                images = Img.get_all_imgs()
                db_imgs = [img.location + img.filename for img in images]
                posts = Post.get_all()
                post_imgs = get_all_imgs((post.body_html for post in posts))
                diff_imgs = set(db_imgs) - set(post_imgs)
                if diff_imgs:
                    app.logger.debug('Images found in db: {}'.format(db_imgs))
                    # BUG FIX: these two messages previously logged db_imgs
                    # instead of the post/diff image sets.
                    app.logger.debug('Images found in posts: {}'.format(post_imgs))
                    app.logger.debug('Images to delete: {}'.format(diff_imgs))
                    for i in images:
                        if i.location + i.filename in diff_imgs:
                            if os.path.isfile(imgs.path(i.filename)):
                                os.remove(imgs.path(i.filename))
                            # also remove the matching thumbnail, if any
                            f, e = os.path.splitext(i.filename)
                            if os.path.isfile(imgs.path(f + '_crop' + e)):
                                os.remove(imgs.path(f + '_crop' + e))
                            db.session.delete(i)
                            db.session.commit()


def start_image_deletion_thread():
    """Start the image-deletion worker thread once (no-op under TESTING)."""
    if not current_app.config['TESTING']:
        global _image_thread
        if _image_thread is None:
            _image_thread = Thread(target=remove_images,
                                   args=[current_app._get_current_object()])
            current_app.logger.debug('Starting image deletion thread')
            _image_thread.start()
import logging
import asyncio
import time
import random
import collections

import discord
import markovify

from discord.ext import commands
from .common import Cog

log = logging.getLogger(__name__)


def make_textmodel(texter, data):
    """Build the markov model for a texter (runs in an executor thread)."""
    texter.model = markovify.NewlineText(data, texter.chain_length)


async def make_texter(chain_length, data, texter_id):
    """Create a Texter and fill it with its text model."""
    texter = Texter(texter_id, chain_length)
    await texter.fill(data)
    return texter


class Texter:
    """Texter - Main texter class.

    This class holds information about a markov chain generator.
    """
    __slots__ = ('loop', 'id', 'refcount', 'chain_length', 'model')

    def __init__(self, texter_id, chain_length=1, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()

        self.id = texter_id
        self.refcount = 1
        self.chain_length = chain_length
        self.loop = loop
        self.model = None

    def __repr__(self):
        return f'Texter(refcount={self.refcount})'

    async def fill(self, data):
        """Fill a texter with its text model."""
        t_start = time.monotonic()
        future_textmodel = self.loop.run_in_executor(None, make_textmodel,
                                                     self, data)
        await future_textmodel
        delta = round((time.monotonic() - t_start) * 1000, 2)
        log.info(f"Texter.fill: {delta}ms")

    def _sentence(self, char_limit):
        """Get a sentence from a initialized texter."""
        text = 'None'
        if char_limit is not None:
            text = self.model.make_short_sentence(char_limit)
        else:
            text = self.model.make_sentence()
        return text

    async def sentence(self, char_limit=None):
        """Get a sentence from a initialized texter."""
        if self.refcount <= 4:
            # max value refcount can be is 5
            self.refcount += 1

        res = None
        count = 0
        while res is None:
            if count > 3:
                break
            future_sentence = self.loop.run_in_executor(None, self._sentence,
                                                        char_limit)
            res = await future_sentence
            count += 1
        return str(res)

    def clear(self):
        # NOTE: self.id is deliberately kept; texter_collection still needs
        # it to remove this texter from the generator map after clearing.
        del self.model, self.refcount, self.chain_length, self.loop


class Speak(Cog):
    """Markov-chain "speak" cog: builds per-guild texters and replies."""

    def __init__(self, bot):
        super().__init__(bot)
        self.text_generators = {}
        self.generating = {}
        self.coll_task = self.bot.loop.create_task(self.coll_task_func())

    async def coll_task_func(self):
        """Run the texter garbage collection every 6 minutes."""
        try:
            while True:
                await self.texter_collection()
                await asyncio.sleep(360)
        except asyncio.CancelledError:
            pass

    async def texter_collection(self):
        """Free memory by collecting unused Texters."""
        amount = len(self.text_generators)
        if amount < 1:
            return

        t_start = time.monotonic()
        cleaned = 0
        for texter in list(self.text_generators.values()):
            if texter.refcount < 1:
                texter.clear()
                cleaned += 1
                del self.text_generators[texter.id]
            else:
                texter.refcount -= 1
        t_end = time.monotonic()

        if cleaned > 0:
            delta = round((t_end - t_start) * 1000, 2)
            log.info(f'{amount} -> {amount - cleaned} in {delta}ms')

    async def get_messages(self, guild, amount=2000) -> list:
        """Fetch cleaned, human-authored message contents from the guild's
        configured speak channel (falls back to the default channel)."""
        channel_id = await self.config.cfg_get(guild, 'speak_channel')
        channel = guild.get_channel(channel_id)
        if channel is None:
            channel = guild.default_channel

        self.generating[guild.id] = True
        try:
            messages = []
            async for message in channel.history(limit=amount):
                author = message.author
                if author == self.bot.user:
                    continue
                if author.bot:
                    continue
                messages.append(message.clean_content)
            self.generating[guild.id] = False
            return messages
        except discord.Forbidden:
            log.info(f'got Forbidden from {guild.id} when making message history')
            self.generating[guild.id] = False
            return ['None']

    async def get_messages_str(self, guild, amount=2000):
        """Same as get_messages, joined into one newline-separated string."""
        m = await self.get_messages(guild, amount)
        return '\n'.join(m)

    async def new_texter(self, guild):
        """Build a fresh texter for the guild, replacing any existing one."""
        guild_messages = await self.get_messages_str(guild)
        new_texter = await make_texter(1, guild_messages, guild.id)
        self.text_generators[guild.id] = new_texter

    async def get_texter(self, guild):
        """Return the guild's texter, creating one on first use."""
        if guild.id not in self.text_generators:
            await self.new_texter(guild)
        return self.text_generators[guild.id]

    async def make_sentence(self, ctx, char_limit=None):
        """Generate one sentence for the context's guild (shows typing)."""
        with ctx.typing():
            texter = await self.get_texter(ctx.guild)
            sentence = await texter.sentence(char_limit)
        return sentence

    async def on_message(self, message):
        """Auto-reply hook: respond with probability `autoreply_prob`, or
        always when the message starts with a speak prefix."""
        ctx = await self.bot.get_context(message)
        if message.author.bot:
            return
        if not isinstance(ctx.channel, discord.TextChannel):
            return

        prob = await self.config.cfg_get(ctx.guild, 'autoreply_prob')
        if prob is None:
            log.warning('[autoreply] how can autoreply_prob be none??')
            return

        force = False
        for prefix in self.bot.config.SPEAK_PREFIXES:
            if message.content.startswith(prefix):
                log.info('[autoreply] forcing from speak prefix')
                force = True

        if not force:
            if random.random() > prob:
                return

        # don't reply while this guild's texter is (re)building
        if self.generating.get(ctx.guild.id):
            return

        sentence = await self.make_sentence(ctx)
        await ctx.send(sentence)

    @commands.command()
    @commands.is_owner()
    async def texclean(self, ctx, amount: int = 1):
        """Clean texters."""
        before = len(self.text_generators)
        t_start = time.monotonic()
        for i in range(amount):
            await self.texter_collection()
        after = len(self.text_generators)
        t_end = time.monotonic()
        delta = round((t_end - t_start) * 1000, 2)
        await ctx.send(f"`{before} => {after}, cleaned {before-after}, took {delta}ms`")

    @commands.command()
    @commands.is_owner()
    async def ntexter(self, ctx, guild_id: int = None):
        """Create a new texter for a guild, overwrites existing one"""
        if guild_id is None:
            guild_id = ctx.guild.id
        guild = self.bot.get_guild(guild_id)

        t1 = time.monotonic()
        await self.new_texter(guild)
        t2 = time.monotonic()
        delta = round((t2 - t1), 2)
        await ctx.send(f'Took {delta} seconds loading texter.')

    @commands.command(aliases=['spt'])
    @commands.guild_only()
    async def speaktrigger(self, ctx):
        """Force your Texter to say a sentence.

        If the texter is still being generated, this command does nothing
        while it isn't completly generated.
        """
        if self.generating.get(ctx.guild.id):
            return
        sentence = await self.make_sentence(ctx)
        await ctx.send(sentence)

    @commands.command(hidden=True)
    async def covfefe(self, ctx):
        """covfefe."""
        await ctx.send("Despite the constant negative press covfefe.")

    @commands.command(aliases=['jw'])
    @commands.guild_only()
    async def jwormhole(self, ctx):
        """lul wormhole"""
        res = await self.make_sentence(ctx)
        await ctx.send(f'<@127296623779774464> wormhole send {res}')

    @commands.command()
    @commands.guild_only()
    async def madlibs(self, ctx, *, inputstr: str):
        """Changes any "---" in the input to a 12-letter generated sentence"""
        # neutralize mass mentions before echoing user input back
        inputstr = inputstr.replace('@everyone', '@\u200beveryone')
        inputstr = inputstr.replace('@here', '@\u200bhere')

        splitted = inputstr.split()
        if splitted.count('---') < 1:
            await ctx.send(":no_entry_sign: you can't just make josé say whatever you want! :no_entry_sign:")
            return
        if splitted.count('---') > 5:
            await ctx.send("thats a .......... lot")
            return

        res = []
        for word in splitted:
            if word == '---':
                res.append(await self.make_sentence(ctx, 12))
            else:
                res.append(word)
        await ctx.send(' '.join(res))

    @commands.command()
    @commands.is_owner()
    async def txstress(self, ctx):
        """Stress test texters LUL"""
        t1 = time.monotonic()
        txs = [(await self.new_texter(guild)) for guild in self.bot.guilds]
        t2 = time.monotonic()
        delta = round((t2 - t1), 2)
        await ctx.send(f'Generated {len(txs)} in {delta}')

    @commands.command()
    async def txstat(self, ctx):
        """Show statistics about all texters"""
        tg = self.text_generators
        refcounts = collections.Counter()
        for gid, tx in tg.items():
            refcounts[tx.refcount] += 1

        res = ['refcount | texters']
        res += [f'{r} | {txc}' for (r, txc) in refcounts.most_common()]
        res = '\n'.join(res)
        await ctx.send(f'```{res}```')


def setup(bot):
    bot.add_cog(Speak(bot))
""" controllers.py Point module controllers. """ from flask import Blueprint, request, Response, abort, jsonify from werkzeug.exceptions import BadRequest from datetime import datetime import logging from app.decorators import ignore_exception from app.mod_point.models import Point from app.mod_auth import oauth from app.decorators import crossdomain mod_point = Blueprint('point', __name__, url_prefix='/api/v1/point') sfloat = ignore_exception(TypeError)(float) sint = ignore_exception(TypeError)(int) sbool = ignore_exception(TypeError)(bool) @mod_point.route('/<adventure_slug>/<point_type>', methods=['GET']) @crossdomain(origin='*') def list_point(adventure_slug, point_type): points = Point.objects(adventure=adventure_slug, type=point_type) return Response(points.to_json(), mimetype='application/json') # TODO: remove $oid references from json output @mod_point.route('/<point_id>', methods=['GET']) @crossdomain(origin='*') def get_point(point_id): point = Point.objects.get(id=point_id) return jsonify(point.to_dict()) @mod_point.route('/<point_id>', methods=['PUT']) @crossdomain(origin='*') @oauth.require_oauth('email') def update_point(point_id): point = Point.objects.get(id=point_id) try: update = { 'set__title': request.values.get('title', None), 'set__latitude': sfloat(request.values.get('latitude', None)), 'set__longitude': sfloat(request.values.get('longitude', None)), 'set__desc': request.values.get('desc', None), 'set__resource': request.values.get('resource', None), 'set__thumb': request.values.get('thumb', None), 'set__photo': request.values.get('photo', None), 'set__video': request.values.get('video', None), 'set__timestamp': datetime.strptime(request.values.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")), "%Y-%m-%dT%H:%M:%S.%fZ"), 'set__hide': sbool(request.values.get('hide', None)), 'set__delorme_id': sint(request.values.get('delorme_id', None)) } point.update(**update) point.reload() return jsonify(point.to_dict()) except TypeError 
as e: logging.error(e) abort(400) except BadRequest as e: logging.error(e) abort(400) except Exception as e: logging.error(e) abort(500) return @mod_point.route('/<adventure_slug>/<point_type>', methods=['POST']) @crossdomain(origin='*') @oauth.require_oauth('email') def add_point(adventure_slug, point_type): try: point = Point( title=request.values.get('title', None), latitude=sfloat(request.values.get('latitude', None)), longitude=sfloat(request.values.get('longitude', None)), desc=request.values.get('desc', None), resource=request.values.get('resource', None), timestamp=datetime.strptime(request.values.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")), "%Y-%m-%dT%H:%M:%S.%fZ"), thumb=request.values.get('thumb', None), photo=request.values.get('photo', None), video=request.values.get('video', None), hide=sbool(request.values.get('hide', None)), delorme_id=sint(request.values.get('delorme_id', None)), type=point_type, adventure=adventure_slug ) point.save() return jsonify(point.to_dict()) except ValueError as e: logging.error(e) abort(400) except BadRequest as e: logging.error(e) abort(400) except Exception as e: logging.error(e) abort(500) return @mod_point.route('/<point_id>', methods=['DELETE']) @crossdomain(origin='*') @oauth.require_oauth('email') def delete_point(point_id): point = Point.objects.get(id=point_id) try: point.delete() return jsonify(point.to_dict()) except BadRequest as e: logging.error(e) abort(400) except Exception as e: logging.error(e) abort(500) return Improving point list response. """ controllers.py Point module controllers. 
""" from flask import Blueprint, request, abort, jsonify from werkzeug.exceptions import BadRequest from datetime import datetime import logging from app.decorators import ignore_exception from app.mod_point.models import Point from app.mod_auth import oauth from app.decorators import crossdomain mod_point = Blueprint('point', __name__, url_prefix='/api/v1/point') sfloat = ignore_exception(TypeError)(float) sint = ignore_exception(TypeError)(int) sbool = ignore_exception(TypeError)(bool) @mod_point.route('/<adventure_slug>/<point_type>', methods=['GET']) @crossdomain(origin='*') def list_point(adventure_slug, point_type): points = Point.objects(adventure=adventure_slug, type=point_type) points_dict = [] for point in points: points_dict.append(point.to_dict()) return jsonify(points=points_dict) @mod_point.route('/<point_id>', methods=['GET']) @crossdomain(origin='*') def get_point(point_id): point = Point.objects.get(id=point_id) return jsonify(point.to_dict()) @mod_point.route('/<point_id>', methods=['PUT']) @crossdomain(origin='*') @oauth.require_oauth('email') def update_point(point_id): point = Point.objects.get(id=point_id) try: update = { 'set__title': request.values.get('title', None), 'set__latitude': sfloat(request.values.get('latitude', None)), 'set__longitude': sfloat(request.values.get('longitude', None)), 'set__desc': request.values.get('desc', None), 'set__resource': request.values.get('resource', None), 'set__thumb': request.values.get('thumb', None), 'set__photo': request.values.get('photo', None), 'set__video': request.values.get('video', None), 'set__timestamp': datetime.strptime(request.values.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")), "%Y-%m-%dT%H:%M:%S.%fZ"), 'set__hide': sbool(request.values.get('hide', None)), 'set__delorme_id': sint(request.values.get('delorme_id', None)) } point.update(**update) point.reload() return jsonify(point.to_dict()) except TypeError as e: logging.error(e) abort(400) except BadRequest as e: 
logging.error(e) abort(400) except Exception as e: logging.error(e) abort(500) return @mod_point.route('/<adventure_slug>/<point_type>', methods=['POST']) @crossdomain(origin='*') @oauth.require_oauth('email') def add_point(adventure_slug, point_type): try: point = Point( title=request.values.get('title', None), latitude=sfloat(request.values.get('latitude', None)), longitude=sfloat(request.values.get('longitude', None)), desc=request.values.get('desc', None), resource=request.values.get('resource', None), timestamp=datetime.strptime(request.values.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")), "%Y-%m-%dT%H:%M:%S.%fZ"), thumb=request.values.get('thumb', None), photo=request.values.get('photo', None), video=request.values.get('video', None), hide=sbool(request.values.get('hide', None)), delorme_id=sint(request.values.get('delorme_id', None)), type=point_type, adventure=adventure_slug ) point.save() return jsonify(point.to_dict()) except ValueError as e: logging.error(e) abort(400) except BadRequest as e: logging.error(e) abort(400) except Exception as e: logging.error(e) abort(500) return @mod_point.route('/<point_id>', methods=['DELETE']) @crossdomain(origin='*') @oauth.require_oauth('email') def delete_point(point_id): point = Point.objects.get(id=point_id) try: point.delete() return jsonify(point.to_dict()) except BadRequest as e: logging.error(e) abort(400) except Exception as e: logging.error(e) abort(500) return
# Copyright (c) 2010-2013, GEM Foundation. # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. """ Core calculator functionality for computing stochastic event sets and ground motion fields using the 'event-based' method. Stochastic events sets (which can be thought of as collections of ruptures) are computed iven a set of seismic sources and investigation time span (in years). For more information on computing stochastic event sets, see :mod:`openquake.hazardlib.calc.stochastic`. One can optionally compute a ground motion field (GMF) given a rupture, a site collection (which is a collection of geographical points with associated soil parameters), and a ground shaking intensity model (GSIM). For more information on computing ground motion fields, see :mod:`openquake.hazardlib.calc.gmf`. 
""" import time import random import collections import numpy.random from django.db import transaction from openquake.hazardlib.calc import gmf from openquake.hazardlib.imt import from_string from openquake.engine import logs, writer from openquake.engine.calculators.hazard import general from openquake.engine.calculators.hazard.classical import ( post_processing as cls_post_proc) from openquake.engine.calculators.hazard.event_based import post_processing from openquake.engine.db import models from openquake.engine.utils import tasks from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor #: Always 1 for the computation of ground motion fields in the event-based #: hazard calculator. DEFAULT_GMF_REALIZATIONS = 1 # NB: beware of large caches inserter = writer.CacheInserter(models.GmfData, 1000) @tasks.oqtask def compute_ses_and_gmfs(job_id, src_seeds, gsims_by_rlz, task_no): """ Celery task for the stochastic event set calculator. Samples logic trees and calls the stochastic event set calculator. Once stochastic event sets are calculated, results will be saved to the database. See :class:`openquake.engine.db.models.SESCollection`. Optionally (specified in the job configuration using the `ground_motion_fields` parameter), GMFs can be computed from each rupture in each stochastic event set. GMFs are also saved to the database. :param int job_id: ID of the currently running job. 
:param src_seeds: List of pairs (source, seed) :params gsims_by_rlz: dictionary of GSIM :param task_no: an ordinal so that GMV can be collected in a reproducible order """ rlz_ids = [r.id for r in gsims_by_rlz] ses_coll = models.SESCollection.objects.get(lt_realization_ids=rlz_ids) hc = models.HazardCalculation.objects.get(oqjob=job_id) all_ses = models.SES.objects.filter(ses_collection=ses_coll) imts = map(from_string, hc.intensity_measure_types) params = dict( correl_model=general.get_correl_model(hc), truncation_level=hc.truncation_level, maximum_distance=hc.maximum_distance, num_sites=len(hc.site_collection)) collector = GmfCollector( [s.id for s in hc.site_collection], params, imts, gsims_by_rlz) mon1 = LightMonitor('filtering sites', job_id, compute_ses_and_gmfs) mon2 = LightMonitor('generating ruptures', job_id, compute_ses_and_gmfs) mon3 = LightMonitor('filtering ruptures', job_id, compute_ses_and_gmfs) mon4 = LightMonitor('saving ses', job_id, compute_ses_and_gmfs) mon5 = LightMonitor('computing gmfs', job_id, compute_ses_and_gmfs) # Compute and save stochastic event sets rnd = random.Random() for src, seed in src_seeds: t0 = time.time() rnd.seed(seed) with mon1: # filtering sources s_sites = src.filter_sites_by_distance_to_source( hc.maximum_distance, hc.site_collection ) if hc.maximum_distance else hc.site_collection if s_sites is None: continue # NB: the number of occurrences is very low, << 1, so it is # more efficient to filter only the ruptures that occur # and not to compute the occurrencies of the filtered ruptures # the dictionary `ses_num_occ` contains [(ses, num_occurrences)] # for each occurring rupture for each ses in the ses collection ses_num_occ = collections.defaultdict(list) with mon2: # generating ruptures for rup in src.iter_ruptures(): for ses in all_ses: numpy.random.seed(rnd.randint(0, models.MAX_SINT_32)) num_occurrences = rup.sample_number_of_occurrences() if num_occurrences: ses_num_occ[rup].append((ses, num_occurrences)) for rup 
in ses_num_occ: with mon3: # filtering ruptures r_sites = rup.source_typology.\ filter_sites_by_distance_to_rupture( rup, hc.maximum_distance, s_sites ) if hc.maximum_distance else s_sites if r_sites is None: continue # saving ses and generating gmf for ses, num_occurrences in ses_num_occ[rup]: for occ in range(1, num_occurrences + 1): with mon4: # saving ruptures rup_id = models.SESRupture.objects.create( ses=ses, rupture=rup, tag='smlt=%02d|ses=%04d|src=%s|occ=%02d' % (ses_coll.ordinal, ses.ordinal, src.source_id, occ), hypocenter=rup.hypocenter.wkt2d, magnitude=rup.mag).id if hc.ground_motion_fields: with mon5: # computing GMFs rup_seed = rnd.randint(0, models.MAX_SINT_32) collector.calc_gmf(r_sites, rup, rup_id, rup_seed) num_ruptures = sum(occ for ses, occ in ses_num_occ[rup] for rup in ses_num_occ) logs.LOG.info('job=%d, src=%s:%s, num_ruptures=%d, calc_time=%fs', job_id, src.source_id, src.__class__.__name__, num_ruptures, time.time() - t0) mon1.flush() mon2.flush() mon3.flush() mon4.flush() mon5.flush() if hc.ground_motion_fields: with EnginePerformanceMonitor( 'saving gmfs', job_id, compute_ses_and_gmfs): collector.save_gmfs(task_no) class GmfCollector(object): """ A class to compute and save ground motion fields. """ def __init__(self, site_ids, params, imts, gsims_by_rlz): self.site_ids = site_ids self.params = params self.imts = imts self.gsims_by_rlz = gsims_by_rlz self.gmvs_per_site = collections.defaultdict(list) self.ruptures_per_site = collections.defaultdict(list) def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed): """ Compute the GMF generated by the given rupture on the given sites and collect the values in the dictionaries .gmvs_per_site and .ruptures_per_site. 
""" triples = [(rupture, rupture_id, rupture_seed)] for rlz, gsims in self.gsims_by_rlz.items(): for imt, idx, gmv, rup_id in _compute_gmf( self.params, self.imts, gsims, r_sites, triples): if gmv: site_id = self.site_ids[idx] self.gmvs_per_site[rlz, imt, site_id].append(gmv) self.ruptures_per_site[rlz, imt, site_id].append(rup_id) @transaction.commit_on_success(using='job_init') def save_gmfs(self, task_no): """ Helper method to save the computed GMF data to the database. :param task_no: The ordinal of the task which generated the current GMFs to save """ for rlz, imt, site_id in self.gmvs_per_site: imt_name, sa_period, sa_damping = imt inserter.add(models.GmfData( gmf=models.Gmf.objects.get(lt_realization=rlz), task_no=task_no, imt=imt_name, sa_period=sa_period, sa_damping=sa_damping, site_id=site_id, gmvs=self.gmvs_per_site[rlz, imt, site_id], rupture_ids=self.ruptures_per_site[rlz, imt, site_id])) inserter.flush() self.gmvs_per_site.clear() self.ruptures_per_site.clear() # NB: I tried to return a single dictionary {site_id: [(gmv, rupt_id),...]} # but it takes a lot more memory (MS) def _compute_gmf(params, imts, gsims, site_coll, rupture_id_seed_triples): """ Compute a ground motion field value for each rupture, for all the points affected by that rupture, for the given IMT. Returns a dictionary with the nonzero contributions to each site id, and a dictionary with the ids of the contributing ruptures for each site id. 
assert len(ruptures) == len(rupture_seeds) :param params: a dictionary containing the keys correl_model, truncation_level, maximum_distance :param imts: a list of hazardlib intensity measure types :param gsims: a dictionary {tectonic region type -> GSIM instance} :param site_coll: a SiteCollection instance :param rupture_id_seed_triple: a list of triples with types (:class:`openquake.hazardlib.source.rupture.Rupture`, int, int) """ # Compute and save ground motion fields for rupture, rup_id, rup_seed in rupture_id_seed_triples: gmf_calc_kwargs = { 'rupture': rupture, 'sites': site_coll, 'imts': imts, 'gsim': gsims[rupture.tectonic_region_type], 'truncation_level': params['truncation_level'], 'realizations': DEFAULT_GMF_REALIZATIONS, 'correlation_model': params['correl_model'], 'num_sites': params['num_sites'], } numpy.random.seed(rup_seed) gmf_dict = gmf.ground_motion_fields(**gmf_calc_kwargs) for imt, gmf_1_realiz in gmf_dict.iteritems(): # since DEFAULT_GMF_REALIZATIONS is 1, gmf_1_realiz is a matrix # with n_sites rows and 1 column for idx, gmv in enumerate(gmf_1_realiz): # convert a 1x1 matrix into a float yield imt, idx, float(gmv), rup_id class EventBasedHazardCalculator(general.BaseHazardCalculator): """ Probabilistic Event-Based hazard calculator. Computes stochastic event sets and (optionally) ground motion fields. """ core_calc_task = compute_ses_and_gmfs def task_arg_gen(self, _block_size=None): """ Loop through realizations and sources to generate a sequence of task arg tuples. Each tuple of args applies to a single task. Yielded results are tuples of the form job_id, sources, ses, seeds (seeds will be used to seed numpy for temporal occurence sampling). 
""" hc = self.hc rnd = random.Random() rnd.seed(hc.random_seed) task_no = 0 for job_id, block, gsims_by_rlz in super( EventBasedHazardCalculator, self).task_arg_gen(): ss = [(src, rnd.randint(0, models.MAX_SINT_32)) for src in block] # source, seed pairs yield job_id, ss, gsims_by_rlz, task_no task_no += 1 # now the source_blocks_per_ltpath dictionary can be cleared self.source_blocks_per_ltpath.clear() def initialize_ses_db_records(self, ordinal, rlzs): """ Create :class:`~openquake.engine.db.models.Output`, :class:`~openquake.engine.db.models.SESCollection` and :class:`~openquake.engine.db.models.SES` "container" records for a single realization. Stochastic event set ruptures computed for this realization will be associated to these containers. NOTE: Many tasks can contribute ruptures to the same SES. """ rlz_ids = [r.id for r in rlzs] output = models.Output.objects.create( oq_job=self.job, display_name='SES Collection smlt-%d-rlz-%s' % ( ordinal, ','.join(map(str, rlz_ids))), output_type='ses') ses_coll = models.SESCollection.objects.create( output=output, lt_realization_ids=rlz_ids, ordinal=ordinal) for rlz in rlzs: if self.job.hazard_calculation.ground_motion_fields: output = models.Output.objects.create( oq_job=self.job, display_name='GMF rlz-%s' % rlz.id, output_type='gmf') models.Gmf.objects.create(output=output, lt_realization=rlz) all_ses = [] for i in xrange(1, self.hc.ses_per_logic_tree_path + 1): all_ses.append( models.SES.objects.create( ses_collection=ses_coll, investigation_time=self.hc.investigation_time, ordinal=i)) return all_ses def pre_execute(self): """ Do pre-execution work. At the moment, this work entails: parsing and initializing sources, parsing and initializing the site model (if there is one), parsing vulnerability and exposure files, and generating logic tree realizations. (The latter piece basically defines the work to be done in the `execute` phase.) 
""" super(EventBasedHazardCalculator, self).pre_execute() for i, rlzs in enumerate(self.rlzs_per_ltpath.itervalues()): self.initialize_ses_db_records(i, rlzs) def post_process(self): """ If requested, perform additional processing of GMFs to produce hazard curves. """ if self.hc.hazard_curves_from_gmfs: with EnginePerformanceMonitor('generating hazard curves', self.job.id): self.parallelize( post_processing.gmf_to_hazard_curve_task, post_processing.gmf_to_hazard_curve_arg_gen(self.job), self.log_percent) # If `mean_hazard_curves` is True and/or `quantile_hazard_curves` # has some value (not an empty list), do this additional # post-processing. if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves: with EnginePerformanceMonitor( 'generating mean/quantile curves', self.job.id): self.do_aggregate_post_proc() if self.hc.hazard_maps: with EnginePerformanceMonitor( 'generating hazard maps', self.job.id): self.parallelize( cls_post_proc.hazard_curves_to_hazard_map_task, cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen( self.job), self.log_percent) Better logging Former-commit-id: 2aab78a33f1201da1f1303d8ce962c3adad29f42 [formerly 2aab78a33f1201da1f1303d8ce962c3adad29f42 [formerly 134eb0fdbc2cbe930d1ef83e78f99bd17e4cf8d4]] Former-commit-id: e6ae51800349022996b47051b440a266337eedfa Former-commit-id: cccc55f92a0fbbbdff44f068f87bed12303b2366 # Copyright (c) 2010-2013, GEM Foundation. # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.

"""
Core calculator functionality for computing stochastic event sets and ground
motion fields using the 'event-based' method.

Stochastic events sets (which can be thought of as collections of ruptures)
are computed iven a set of seismic sources and investigation time span (in
years).

For more information on computing stochastic event sets, see
:mod:`openquake.hazardlib.calc.stochastic`.

One can optionally compute a ground motion field (GMF) given a rupture, a site
collection (which is a collection of geographical points with associated soil
parameters), and a ground shaking intensity model (GSIM).

For more information on computing ground motion fields, see
:mod:`openquake.hazardlib.calc.gmf`.
"""
import time
import random
import collections

import numpy.random

from django.db import transaction

from openquake.hazardlib.calc import gmf
from openquake.hazardlib.imt import from_string

from openquake.engine import logs, writer
from openquake.engine.calculators.hazard import general
from openquake.engine.calculators.hazard.classical import (
    post_processing as cls_post_proc)
from openquake.engine.calculators.hazard.event_based import post_processing
from openquake.engine.db import models
from openquake.engine.utils import tasks
from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor


#: Always 1 for the computation of ground motion fields in the event-based
#: hazard calculator.
DEFAULT_GMF_REALIZATIONS = 1

# NB: beware of large caches
inserter = writer.CacheInserter(models.GmfData, 1000)


@tasks.oqtask
def compute_ses_and_gmfs(job_id, src_seeds, gsims_by_rlz, task_no):
    """
    Celery task for the stochastic event set calculator.

    Samples logic trees and calls the stochastic event set calculator.

    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.

    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.

    :param int job_id:
        ID of the currently running job.
    :param src_seeds:
        List of pairs (source, seed)
    :params gsims_by_rlz:
        dictionary of GSIM
    :param task_no:
        an ordinal so that GMV can be collected in a reproducible order
    """
    rlz_ids = [r.id for r in gsims_by_rlz]
    ses_coll = models.SESCollection.objects.get(lt_realization_ids=rlz_ids)

    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    all_ses = models.SES.objects.filter(ses_collection=ses_coll)
    imts = map(from_string, hc.intensity_measure_types)
    params = dict(
        correl_model=general.get_correl_model(hc),
        truncation_level=hc.truncation_level,
        maximum_distance=hc.maximum_distance,
        num_sites=len(hc.site_collection))
    collector = GmfCollector(
        [s.id for s in hc.site_collection], params, imts, gsims_by_rlz)

    mon1 = LightMonitor('filtering sites', job_id, compute_ses_and_gmfs)
    mon2 = LightMonitor('generating ruptures', job_id, compute_ses_and_gmfs)
    mon3 = LightMonitor('filtering ruptures', job_id, compute_ses_and_gmfs)
    mon4 = LightMonitor('saving ses', job_id, compute_ses_and_gmfs)
    mon5 = LightMonitor('computing gmfs', job_id, compute_ses_and_gmfs)

    # Compute and save stochastic event sets
    rnd = random.Random()
    for src, seed in src_seeds:
        t0 = time.time()
        rnd.seed(seed)

        with mon1:  # filtering sources
            s_sites = src.filter_sites_by_distance_to_source(
                hc.maximum_distance, hc.site_collection
            ) if hc.maximum_distance else hc.site_collection
            if s_sites is None:
                continue

        # NB: the number of occurrences is very low, << 1, so it is
        # more efficient to filter only the ruptures that occur
        # and not to compute the occurrencies of the filtered ruptures
        # the dictionary `ses_num_occ` contains [(ses, num_occurrences)]
        # for each occurring rupture for each ses in the ses collection
        ses_num_occ = collections.defaultdict(list)
        with mon2:  # generating ruptures
            for rup in src.iter_ruptures():
                for ses in all_ses:
                    numpy.random.seed(rnd.randint(0, models.MAX_SINT_32))
                    num_occurrences = rup.sample_number_of_occurrences()
                    if num_occurrences:
                        ses_num_occ[rup].append((ses, num_occurrences))

        for rup in ses_num_occ:
            with mon3:  # filtering ruptures
                r_sites = rup.source_typology.\
                    filter_sites_by_distance_to_rupture(
                        rup, hc.maximum_distance, s_sites
                    ) if hc.maximum_distance else s_sites
                if r_sites is None:
                    continue

            # saving ses and generating gmf
            for ses, num_occurrences in ses_num_occ[rup]:
                for occ in range(1, num_occurrences + 1):
                    with mon4:  # saving ruptures
                        rup_id = models.SESRupture.objects.create(
                            ses=ses, rupture=rup,
                            tag='smlt=%02d|ses=%04d|src=%s|occ=%02d' % (
                                ses_coll.ordinal, ses.ordinal,
                                src.source_id, occ),
                            hypocenter=rup.hypocenter.wkt2d,
                            magnitude=rup.mag).id

                    if hc.ground_motion_fields:
                        with mon5:  # computing GMFs
                            rup_seed = rnd.randint(0, models.MAX_SINT_32)
                            collector.calc_gmf(r_sites, rup, rup_id, rup_seed)

        # log calc_time per rupture
        if ses_num_occ:
            # BUG FIX: the original comprehension iterated ses_num_occ[rup]
            # with the `rup` name leaked from the loop above, so it counted
            # only the last rupture's occurrences (once per rupture). Count
            # every occurrence of every rupture instead.
            num_ruptures = sum(num for rup in ses_num_occ
                               for _ses, num in ses_num_occ[rup])
            logs.LOG.info(
                'job=%d, src=%s:%s, num_ruptures=%d/%d, calc_time=%fs',
                job_id, src.source_id, src.__class__.__name__,
                len(ses_num_occ), num_ruptures, time.time() - t0)

    mon1.flush()
    mon2.flush()
    mon3.flush()
    mon4.flush()
    mon5.flush()

    if hc.ground_motion_fields:
        with EnginePerformanceMonitor(
                'saving gmfs', job_id, compute_ses_and_gmfs):
            collector.save_gmfs(task_no)


class GmfCollector(object):
    """
    A class to compute and save ground motion fields.
    """
    def __init__(self, site_ids, params, imts, gsims_by_rlz):
        self.site_ids = site_ids
        self.params = params
        self.imts = imts
        self.gsims_by_rlz = gsims_by_rlz
        # Accumulators keyed by (realization, imt, site_id).
        self.gmvs_per_site = collections.defaultdict(list)
        self.ruptures_per_site = collections.defaultdict(list)

    def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Compute the GMF generated by the given rupture on the given
        sites and collect the values in the dictionaries
        .gmvs_per_site and .ruptures_per_site.
        """
        triples = [(rupture, rupture_id, rupture_seed)]
        for rlz, gsims in self.gsims_by_rlz.items():
            for imt, idx, gmv, rup_id in _compute_gmf(
                    self.params, self.imts, gsims, r_sites, triples):
                if gmv:  # keep only nonzero contributions
                    site_id = self.site_ids[idx]
                    self.gmvs_per_site[rlz, imt, site_id].append(gmv)
                    self.ruptures_per_site[rlz, imt, site_id].append(rup_id)

    @transaction.commit_on_success(using='job_init')
    def save_gmfs(self, task_no):
        """
        Helper method to save the computed GMF data to the database.

        :param task_no:
            The ordinal of the task which generated the current GMFs to save
        """
        for rlz, imt, site_id in self.gmvs_per_site:
            imt_name, sa_period, sa_damping = imt
            inserter.add(models.GmfData(
                gmf=models.Gmf.objects.get(lt_realization=rlz),
                task_no=task_no,
                imt=imt_name,
                sa_period=sa_period,
                sa_damping=sa_damping,
                site_id=site_id,
                gmvs=self.gmvs_per_site[rlz, imt, site_id],
                rupture_ids=self.ruptures_per_site[rlz, imt, site_id]))
        inserter.flush()
        # Reset the accumulators so the collector can be reused.
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()


# NB: I tried to return a single dictionary {site_id: [(gmv, rupt_id),...]}
# but it takes a lot more memory (MS)
def _compute_gmf(params, imts, gsims, site_coll, rupture_id_seed_triples):
    """
    Compute a ground motion field value for each rupture, for all the
    points affected by that rupture, for the given IMT. Yields tuples
    (imt, site index, gmv, rupture id) with the nonzero contributions.

    :param params:
        a dictionary containing the keys
        correl_model, truncation_level, maximum_distance
    :param imts:
        a list of hazardlib intensity measure types
    :param gsims:
        a dictionary {tectonic region type -> GSIM instance}
    :param site_coll:
        a SiteCollection instance
    :param rupture_id_seed_triples:
        a list of triples with types
        (:class:`openquake.hazardlib.source.rupture.Rupture`, int, int)
    """
    # Compute and save ground motion fields
    for rupture, rup_id, rup_seed in rupture_id_seed_triples:
        gmf_calc_kwargs = {
            'rupture': rupture,
            'sites': site_coll,
            'imts': imts,
            'gsim': gsims[rupture.tectonic_region_type],
            'truncation_level': params['truncation_level'],
            'realizations': DEFAULT_GMF_REALIZATIONS,
            'correlation_model': params['correl_model'],
            'num_sites': params['num_sites'],
        }
        numpy.random.seed(rup_seed)
        gmf_dict = gmf.ground_motion_fields(**gmf_calc_kwargs)
        for imt, gmf_1_realiz in gmf_dict.iteritems():
            # since DEFAULT_GMF_REALIZATIONS is 1, gmf_1_realiz is a matrix
            # with n_sites rows and 1 column
            for idx, gmv in enumerate(gmf_1_realiz):
                # convert a 1x1 matrix into a float
                yield imt, idx, float(gmv), rup_id


class EventBasedHazardCalculator(general.BaseHazardCalculator):
    """
    Probabilistic Event-Based hazard calculator. Computes stochastic event
    sets and (optionally) ground motion fields.
    """
    core_calc_task = compute_ses_and_gmfs

    def task_arg_gen(self, _block_size=None):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.
        Yielded results are tuples of the form
        job_id, sources, ses, seeds
        (seeds will be used to seed numpy for temporal occurence sampling).
        """
        hc = self.hc
        rnd = random.Random()
        rnd.seed(hc.random_seed)
        task_no = 0
        for job_id, block, gsims_by_rlz in super(
                EventBasedHazardCalculator, self).task_arg_gen():
            ss = [(src, rnd.randint(0, models.MAX_SINT_32))
                  for src in block]  # source, seed pairs
            yield job_id, ss, gsims_by_rlz, task_no
            task_no += 1
        # now the source_blocks_per_ltpath dictionary can be cleared
        self.source_blocks_per_ltpath.clear()

    def initialize_ses_db_records(self, ordinal, rlzs):
        """
        Create :class:`~openquake.engine.db.models.Output`,
        :class:`~openquake.engine.db.models.SESCollection` and
        :class:`~openquake.engine.db.models.SES` "container" records for
        a single realization.

        Stochastic event set ruptures computed for this realization
        will be associated to these containers.

        NOTE: Many tasks can contribute ruptures to the same SES.
        """
        rlz_ids = [r.id for r in rlzs]
        output = models.Output.objects.create(
            oq_job=self.job,
            display_name='SES Collection smlt-%d-rlz-%s' % (
                ordinal, ','.join(map(str, rlz_ids))),
            output_type='ses')

        ses_coll = models.SESCollection.objects.create(
            output=output, lt_realization_ids=rlz_ids, ordinal=ordinal)

        for rlz in rlzs:
            if self.job.hazard_calculation.ground_motion_fields:
                output = models.Output.objects.create(
                    oq_job=self.job,
                    display_name='GMF rlz-%s' % rlz.id,
                    output_type='gmf')
                models.Gmf.objects.create(output=output, lt_realization=rlz)

        all_ses = []
        for i in xrange(1, self.hc.ses_per_logic_tree_path + 1):
            all_ses.append(
                models.SES.objects.create(
                    ses_collection=ses_coll,
                    investigation_time=self.hc.investigation_time,
                    ordinal=i))
        return all_ses

    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        super(EventBasedHazardCalculator, self).pre_execute()
        for i, rlzs in enumerate(self.rlzs_per_ltpath.itervalues()):
            self.initialize_ses_db_records(i, rlzs)

    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves.
        """
        if self.hc.hazard_curves_from_gmfs:
            with EnginePerformanceMonitor('generating hazard curves',
                                          self.job.id):
                self.parallelize(
                    post_processing.gmf_to_hazard_curve_task,
                    post_processing.gmf_to_hazard_curve_arg_gen(self.job),
                    self.log_percent)

            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                with EnginePerformanceMonitor(
                        'generating mean/quantile curves', self.job.id):
                    self.do_aggregate_post_proc()

            if self.hc.hazard_maps:
                with EnginePerformanceMonitor(
                        'generating hazard maps', self.job.id):
                    self.parallelize(
                        cls_post_proc.hazard_curves_to_hazard_map_task,
                        cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen(
                            self.job),
                        self.log_percent)
__copyright__ = "Copyright 2017 Birkbeck, University of London" __author__ = "Martin Paul Eve & Andy Byers" __license__ = "AGPL v3" __maintainer__ = "Birkbeck Centre for Technology and Publishing" from django.urls import reverse from utils import ( notify_helpers, models as util_models, setting_handler, render_template, ) from core import models as core_models from review import logic as review_logic def send_reviewer_withdrawl_notice(**kwargs): review_assignment = kwargs['review_assignment'] request = kwargs['request'] user_message_content = kwargs['user_message_content'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = '{0}\'s review of "{1}" has been withdrawn by {2}'.format(review_assignment.reviewer.full_name(), review_assignment.article.title, request.user.full_name()) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Review Withdrawl', 'target': review_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_review_withdrawl', review_assignment.reviewer.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_editor_unassigned_notice(request, message, assignment, skip=False): description = "{a.editor} unassigned from {a.article} by {r.user}".format( a=assignment, r=request, ) if not skip: log_dict = { 'level': 'Info', 'action_text': description, 'types': 'Editor Unassigned', 'target': assignment.article } notify_helpers.send_email_with_body_from_user( request, 'subject_review_withdrawl', assignment.editor.email, message, log_dict=log_dict, ) notify_helpers.send_slack(request, description, ['slack_editors']) def send_editor_assigned_acknowledgements_mandatory(**kwargs): """ This function is called via the event handling framework and it notifies that an editor has been assigned. It is wired up in core/urls.py. 
It is different to the below function in that this is called when an editor is assigned, whereas the below is only called when the user opts to send a message to the editor. :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request :return: None """ editor_assignment = kwargs['editor_assignment'] article = editor_assignment.article request = kwargs['request'] user_message_content = kwargs['user_message_content'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] acknowledgement = kwargs['acknowledgement'] description = '{0} was assigned as the editor for "{1}"'.format(editor_assignment.editor.full_name(), article.title) context = { 'article': article, 'request': request, 'editor_assignment': editor_assignment } log_dict = {'level': 'Info', 'action_text': description, 'types': 'Editor Assignment', 'target': article} # send to assigned editor if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_editor_assignment', editor_assignment.editor.email, user_message_content, log_dict=log_dict) # send to editor if not acknowledgement: notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'editor_assignment', 'subject_editor_assignment', request.user.email, context, log_dict=log_dict) def send_editor_assigned_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that an editor has been assigned. It is wired up in core/urls.py. :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request :return: None """ kwargs['acknowledgement'] = True send_editor_assigned_acknowledgements_mandatory(**kwargs) def send_reviewer_requested_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that an editor has been assigned. It is wired up in core/urls.py. 
:param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request :return: None """ kwargs['acknowledgement'] = True send_reviewer_requested_acknowledgements_mandatory(**kwargs) def send_reviewer_requested_acknowledgements_mandatory(**kwargs): """ This function is called via the event handling framework and it notifies that a reviewer has been requested. It is wired up in core/urls.py. :param kwargs: a list of kwargs that includes review_assignment, user_message_content, skip (boolean) and request :return: None """ review_assignment = kwargs['review_assignment'] article = review_assignment.article request = kwargs['request'] user_message_content = kwargs['user_message_content'] acknowledgement = kwargs['acknowledgement'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = 'A review request was added to "{0}" for user {1}'.format(article.title, review_assignment.reviewer.full_name()) context = { 'article': article, 'request': request, 'review_assignment': review_assignment } log_dict = {'level': 'Info', 'action_text': description, 'types': 'Review Request', 'target': article} # send to requested reviewer if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_review_request_sent', review_assignment.reviewer.email, user_message_content, log_dict=log_dict) if not acknowledgement: # send slack notify_helpers.send_slack(request, description, ['slack_editors']) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 'review_request_sent', 'subject_review_request_sent', review_assignment.editor.email, context, log_dict=log_dict) def send_review_complete_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that a reviewer has completed his or her review. It is wired up in core/urls.py. 
:param kwargs: a list of kwargs that includes review_assignment, and request :return: None """ review_assignment = kwargs['review_assignment'] article = review_assignment.article request = kwargs['request'] request.user = review_assignment.reviewer description = '{0} completed the review of "{1}": {2}'.format(review_assignment.reviewer.full_name(), article.title, review_assignment.get_decision_display()) util_models.LogEntry.add_entry(types='Review Complete', description=description, level='Info', actor=request.user, target=article, request=request) context = { 'article': article, 'request': request, 'review_assignment': review_assignment } # send slack notify_helpers.send_slack(request, description, ['slack_editors']) # send to reviewer notify_helpers.send_email_with_body_from_setting_template(request, 'review_complete_reviewer_acknowledgement', 'subject_review_complete_reviewer_acknowledgement', review_assignment.reviewer.email, context) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 'review_complete_acknowledgement', 'subject_review_complete_reviewer_acknowledgement', review_assignment.editor.email, context) def send_reviewer_accepted_or_decline_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that a reviewer has either accepted or declined to review. It is wired up in core/urls.py. 
:param kwargs: a list of kwargs that includes review_assignment, accepted and request :return: None """ review_assignment = kwargs['review_assignment'] article = review_assignment.article request = kwargs['request'] accepted = kwargs['accepted'] description = '{0} {1} to review {2}'.format(review_assignment.reviewer.full_name(), ('accepted' if accepted else 'declined'), article.title) util_models.LogEntry.add_entry(types='Review request {0}'.format(('accepted' if accepted else 'declined')), description=description, level='Info', actor=request.user, target=article, request=request) review_url = review_logic.get_review_url( request, review_assignment, ) context = { 'article': article, 'request': request, 'review_assignment': review_assignment, 'review_url': review_url, } # send to slack notify_helpers.send_slack(request, description, ['slack_editors']) # send to reviewer if accepted: notify_helpers.send_email_with_body_from_setting_template( request, 'review_accept_acknowledgement', 'subject_review_accept_acknowledgement', review_assignment.reviewer.email, context, ) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 'review_acknowledgement', 'subject_review_acknowledgement', review_assignment.editor.email, context) else: notify_helpers.send_email_with_body_from_setting_template(request, 'review_decline_acknowledgement', 'subject_review_decline_acknowledgement', review_assignment.reviewer.email, context) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 'review_acknowledgement', 'subject_review_acknowledgement', review_assignment.editor.email, context) def send_submission_acknowledgement(**kwargs): """ This function is called via the event handling framework and it notifies site operators of a submission. It is wired up in core/urls.py. 
:param kwargs: a list of kwargs that includes article and request :return: None """ article = kwargs['article'] request = kwargs['request'] util_models.LogEntry.add_entry( types='Submission Complete', description='A new article {0} was submitted'.format(article.title), level='Info', actor=request.user, target=article, request=request, ) log_dict = { 'level': 'Info', 'action_text': 'A new article {0} was submitted'.format(article.title), 'types': 'New Submission Acknowledgement', 'target': article, } # generate URL editor_review_url = request.journal.site_url( path=reverse( 'review_unassigned_article', kwargs={'article_id': article.pk}, ) ) notify_helpers.send_slack( request, 'New submission: {0} {1}'.format(article.title, editor_review_url), ['slack_editors']) # send to author context = { 'article': article, 'request': request, 'editor_review_url': editor_review_url, } notify_helpers.send_email_with_body_from_setting_template( request, 'submission_acknowledgement', 'subject_submission_acknowledgement', article.correspondence_author.email, context, log_dict=log_dict, ) # send to all authors editors_to_email = setting_handler.get_setting( 'general', 'editors_for_notification', request.journal).processed_value if editors_to_email: editor_pks = [int(pk) for pk in editors_to_email] editor_emails = {role.user.email for role in core_models.AccountRole.objects.filter( role__slug='editor', user__id__in=editor_pks)} else: editor_emails = set(request.journal.editor_emails) assigned_to_section = ( article.section.editors.all() | article.section.section_editors.all()) editor_emails |= {editor.email for editor in assigned_to_section} notify_helpers.send_email_with_body_from_setting_template(request, 'editor_new_submission', 'subject_editor_new_submission', editor_emails, context, log_dict=log_dict) def send_article_decision(**kwargs): article = kwargs['article'] request = kwargs['request'] decision = kwargs['decision'] user_message_content = kwargs['user_message_content'] if 
'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = '{0}\'s article "{1}" has been {2}ed by {3}'.format(article.correspondence_author.full_name(), article.title, decision, request.user.full_name()) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Article Decision', 'target': article} if not skip: notify_helpers.send_email_with_body_from_user(request, 'Article Review Decision', article.correspondence_author.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_revisions_request(**kwargs): request = kwargs['request'] revision = kwargs['revision'] user_message_content = kwargs['user_message_content'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = '{0} has requested revisions for {1} due on {2}'.format(request.user.full_name(), revision.article.title, revision.date_due) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Revision Request', 'target': revision.article} if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_request_revisions', revision.article.correspondence_author.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_revisions_complete(**kwargs): request = kwargs['request'] revision = kwargs['revision'] action_text = '' for action in revision.actions.all(): action_text = "{0}<br><br>{1} - {2}".format(action_text, action.logged, action.text) description = '<p>{0} has completed revisions for {1}</p> Actions:<br>{2}'.format(request.user.full_name(), revision.article.title, action_text) notify_helpers.send_email_with_body_from_user(request, 'Article Revisions Complete', revision.editor.email, description) notify_helpers.send_slack(request, description, ['slack_editors']) util_models.LogEntry.add_entry(types='Revisions Complete', description=action_text, level='Info', request=request, 
target=revision.article) def send_copyedit_assignment(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has requested copyediting for {1} due on {2}'.format(request.user.full_name(), copyedit_assignment.article.title, copyedit_assignment.due) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Assignment', 'target': copyedit_assignment.article} response = notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_assignment_notification', copyedit_assignment.copyeditor.email, user_message_content, log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_updated(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] skip = kwargs.get('skip', False) if not skip: # send to slack notify_helpers.send_slack(request, 'Copyedit assignment {0} updated'.format(copyedit_assignment.pk), ['slack_editors']) log_dict = {'level': 'Info', 'action_text': 'Copyedit assignment #{number} update.'.format(number=copyedit_assignment.pk), 'types': 'Revision Request', 'target': copyedit_assignment.article} # send to author notify_helpers.send_email_with_body_from_setting_template(request, 'copyedit_updated', 'subject_copyedit_updated', copyedit_assignment.copyeditor.email, context={'request': request, 'copyedit_assignment': copyedit_assignment}, log_dict=log_dict) def send_copyedit_deleted(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] skip = kwargs.get('skip', False) description = 'Copyedit task {0} for article {1} deleted.'.format(copyedit_assignment.pk, copyedit_assignment.article.title) if not skip: # send to slack notify_helpers.send_slack(request, 'Copyedit assignment {0} updated'.format(copyedit_assignment.pk), ['slack_editors']) log_dict = {'level': 'Info', 
'action_text': description, 'types': 'Copyedit Assignment Deleted', 'target': copyedit_assignment.article} # send to copyeditor notify_helpers.send_email_with_body_from_setting_template(request, 'copyedit_deleted', 'subject_copyedit_deleted', copyedit_assignment.copyeditor.email, context={'request': request, 'copyedit_assignment': copyedit_assignment}, log_dict=log_dict) def send_copyedit_decision(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] description = '{0} has accepted copyediting task for {1} due on {2}.'.format( copyedit_assignment.copyeditor.full_name(), copyedit_assignment.article.title, copyedit_assignment.due) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyediting Decision', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Article Copyediting Decision', copyedit_assignment.editor.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_author_review(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has requested copyedit review for {1} from {2}'.format( request.user.full_name(), copyedit_assignment.article.title, copyedit_assignment.article.correspondence_author.full_name()) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Author Review', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_notify_author', copyedit_assignment.article.correspondence_author.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_complete(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] article = kwargs['article'] description = 
'Copyediting requested by {0} from {1} for article {2} has been completed'.format( request.user.full_name(), copyedit_assignment.copyeditor.full_name(), article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Complete', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_notify_editor', copyedit_assignment.editor.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_ack(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has acknowledged copyediting for {1}'.format(request.user.full_name(), copyedit_assignment.article.title, ) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Acknowledgement', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_ack', copyedit_assignment.copyeditor.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_reopen(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has reopened copyediting for {1} from {2}'.format(request.user.full_name(), copyedit_assignment.article.title, copyedit_assignment.copyeditor.full_name()) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Complete', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_reopen_task', copyedit_assignment.copyeditor.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def 
send_typeset_assignment(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has been assigned as a typesetter for {1}'.format(typeset_task.typesetter.full_name(), typeset_task.assignment.article.title) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Assignment', 'target': typeset_task.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_typesetter_notification', typeset_task.typesetter.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_typeset_decision(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] decision = kwargs['decision'] description = '{0} has {1}ed the typesetting task for {2}'.format(typeset_task.typesetter.full_name(), decision, typeset_task.assignment.article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetter Decision', 'target': typeset_task.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Article Typesetting Decision', typeset_task.assignment.production_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_typeset_task_deleted(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset'] description = '{0} has deleted a typesetter task assigned to {1} for article {2}'.format( request.user.full_name(), typeset_task.typesetter.full_name(), typeset_task.assignment.article.title, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetter Assignment Deleted', 'target': typeset_task.assignment.article} # send to author notify_helpers.send_email_with_body_from_setting_template(request, 'typeset_deleted', 'subject_typeset_deleted', typeset_task.typesetter.email, context={'request': request, 
'typeset_task': typeset_task}, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_typeset_complete(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] description = '{0} has completed typesetting for article {1}. \n\nThe following note was supplied:\n\n{2}'.format( typeset_task.typesetter.full_name(), typeset_task.assignment.article.title, typeset_task.note_from_typesetter, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Assignment Complete', 'target': typeset_task.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_typesetter_complete_notification', typeset_task.assignment.production_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_production_complete(**kwargs): request = kwargs['request'] article = kwargs['article'] user_content_message = kwargs['user_content_message'] assignment = kwargs['assignment'] description = 'Production has been completed for article {0}.'.format(article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Production Complete', 'target': article} for task in assignment.typesettask_set.all(): notify_helpers.send_email_with_body_from_user(request, 'Article Production Complete', task.typesetter.email, user_content_message) notify_helpers.send_email_with_body_from_setting_template(request, 'production_complete', 'subject_production_complete', article.editor_emails(), {'article': article, 'assignment': assignment}, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def fire_proofing_manager_assignment(**kwargs): request = kwargs['request'] proofing_assignment = kwargs['proofing_assignment'] article = proofing_assignment.article description = '{0} has been assigned as proofing manager for {1}'.format( proofing_assignment.proofing_manager.full_name(), article.title, ) log_dict = 
{'level': 'Info', 'action_text': description, 'types': 'Proofing Manager Assigned', 'target': article} context = {'request': request, 'proofing_assignment': proofing_assignment, 'article': article} notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofing_manager', 'subject_notify_proofing_manager', proofing_assignment.proofing_manager.email, context, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def cancel_proofing_task(**kwargs): request = kwargs['request'] article = kwargs['article'] proofing_task = kwargs['proofing_task'] user_content_message = kwargs.get('user_content_message', '') description = 'Proofing request for article {0} from {1} has been cancelled by {2}'.format( article.title, proofing_task.proofreader.full_name(), request.user.full_name() ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Task Cancelled', 'target': article} context = {'request': request, 'proofing_task': proofing_task, 'user_content_message': user_content_message} notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofreader_cancelled', 'subject_notify_proofreader_cancelled', proofing_task.proofreader.email, context, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def edit_proofing_task(**kwargs): request = kwargs['request'] article = kwargs['article'] proofing_task = kwargs['proofing_task'] description = 'Proofing request for article {0} from {1} has been edited by {2}'.format( article.title, proofing_task.proofreader.full_name(), request.user.full_name() ) context = {'request': request, 'proofing_task': proofing_task} log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Task Edited', 'target': article} notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofreader_edited', 'subject_notify_proofreader_edited', proofing_task.proofreader.email, context, log_dict=log_dict) 
notify_helpers.send_slack(request, description, ['slack_editors']) def notify_proofreader(**kwargs): request = kwargs['request'] article = kwargs['article'] proofing_task = kwargs['proofing_task'] user_content_message = kwargs['user_content_message'] description = 'Proofing request for article {0} from {1} has been requested by {2}'.format( article.title, proofing_task.proofreader.full_name(), request.user.full_name() ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofreading Requested', 'target': article} notify_helpers.send_email_with_body_from_user(request, 'subject_notify_proofreader_assignment', proofing_task.proofreader.email, user_content_message, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofreader_decision(**kwargs): request = kwargs['request'] proofing_task = kwargs['proofing_task'] decision = kwargs['decision'] description = '{0} has made a decision for proofing task on {1}: {2}'.format( proofing_task.proofreader.full_name(), proofing_task.round.assignment.article.title, decision ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofreading Update', 'target': proofing_task.round.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Article Proofreading Update', proofing_task.round.assignment.proofing_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofreader_complete_notification(**kwargs): request = kwargs['request'] proofing_task = kwargs['proofing_task'] article = kwargs['article'] description = '{0} has completed a proofing task for {1}'.format( proofing_task.proofreader.full_name(), article.title, ) notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofreader_complete', 'subject_notify_proofreader_complete', proofing_task.round.assignment.proofing_manager.email, 
{'proofing_task': proofing_task}) def send_proofing_typeset_request(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] article = kwargs['article'] user_content_message = kwargs['user_content_message'] skip = kwargs['skip'] description = '{0} has requested typesetting updates from {1} for {2}'.format( request.user.full_name(), typeset_task.typesetter.full_name(), article.title, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Updates Requested', 'target': article} if not skip: notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_user( request, 'subject_notify_typesetter_proofing_changes', typeset_task.typesetter.email, user_content_message, log_dict=log_dict) def send_proofing_typeset_decision(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] decision = kwargs['decision'] description = '{0} has made a decision for proofing task on {1}: {2}'.format( typeset_task.typesetter.full_name(), typeset_task.proofing_task.round.assignment.article.title, decision ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Typesetting', 'target': typeset_task.proofing_task.round.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Proofing Typesetting Changes', typeset_task.proofing_task.round.assignment.proofing_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_corrections_complete(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] article = kwargs['article'] description = '{0} has completed corrections task for article {1} (proofing task {2}'.format( request.user.full_name(), article.title, typeset_task.pk, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Typesetting Complete', 'target': typeset_task.proofing_task.round.assignment.article} 
notify_helpers.send_email_with_body_from_user(request, 'subject_typesetter_corrections_complete', article.proofingassignment.proofing_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofing_ack(**kwargs): request = kwargs['request'] user_message = kwargs['user_message'] article = kwargs['article'] model_object = kwargs['model_object'] model_name = kwargs['model_name'] skip = kwargs['skip'] description = "{0} has acknowledged a task , {1}, by {2} for article {3}".format(request.user, model_name, model_object.actor().full_name(), article.title) if not skip: notify_helpers.send_email_with_body_from_user(request, 'Proofing Acknowledgement', model_object.actor().email, user_message) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofing_complete(**kwargs): request = kwargs['request'] user_message = kwargs['user_message'] article = kwargs['article'] skip = kwargs['skip'] description = "Proofing is now complete for {0}".format(article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Complete', 'target': article} if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_notify_editor_proofing_complete', article.editor_emails(), user_message, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_author_publication_notification(**kwargs): request = kwargs['request'] article = kwargs['article'] user_message = kwargs['user_message'] section_editors = kwargs['section_editors'] peer_reviewers = kwargs['peer_reviewers'] description = "Article, {0}, set for publication on {1}, by {2}".format(article.title, article.date_published, request.user.full_name()) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Article Published', 'target': article} notify_helpers.send_email_with_body_from_user(request, '{0} Publication'.format(article.title), 
article.correspondence_author.email, user_message, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) # Check for SEs and PRs and notify them as well if section_editors: for editor in article.section_editors(): notify_helpers.send_email_with_body_from_setting_template(request, 'section_editor_pub_notification', 'Article set for publication', editor.email, {'article': article, 'editor': editor}) if peer_reviewers: for reviewer in article.peer_reviewers(): notify_helpers.send_email_with_body_from_setting_template(request, 'peer_reviewer_pub_notification', 'Article set for publication', reviewer.email, {'article': article, 'reviewer': reviewer}) def review_sec_override_notification(**kwargs): request = kwargs['request'] override = kwargs['override'] description = "{0} overrode their access to {1}".format(override.editor.full_name(), override.article.title) log_dict = {'level': 'Warning', 'action_text': description, 'types': 'Security Override', 'target': override.article} notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_user(request, 'Review Security Override', request.journal.editor_emails, description, log_dict=log_dict) def send_draft_decison(**kwargs): request = kwargs['request'] draft = kwargs['draft'] article = kwargs['article'] emails = article.section.editor_emails() if not emails: emails = request.journal.editor_emails description = "Section Editor {0} has drafted a decision for Article {1}".format( draft.section_editor.full_name(), article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Draft Decision', 'target': article} notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'draft_editor_message', 'subject_draft_editor_message', emails, {'draft': draft, 'article': article}, log_dict=log_dict) def send_author_copyedit_complete(**kwargs): request = 
kwargs['request'] copyedit = kwargs['copyedit'] author_review = kwargs['author_review'] description = "Author {0} has completed their copyediting task for article {1}".format( author_review.author.full_name(), copyedit.article.title, ) notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'author_copyedit_complete', 'subject_author_copyedit_complete', copyedit.editor.email, {'copyedit': copyedit, 'author_review': author_review}) def preprint_submission(**kwargs): """ Called by events.Event.ON_PRPINT_SUBMISSIONS, logs and emails the author and preprint editor. :param kwargs: Dictionary containing article and request objects :return: None """ request = kwargs.get('request') article = kwargs.get('article') description = '{author} has submitted a new preprint titled {title}.'.format(author=request.user.full_name(), title=article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Submission', 'target': article} # Send an email to the user context = {'article': article} template = request.press.preprint_submission email_text = render_template.get_message_content(request, context, template, template_is_setting=True) notify_helpers.send_email_with_body_from_user(request, 'Preprint Submission', request.user.email, email_text, log_dict=log_dict) # Send an email to the preprint editor url = request.press_base_url + reverse('preprints_manager_article', kwargs={'article_id': article.pk}) editor_email_text = 'A new preprint has been submitted to {press}: <a href="{url}">{title}</a>.'.format( press=request.press.name, url=url, title=article.title ) for editor in request.press.preprint_editors(): notify_helpers.send_email_with_body_from_user(request, 'Preprint Submission', editor.email, editor_email_text, log_dict=log_dict) def preprint_publication(**kwargs): """ Called by events.Event.ON_PREPRINT_PUBLICATIONS handles logging and emails. 
:param kwargs: Dictionary containing article and request objects :return: None """ request = kwargs.get('request') article = kwargs.get('article') description = '{editor} has published a preprint titled {title}.'.format(editor=request.user.full_name(), title=article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Preprint Publication', 'target': article} util_models.LogEntry.add_entry('Publication', description, 'Info', request.user, request, article) # Send an email to the article owner. context = {'article': article} template = request.press.preprint_publication email_text = render_template.get_message_content(request, context, template, template_is_setting=True) notify_helpers.send_email_with_body_from_user(request, ' Preprint Submission Decision', article.owner.email, email_text, log_dict=log_dict) # Stops this notification being sent multiple times.c article.preprint_decision_notification = True article.save() def preprint_comment(**kwargs): request = kwargs.get('request') article = kwargs.get('article') email_text = 'A comment has been made on your article {article}, you can moderate comments ' \ '<a href="{base_url}{url}">on the journal site</a>.'.format( article=article.title, base_url=request.press_base_url, url=reverse('preprints_comments', kwargs={'article_id': article.pk})) description = '{author} commented on {article}'.format(author=request.user.full_name(), article=article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Preprint Comment', 'target': article} notify_helpers.send_email_with_body_from_user(request, ' Preprint Comment', article.owner.email, email_text, log_dict=log_dict) def send_cancel_corrections(**kwargs): request = kwargs.get('request') article = kwargs.get('article') correction = kwargs.get('correction') description = '{user} has cancelled correction task {task}'.format( user=request.user, task=correction, ) log_dict = { 'level': 'Info', 'action_text': description, 'types': 
'Correction Cancelled', 'target': article, } notify_helpers.send_email_with_body_from_setting_template( request, 'notify_correction_cancelled', 'subject_notify_correction_cancelled', correction.typesetter.email, context=kwargs, log_dict=log_dict, ) #1952 completes implementation of review_unassigned_article_url __copyright__ = "Copyright 2017 Birkbeck, University of London" __author__ = "Martin Paul Eve & Andy Byers" __license__ = "AGPL v3" __maintainer__ = "Birkbeck Centre for Technology and Publishing" from django.urls import reverse from utils import ( notify_helpers, models as util_models, setting_handler, render_template, ) from core import models as core_models from review import logic as review_logic def send_reviewer_withdrawl_notice(**kwargs): review_assignment = kwargs['review_assignment'] request = kwargs['request'] user_message_content = kwargs['user_message_content'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = '{0}\'s review of "{1}" has been withdrawn by {2}'.format(review_assignment.reviewer.full_name(), review_assignment.article.title, request.user.full_name()) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Review Withdrawl', 'target': review_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_review_withdrawl', review_assignment.reviewer.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_editor_unassigned_notice(request, message, assignment, skip=False): description = "{a.editor} unassigned from {a.article} by {r.user}".format( a=assignment, r=request, ) if not skip: log_dict = { 'level': 'Info', 'action_text': description, 'types': 'Editor Unassigned', 'target': assignment.article } notify_helpers.send_email_with_body_from_user( request, 'subject_review_withdrawl', assignment.editor.email, message, log_dict=log_dict, ) notify_helpers.send_slack(request, description, 
['slack_editors']) def send_editor_assigned_acknowledgements_mandatory(**kwargs): """ This function is called via the event handling framework and it notifies that an editor has been assigned. It is wired up in core/urls.py. It is different to the below function in that this is called when an editor is assigned, whereas the below is only called when the user opts to send a message to the editor. :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request :return: None """ editor_assignment = kwargs['editor_assignment'] article = editor_assignment.article request = kwargs['request'] user_message_content = kwargs['user_message_content'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] acknowledgement = kwargs['acknowledgement'] description = '{0} was assigned as the editor for "{1}"'.format(editor_assignment.editor.full_name(), article.title) context = { 'article': article, 'request': request, 'editor_assignment': editor_assignment } log_dict = {'level': 'Info', 'action_text': description, 'types': 'Editor Assignment', 'target': article} # send to assigned editor if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_editor_assignment', editor_assignment.editor.email, user_message_content, log_dict=log_dict) # send to editor if not acknowledgement: notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'editor_assignment', 'subject_editor_assignment', request.user.email, context, log_dict=log_dict) def send_editor_assigned_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that an editor has been assigned. It is wired up in core/urls.py. 
:param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request :return: None """ kwargs['acknowledgement'] = True send_editor_assigned_acknowledgements_mandatory(**kwargs) def send_reviewer_requested_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that an editor has been assigned. It is wired up in core/urls.py. :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request :return: None """ kwargs['acknowledgement'] = True send_reviewer_requested_acknowledgements_mandatory(**kwargs) def send_reviewer_requested_acknowledgements_mandatory(**kwargs): """ This function is called via the event handling framework and it notifies that a reviewer has been requested. It is wired up in core/urls.py. :param kwargs: a list of kwargs that includes review_assignment, user_message_content, skip (boolean) and request :return: None """ review_assignment = kwargs['review_assignment'] article = review_assignment.article request = kwargs['request'] user_message_content = kwargs['user_message_content'] acknowledgement = kwargs['acknowledgement'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = 'A review request was added to "{0}" for user {1}'.format(article.title, review_assignment.reviewer.full_name()) context = { 'article': article, 'request': request, 'review_assignment': review_assignment } log_dict = {'level': 'Info', 'action_text': description, 'types': 'Review Request', 'target': article} # send to requested reviewer if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_review_request_sent', review_assignment.reviewer.email, user_message_content, log_dict=log_dict) if not acknowledgement: # send slack notify_helpers.send_slack(request, description, ['slack_editors']) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 
'review_request_sent', 'subject_review_request_sent', review_assignment.editor.email, context, log_dict=log_dict) def send_review_complete_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that a reviewer has completed his or her review. It is wired up in core/urls.py. :param kwargs: a list of kwargs that includes review_assignment, and request :return: None """ review_assignment = kwargs['review_assignment'] article = review_assignment.article request = kwargs['request'] request.user = review_assignment.reviewer description = '{0} completed the review of "{1}": {2}'.format(review_assignment.reviewer.full_name(), article.title, review_assignment.get_decision_display()) util_models.LogEntry.add_entry(types='Review Complete', description=description, level='Info', actor=request.user, target=article, request=request) context = { 'article': article, 'request': request, 'review_assignment': review_assignment } # send slack notify_helpers.send_slack(request, description, ['slack_editors']) # send to reviewer notify_helpers.send_email_with_body_from_setting_template(request, 'review_complete_reviewer_acknowledgement', 'subject_review_complete_reviewer_acknowledgement', review_assignment.reviewer.email, context) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 'review_complete_acknowledgement', 'subject_review_complete_reviewer_acknowledgement', review_assignment.editor.email, context) def send_reviewer_accepted_or_decline_acknowledgements(**kwargs): """ This function is called via the event handling framework and it notifies that a reviewer has either accepted or declined to review. It is wired up in core/urls.py. 
:param kwargs: a list of kwargs that includes review_assignment, accepted and request :return: None """ review_assignment = kwargs['review_assignment'] article = review_assignment.article request = kwargs['request'] accepted = kwargs['accepted'] description = '{0} {1} to review {2}'.format(review_assignment.reviewer.full_name(), ('accepted' if accepted else 'declined'), article.title) util_models.LogEntry.add_entry(types='Review request {0}'.format(('accepted' if accepted else 'declined')), description=description, level='Info', actor=request.user, target=article, request=request) review_url = review_logic.get_review_url( request, review_assignment, ) context = { 'article': article, 'request': request, 'review_assignment': review_assignment, 'review_url': review_url, } # send to slack notify_helpers.send_slack(request, description, ['slack_editors']) # send to reviewer if accepted: notify_helpers.send_email_with_body_from_setting_template( request, 'review_accept_acknowledgement', 'subject_review_accept_acknowledgement', review_assignment.reviewer.email, context, ) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 'review_acknowledgement', 'subject_review_acknowledgement', review_assignment.editor.email, context) else: notify_helpers.send_email_with_body_from_setting_template(request, 'review_decline_acknowledgement', 'subject_review_decline_acknowledgement', review_assignment.reviewer.email, context) # send to editor notify_helpers.send_email_with_body_from_setting_template(request, 'review_acknowledgement', 'subject_review_acknowledgement', review_assignment.editor.email, context) def send_submission_acknowledgement(**kwargs): """ This function is called via the event handling framework and it notifies site operators of a submission. It is wired up in core/urls.py. 
:param kwargs: a list of kwargs that includes article and request :return: None """ article = kwargs['article'] request = kwargs['request'] util_models.LogEntry.add_entry( types='Submission Complete', description='A new article {0} was submitted'.format(article.title), level='Info', actor=request.user, target=article, request=request, ) log_dict = { 'level': 'Info', 'action_text': 'A new article {0} was submitted'.format(article.title), 'types': 'New Submission Acknowledgement', 'target': article, } # generate URL review_unassigned_article_url = request.journal.site_url( path=reverse( 'review_unassigned_article', kwargs={'article_id': article.pk}, ) ) notify_helpers.send_slack( request, 'New submission: {0} {1}'.format( article.title, review_unassigned_article_url, ), ['slack_editors']) # send to author context = { 'article': article, 'request': request, 'review_unassigned_article_url': review_unassigned_article_url, } notify_helpers.send_email_with_body_from_setting_template( request, 'submission_acknowledgement', 'subject_submission_acknowledgement', article.correspondence_author.email, context, log_dict=log_dict, ) # send to all authors editors_to_email = setting_handler.get_setting( 'general', 'editors_for_notification', request.journal).processed_value if editors_to_email: editor_pks = [int(pk) for pk in editors_to_email] editor_emails = { role.user.email for role in core_models.AccountRole.objects.filter( role__slug='editor', user__id__in=editor_pks, ) } else: editor_emails = set(request.journal.editor_emails) assigned_to_section = ( article.section.editors.all() | article.section.section_editors.all()) editor_emails |= {editor.email for editor in assigned_to_section} notify_helpers.send_email_with_body_from_setting_template( request, 'editor_new_submission', 'subject_editor_new_submission', editor_emails, context, log_dict=log_dict, ) def send_article_decision(**kwargs): article = kwargs['article'] request = kwargs['request'] decision = kwargs['decision'] 
user_message_content = kwargs['user_message_content'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = '{0}\'s article "{1}" has been {2}ed by {3}'.format(article.correspondence_author.full_name(), article.title, decision, request.user.full_name()) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Article Decision', 'target': article} if not skip: notify_helpers.send_email_with_body_from_user(request, 'Article Review Decision', article.correspondence_author.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_revisions_request(**kwargs): request = kwargs['request'] revision = kwargs['revision'] user_message_content = kwargs['user_message_content'] if 'skip' not in kwargs: kwargs['skip'] = True skip = kwargs['skip'] description = '{0} has requested revisions for {1} due on {2}'.format(request.user.full_name(), revision.article.title, revision.date_due) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Revision Request', 'target': revision.article} if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_request_revisions', revision.article.correspondence_author.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_revisions_complete(**kwargs): request = kwargs['request'] revision = kwargs['revision'] action_text = '' for action in revision.actions.all(): action_text = "{0}<br><br>{1} - {2}".format(action_text, action.logged, action.text) description = '<p>{0} has completed revisions for {1}</p> Actions:<br>{2}'.format(request.user.full_name(), revision.article.title, action_text) notify_helpers.send_email_with_body_from_user(request, 'Article Revisions Complete', revision.editor.email, description) notify_helpers.send_slack(request, description, ['slack_editors']) util_models.LogEntry.add_entry(types='Revisions Complete', 
description=action_text, level='Info', request=request, target=revision.article) def send_copyedit_assignment(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has requested copyediting for {1} due on {2}'.format(request.user.full_name(), copyedit_assignment.article.title, copyedit_assignment.due) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Assignment', 'target': copyedit_assignment.article} response = notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_assignment_notification', copyedit_assignment.copyeditor.email, user_message_content, log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_updated(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] skip = kwargs.get('skip', False) if not skip: # send to slack notify_helpers.send_slack(request, 'Copyedit assignment {0} updated'.format(copyedit_assignment.pk), ['slack_editors']) log_dict = {'level': 'Info', 'action_text': 'Copyedit assignment #{number} update.'.format(number=copyedit_assignment.pk), 'types': 'Revision Request', 'target': copyedit_assignment.article} # send to author notify_helpers.send_email_with_body_from_setting_template(request, 'copyedit_updated', 'subject_copyedit_updated', copyedit_assignment.copyeditor.email, context={'request': request, 'copyedit_assignment': copyedit_assignment}, log_dict=log_dict) def send_copyedit_deleted(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] skip = kwargs.get('skip', False) description = 'Copyedit task {0} for article {1} deleted.'.format(copyedit_assignment.pk, copyedit_assignment.article.title) if not skip: # send to slack notify_helpers.send_slack(request, 'Copyedit assignment {0} updated'.format(copyedit_assignment.pk), 
['slack_editors']) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Assignment Deleted', 'target': copyedit_assignment.article} # send to copyeditor notify_helpers.send_email_with_body_from_setting_template(request, 'copyedit_deleted', 'subject_copyedit_deleted', copyedit_assignment.copyeditor.email, context={'request': request, 'copyedit_assignment': copyedit_assignment}, log_dict=log_dict) def send_copyedit_decision(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] description = '{0} has accepted copyediting task for {1} due on {2}.'.format( copyedit_assignment.copyeditor.full_name(), copyedit_assignment.article.title, copyedit_assignment.due) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyediting Decision', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Article Copyediting Decision', copyedit_assignment.editor.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_author_review(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has requested copyedit review for {1} from {2}'.format( request.user.full_name(), copyedit_assignment.article.title, copyedit_assignment.article.correspondence_author.full_name()) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Author Review', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_notify_author', copyedit_assignment.article.correspondence_author.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_complete(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] 
article = kwargs['article'] description = 'Copyediting requested by {0} from {1} for article {2} has been completed'.format( request.user.full_name(), copyedit_assignment.copyeditor.full_name(), article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Complete', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_notify_editor', copyedit_assignment.editor.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_ack(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has acknowledged copyediting for {1}'.format(request.user.full_name(), copyedit_assignment.article.title, ) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Acknowledgement', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_ack', copyedit_assignment.copyeditor.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_copyedit_reopen(**kwargs): request = kwargs['request'] copyedit_assignment = kwargs['copyedit_assignment'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has reopened copyediting for {1} from {2}'.format(request.user.full_name(), copyedit_assignment.article.title, copyedit_assignment.copyeditor.full_name()) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Complete', 'target': copyedit_assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_reopen_task', copyedit_assignment.copyeditor.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, 
['slack_editors']) def send_typeset_assignment(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] user_message_content = kwargs['user_message_content'] skip = kwargs.get('skip', False) description = '{0} has been assigned as a typesetter for {1}'.format(typeset_task.typesetter.full_name(), typeset_task.assignment.article.title) if not skip: log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Assignment', 'target': typeset_task.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_typesetter_notification', typeset_task.typesetter.email, user_message_content, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_typeset_decision(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] decision = kwargs['decision'] description = '{0} has {1}ed the typesetting task for {2}'.format(typeset_task.typesetter.full_name(), decision, typeset_task.assignment.article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetter Decision', 'target': typeset_task.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Article Typesetting Decision', typeset_task.assignment.production_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_typeset_task_deleted(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset'] description = '{0} has deleted a typesetter task assigned to {1} for article {2}'.format( request.user.full_name(), typeset_task.typesetter.full_name(), typeset_task.assignment.article.title, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetter Assignment Deleted', 'target': typeset_task.assignment.article} # send to author notify_helpers.send_email_with_body_from_setting_template(request, 'typeset_deleted', 'subject_typeset_deleted', typeset_task.typesetter.email, 
context={'request': request, 'typeset_task': typeset_task}, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_typeset_complete(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] description = '{0} has completed typesetting for article {1}. \n\nThe following note was supplied:\n\n{2}'.format( typeset_task.typesetter.full_name(), typeset_task.assignment.article.title, typeset_task.note_from_typesetter, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Assignment Complete', 'target': typeset_task.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_typesetter_complete_notification', typeset_task.assignment.production_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_production_complete(**kwargs): request = kwargs['request'] article = kwargs['article'] user_content_message = kwargs['user_content_message'] assignment = kwargs['assignment'] description = 'Production has been completed for article {0}.'.format(article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Production Complete', 'target': article} for task in assignment.typesettask_set.all(): notify_helpers.send_email_with_body_from_user(request, 'Article Production Complete', task.typesetter.email, user_content_message) notify_helpers.send_email_with_body_from_setting_template(request, 'production_complete', 'subject_production_complete', article.editor_emails(), {'article': article, 'assignment': assignment}, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def fire_proofing_manager_assignment(**kwargs): request = kwargs['request'] proofing_assignment = kwargs['proofing_assignment'] article = proofing_assignment.article description = '{0} has been assigned as proofing manager for {1}'.format( proofing_assignment.proofing_manager.full_name(), 
article.title, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Manager Assigned', 'target': article} context = {'request': request, 'proofing_assignment': proofing_assignment, 'article': article} notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofing_manager', 'subject_notify_proofing_manager', proofing_assignment.proofing_manager.email, context, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def cancel_proofing_task(**kwargs): request = kwargs['request'] article = kwargs['article'] proofing_task = kwargs['proofing_task'] user_content_message = kwargs.get('user_content_message', '') description = 'Proofing request for article {0} from {1} has been cancelled by {2}'.format( article.title, proofing_task.proofreader.full_name(), request.user.full_name() ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Task Cancelled', 'target': article} context = {'request': request, 'proofing_task': proofing_task, 'user_content_message': user_content_message} notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofreader_cancelled', 'subject_notify_proofreader_cancelled', proofing_task.proofreader.email, context, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def edit_proofing_task(**kwargs): request = kwargs['request'] article = kwargs['article'] proofing_task = kwargs['proofing_task'] description = 'Proofing request for article {0} from {1} has been edited by {2}'.format( article.title, proofing_task.proofreader.full_name(), request.user.full_name() ) context = {'request': request, 'proofing_task': proofing_task} log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Task Edited', 'target': article} notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofreader_edited', 'subject_notify_proofreader_edited', proofing_task.proofreader.email, context, 
log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def notify_proofreader(**kwargs): request = kwargs['request'] article = kwargs['article'] proofing_task = kwargs['proofing_task'] user_content_message = kwargs['user_content_message'] description = 'Proofing request for article {0} from {1} has been requested by {2}'.format( article.title, proofing_task.proofreader.full_name(), request.user.full_name() ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofreading Requested', 'target': article} notify_helpers.send_email_with_body_from_user(request, 'subject_notify_proofreader_assignment', proofing_task.proofreader.email, user_content_message, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofreader_decision(**kwargs): request = kwargs['request'] proofing_task = kwargs['proofing_task'] decision = kwargs['decision'] description = '{0} has made a decision for proofing task on {1}: {2}'.format( proofing_task.proofreader.full_name(), proofing_task.round.assignment.article.title, decision ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofreading Update', 'target': proofing_task.round.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Article Proofreading Update', proofing_task.round.assignment.proofing_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofreader_complete_notification(**kwargs): request = kwargs['request'] proofing_task = kwargs['proofing_task'] article = kwargs['article'] description = '{0} has completed a proofing task for {1}'.format( proofing_task.proofreader.full_name(), article.title, ) notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'notify_proofreader_complete', 'subject_notify_proofreader_complete', 
proofing_task.round.assignment.proofing_manager.email, {'proofing_task': proofing_task}) def send_proofing_typeset_request(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] article = kwargs['article'] user_content_message = kwargs['user_content_message'] skip = kwargs['skip'] description = '{0} has requested typesetting updates from {1} for {2}'.format( request.user.full_name(), typeset_task.typesetter.full_name(), article.title, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Updates Requested', 'target': article} if not skip: notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_user( request, 'subject_notify_typesetter_proofing_changes', typeset_task.typesetter.email, user_content_message, log_dict=log_dict) def send_proofing_typeset_decision(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] decision = kwargs['decision'] description = '{0} has made a decision for proofing task on {1}: {2}'.format( typeset_task.typesetter.full_name(), typeset_task.proofing_task.round.assignment.article.title, decision ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Typesetting', 'target': typeset_task.proofing_task.round.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'Proofing Typesetting Changes', typeset_task.proofing_task.round.assignment.proofing_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_corrections_complete(**kwargs): request = kwargs['request'] typeset_task = kwargs['typeset_task'] article = kwargs['article'] description = '{0} has completed corrections task for article {1} (proofing task {2}'.format( request.user.full_name(), article.title, typeset_task.pk, ) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Typesetting Complete', 'target': 
typeset_task.proofing_task.round.assignment.article} notify_helpers.send_email_with_body_from_user(request, 'subject_typesetter_corrections_complete', article.proofingassignment.proofing_manager.email, description, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofing_ack(**kwargs): request = kwargs['request'] user_message = kwargs['user_message'] article = kwargs['article'] model_object = kwargs['model_object'] model_name = kwargs['model_name'] skip = kwargs['skip'] description = "{0} has acknowledged a task , {1}, by {2} for article {3}".format(request.user, model_name, model_object.actor().full_name(), article.title) if not skip: notify_helpers.send_email_with_body_from_user(request, 'Proofing Acknowledgement', model_object.actor().email, user_message) notify_helpers.send_slack(request, description, ['slack_editors']) def send_proofing_complete(**kwargs): request = kwargs['request'] user_message = kwargs['user_message'] article = kwargs['article'] skip = kwargs['skip'] description = "Proofing is now complete for {0}".format(article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Complete', 'target': article} if not skip: notify_helpers.send_email_with_body_from_user(request, 'subject_notify_editor_proofing_complete', article.editor_emails(), user_message, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) def send_author_publication_notification(**kwargs): request = kwargs['request'] article = kwargs['article'] user_message = kwargs['user_message'] section_editors = kwargs['section_editors'] peer_reviewers = kwargs['peer_reviewers'] description = "Article, {0}, set for publication on {1}, by {2}".format(article.title, article.date_published, request.user.full_name()) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Article Published', 'target': article} notify_helpers.send_email_with_body_from_user(request, '{0} 
Publication'.format(article.title), article.correspondence_author.email, user_message, log_dict=log_dict) notify_helpers.send_slack(request, description, ['slack_editors']) # Check for SEs and PRs and notify them as well if section_editors: for editor in article.section_editors(): notify_helpers.send_email_with_body_from_setting_template(request, 'section_editor_pub_notification', 'Article set for publication', editor.email, {'article': article, 'editor': editor}) if peer_reviewers: for reviewer in article.peer_reviewers(): notify_helpers.send_email_with_body_from_setting_template(request, 'peer_reviewer_pub_notification', 'Article set for publication', reviewer.email, {'article': article, 'reviewer': reviewer}) def review_sec_override_notification(**kwargs): request = kwargs['request'] override = kwargs['override'] description = "{0} overrode their access to {1}".format(override.editor.full_name(), override.article.title) log_dict = {'level': 'Warning', 'action_text': description, 'types': 'Security Override', 'target': override.article} notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_user(request, 'Review Security Override', request.journal.editor_emails, description, log_dict=log_dict) def send_draft_decison(**kwargs): request = kwargs['request'] draft = kwargs['draft'] article = kwargs['article'] emails = article.section.editor_emails() if not emails: emails = request.journal.editor_emails description = "Section Editor {0} has drafted a decision for Article {1}".format( draft.section_editor.full_name(), article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Draft Decision', 'target': article} notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'draft_editor_message', 'subject_draft_editor_message', emails, {'draft': draft, 'article': article}, log_dict=log_dict) def 
send_author_copyedit_complete(**kwargs): request = kwargs['request'] copyedit = kwargs['copyedit'] author_review = kwargs['author_review'] description = "Author {0} has completed their copyediting task for article {1}".format( author_review.author.full_name(), copyedit.article.title, ) notify_helpers.send_slack(request, description, ['slack_editors']) notify_helpers.send_email_with_body_from_setting_template(request, 'author_copyedit_complete', 'subject_author_copyedit_complete', copyedit.editor.email, {'copyedit': copyedit, 'author_review': author_review}) def preprint_submission(**kwargs): """ Called by events.Event.ON_PRPINT_SUBMISSIONS, logs and emails the author and preprint editor. :param kwargs: Dictionary containing article and request objects :return: None """ request = kwargs.get('request') article = kwargs.get('article') description = '{author} has submitted a new preprint titled {title}.'.format(author=request.user.full_name(), title=article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Submission', 'target': article} # Send an email to the user context = {'article': article} template = request.press.preprint_submission email_text = render_template.get_message_content(request, context, template, template_is_setting=True) notify_helpers.send_email_with_body_from_user(request, 'Preprint Submission', request.user.email, email_text, log_dict=log_dict) # Send an email to the preprint editor url = request.press_base_url + reverse('preprints_manager_article', kwargs={'article_id': article.pk}) editor_email_text = 'A new preprint has been submitted to {press}: <a href="{url}">{title}</a>.'.format( press=request.press.name, url=url, title=article.title ) for editor in request.press.preprint_editors(): notify_helpers.send_email_with_body_from_user(request, 'Preprint Submission', editor.email, editor_email_text, log_dict=log_dict) def preprint_publication(**kwargs): """ Called by events.Event.ON_PREPRINT_PUBLICATIONS handles logging and 
emails. :param kwargs: Dictionary containing article and request objects :return: None """ request = kwargs.get('request') article = kwargs.get('article') description = '{editor} has published a preprint titled {title}.'.format(editor=request.user.full_name(), title=article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Preprint Publication', 'target': article} util_models.LogEntry.add_entry('Publication', description, 'Info', request.user, request, article) # Send an email to the article owner. context = {'article': article} template = request.press.preprint_publication email_text = render_template.get_message_content(request, context, template, template_is_setting=True) notify_helpers.send_email_with_body_from_user(request, ' Preprint Submission Decision', article.owner.email, email_text, log_dict=log_dict) # Stops this notification being sent multiple times.c article.preprint_decision_notification = True article.save() def preprint_comment(**kwargs): request = kwargs.get('request') article = kwargs.get('article') email_text = 'A comment has been made on your article {article}, you can moderate comments ' \ '<a href="{base_url}{url}">on the journal site</a>.'.format( article=article.title, base_url=request.press_base_url, url=reverse('preprints_comments', kwargs={'article_id': article.pk})) description = '{author} commented on {article}'.format(author=request.user.full_name(), article=article.title) log_dict = {'level': 'Info', 'action_text': description, 'types': 'Preprint Comment', 'target': article} notify_helpers.send_email_with_body_from_user(request, ' Preprint Comment', article.owner.email, email_text, log_dict=log_dict) def send_cancel_corrections(**kwargs): request = kwargs.get('request') article = kwargs.get('article') correction = kwargs.get('correction') description = '{user} has cancelled correction task {task}'.format( user=request.user, task=correction, ) log_dict = { 'level': 'Info', 'action_text': description, 'types': 
'Correction Cancelled', 'target': article, } notify_helpers.send_email_with_body_from_setting_template( request, 'notify_correction_cancelled', 'subject_notify_correction_cancelled', correction.typesetter.email, context=kwargs, log_dict=log_dict, )
Change set_var_ics so it doesn't fail if there are extra things in the passed in KeyedList.
""" Starting point for DEM retrieval utilities. """ from math import pi, sin, cos from os import unlink, close from itertools import product from tempfile import mkstemp from sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo import gdal, osr from PIL import Image import numpy from .. import save_slope_aspect # used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some useful projections. # osr.UseExceptions() # <-- otherwise errors will be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): """ Tilestache-compatible seeding layer for preparing tiled data. Intended for use in hillup-seed.py script for preparing a tile directory. """ def __init__(self, demdir, tiledir, tmpdir, source): """ """ cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile()) self.provider = Provider(self, demdir, tmpdir, source) def name(self): return '.' class Provider: """ TileStache provider for generating tiles of DEM slope and aspect data. Source parameter can be "srtm-ned" (default) or "ned-only". See http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts with TileStache. 
""" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom): """ Return an instance of SlopeAndAspect for requested area. """ assert srs == webmerc_proj.srs # <-- good enough for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) # # Prepare information for datasets of the desired extent and projection. # xres = (xmax - xmin) / width yres = (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax - yres, 0, yres # # Reproject and merge DEM datasources into destination datasets. 
# driver = gdal.GetDriverByName('GTiff') elevation = numpy.zeros((width+2, height+2), numpy.float32) for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) minlon, minlat, z = cs2cs.TransformPoint(xmin, ymin) maxlon, maxlat, z = cs2cs.TransformPoint(xmax, ymax) try: handle, filename = mkstemp(dir=self.tmpdir, prefix='render-area-provider-', suffix='.tif') close(handle) ds_area = driver.Create(filename, width+2, height+2, 1, gdal.GDT_Float32) ds_area.SetGeoTransform(buffered_xform) ds_area.SetProjection(area_wkt) ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate the raster density across source DEM and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / ds_area.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better squeezing down resample = gdal.GRA_Cubic else: # cubic spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, ds_area, ds_dem.GetProjection(), ds_area.GetProjection(), resample) ds_dem.FlushCache() if proportion == 1: elevation = ds_area.ReadAsArray() else: elevation += ds_area.ReadAsArray() * proportion ds_area.FlushCache() finally: unlink(filename) # # Calculate and save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: """ TileStache response object with PIL-like save() and crop() methods. This object knows only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts with TileStache. """ def __init__(self, tmpdir, slope, aspect, wkt, xform): """ Instantiate with array of slope and aspect, and minimal geographic information. 
""" self.tmpdir = tmpdir self.slope = slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform = xform def save(self, output, format): """ Save a two-band GeoTIFF to output file-like object. """ if format != 'TIFF': raise Exception('File format other than TIFF for slope and aspect: "%s"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): """ Returns a rectangular region from the current image. Box is a 4-tuple with left, upper, right, and lower pixels. Not yet implemented! """ raise NotImplementedError() def choose_providers_srtm(zoom): """ Return a list of data sources and proportions for given zoom level. Each data source is a module such as SRTM1 or SRTM3, and the proportions must all add up to one. Return list has either one or two items. """ if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): """ Return a list of data sources and proportions for given zoom level. Each data source is a module such as NED10m or NED1km, and the proportions must all add up to one. Return list has either one or two items. 
""" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def calculate_slope_aspect(elevation, xres, yres, z=1.0): """ Return a pair of arrays 2 pixels smaller than the input elevation array. Slope is returned in radians, from 0 for sheer face to pi/2 for flat ground. Aspect is returned in radians, counterclockwise from -pi at north around to pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 """ width, height = elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z * elevation[row:(row + height), col:(col + width)] for (row, col) in product(range(3), range(3))] x = ((window[0] + window[3] + window[3] + window[6]) \ - (window[2] + window[5] + window[5] + window[8])) \ / (8.0 * xres); y = ((window[6] + window[7] + window[7] + window[8]) \ - (window[0] + window[1] + window[1] + window[2])) \ / (8.0 * yres); # in radians, from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi at north back to pi aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): """ Load external function based on a path. Example funcpath: "Module.Submodule:Function". 
""" modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came up None' % locals()) return _func Cordoned off dataset creation with new ds_composite """ Starting point for DEM retrieval utilities. """ from math import pi, sin, cos from os import unlink, close from itertools import product from tempfile import mkstemp from sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo import gdal, osr from PIL import Image import numpy from .. import save_slope_aspect # used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some useful projections. # osr.UseExceptions() # <-- otherwise errors will be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): """ Tilestache-compatible seeding layer for preparing tiled data. Intended for use in hillup-seed.py script for preparing a tile directory. """ def __init__(self, demdir, tiledir, tmpdir, source): """ """ cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile()) self.provider = Provider(self, demdir, tmpdir, source) def name(self): return '.' class Provider: """ TileStache provider for generating tiles of DEM slope and aspect data. Source parameter can be "srtm-ned" (default) or "ned-only". See http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts with TileStache. 
""" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom): """ Return an instance of SlopeAndAspect for requested area. """ assert srs == webmerc_proj.srs # <-- good enough for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) # # Prepare information for datasets of the desired extent and projection. # xres = (xmax - xmin) / width yres = (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax - yres, 0, yres def make_dataset(width, height, xform, wkt, nodata, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') area = driver.Create(filename, width, height, 1, gdal.GDT_Float32) area.SetGeoTransform(xform) area.SetProjection(wkt) area.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * nodata, 0, 0) area.GetRasterBand(1).SetNoDataValue(nodata) return area # # Reproject and merge DEM datasources into destination datasets. 
# driver = gdal.GetDriverByName('GTiff') elevation = numpy.zeros((width+2, height+2), numpy.float32) nodata = -9999 ds_composite = make_dataset(width+2, height+2, buffered_xform, area_wkt, nodata, self.tmpdir) for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) minlon, minlat, z = cs2cs.TransformPoint(xmin, ymin) maxlon, maxlat, z = cs2cs.TransformPoint(xmax, ymax) try: ds_area = make_dataset(width+2, height+2, buffered_xform, area_wkt, nodata, self.tmpdir) ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): print ds_dem.GetFileList(), ds_dem.RasterCount # estimate the raster density across source DEM and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / ds_area.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better squeezing down resample = gdal.GRA_Cubic else: # cubic spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, ds_area, ds_dem.GetProjection(), ds_area.GetProjection(), resample) ds_dem.FlushCache() if proportion == 1: elevation = ds_area.ReadAsArray() else: elevation += ds_area.ReadAsArray() * proportion ds_area.FlushCache() finally: #print ds_area.GetMaskBand() print ds_area.ReadAsArray() unlink(ds_area.GetFileList()[0]) # # Calculate and save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres) unlink(ds_composite.GetFileList()[0]) tile_xform = xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: """ TileStache response object with PIL-like save() and crop() methods. This object knows only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts with TileStache. 
""" def __init__(self, tmpdir, slope, aspect, wkt, xform): """ Instantiate with array of slope and aspect, and minimal geographic information. """ self.tmpdir = tmpdir self.slope = slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform = xform def save(self, output, format): """ Save a two-band GeoTIFF to output file-like object. """ if format != 'TIFF': raise Exception('File format other than TIFF for slope and aspect: "%s"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): """ Returns a rectangular region from the current image. Box is a 4-tuple with left, upper, right, and lower pixels. Not yet implemented! """ raise NotImplementedError() def choose_providers_srtm(zoom): """ Return a list of data sources and proportions for given zoom level. Each data source is a module such as SRTM1 or SRTM3, and the proportions must all add up to one. Return list has either one or two items. """ if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): """ Return a list of data sources and proportions for given zoom level. Each data source is a module such as NED10m or NED1km, and the proportions must all add up to one. Return list has either one or two items. 
""" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def calculate_slope_aspect(elevation, xres, yres, z=1.0): """ Return a pair of arrays 2 pixels smaller than the input elevation array. Slope is returned in radians, from 0 for sheer face to pi/2 for flat ground. Aspect is returned in radians, counterclockwise from -pi at north around to pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 """ width, height = elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z * elevation[row:(row + height), col:(col + width)] for (row, col) in product(range(3), range(3))] x = ((window[0] + window[3] + window[3] + window[6]) \ - (window[2] + window[5] + window[5] + window[8])) \ / (8.0 * xres); y = ((window[6] + window[7] + window[7] + window[8]) \ - (window[0] + window[1] + window[1] + window[2])) \ / (8.0 * yres); # in radians, from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi at north back to pi aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): """ Load external function based on a path. Example funcpath: "Module.Submodule:Function". """ modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came up None' % locals()) return _func
# Giles: y.py # Copyright 2012 Phil Bordelon # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from giles.state import State from giles.games.game import Game from giles.games.seat import Seat # What are the minimum and maximum sizes for the board? Y_MIN_SIZE = 2 Y_MAX_SIZE = 26 # . 0 # . . 1 # . . . 2 # . . . . 3 # 0 1 2 3 # # (1, 2) is adjacent to (1, 1), (2, 2), (0, 2), (1, 3), (2, 3), and (0, 1). Y_DELTAS = ((0, -1), (0, 1), (-1, 0), (1, 0), (1, 1), (-1, -1)) # Because we're lazy and use a square board despite the shape of the Y, we # fill the rest of the square with invalid characters that match neither # side. Define white and black here too. INVALID = "invalid" WHITE = "white" BLACK = "black" COL_CHARACTERS="abcdefghijklmnopqrstuvwxyz" class Y(Game): """A Y game table implementation. Invented by Claude Shannon. Adapted from my Volity implementation. """ def __init__(self, server, table_name): super(Y, self).__init__(server, table_name) self.game_display_name = "Y" self.game_name = "y" self.seats = [ Seat("White"), Seat("Black"), ] self.min_players = 2 self.max_players = 2 self.state = State("config") self.prefix = "(^RY^~): " self.log_prefix = "%s/%s " % (self.game_display_name, self.game_name) self.debug = True # Y-specific guff. 
self.seats[0].color = WHITE self.seats[0].color_code = "^W" self.seats[1].color = BLACK self.seats[1].color_code = "^K" self.board = None self.size = 19 self.turn = None self.turn_number = 0 self.move_list = [] self.resigner = None self.last_x = None self.last_y = None self.init_board() def init_board(self): self.board = [] # We're going to be lazy and build a square board, then fill the # half that doesn't make the proper shape with invalid marks. for x in range(self.size): self.board.append([None] * self.size) # Looking at the grid above, you can see that for a given column, # all row values less than that value are invalid. for y in range(x): self.board[x][y] = INVALID # That's it! def set_size(self, player, size_str): if not size_str.isdigit(): player.tell_cc(self.prefix + "You didn't even send a number!\n") return False new_size = int(size_str) if new_size < Y_MIN_SIZE or new_size > Y_MAX_SIZE: player.tell_cc(self.prefix + "Too small or large. Must be 2 to 52 inclusive.\n") return False # Got a valid size. self.size = new_size self.init_board() self.channel.broadcast_cc(self.prefix + "^M%s^~ has changed the size of the board to ^C%s^~.\n" % (player.display_name, str(new_size))) return True def move_to_values(self, move_str): # All valid moves are of the form g22, J15, etc. Ditch blatantly # invalid moves. if type(move_str) != str or len(move_str) < 2 or len(move_str) > 3: return None # First character must be in COL_CHARACTERS. col_char = move_str[0].lower() if col_char not in COL_CHARACTERS: return None else: x = COL_CHARACTERS.index(col_char) # Next one or two must be digits. row_chars = move_str[1:] if not row_chars.isdigit(): return None else: y = int(row_chars) - 1 # Now verify that these are even in range for this board. Remember # that column values greater than the row value are invalid; that # provides a bound on the column value, so we only then need to # check the row against the upper bound. 
if (x < 0 or x > y or y >= self.size): return None # Valid! return (x, y) def move(self, seat, move_str): # Get the actual values of the move. values = self.move_to_values(move_str) if not values: seat.player.tell_cc(self.prefix + "Invalid move.\n") return None x, y = values if self.board[x][y]: seat.player.tell_cc(self.prefix + "That space is already occupied.\n") return None # Okay, it's an unoccupied space! Let's make the move. self.board[x][y] = seat.color self.channel.broadcast_cc(self.prefix + seat.color_code + "%s^~ has moved to ^C%s^~.\n" % (seat.player.display_name, move_str)) self.last_x = x self.last_y = y return (x, y) def swap(self): # This is an easy one. Take the first move and change the piece # on the board from white to black. self.board[self.move_list[0][0]][self.move_list[0][1]] = BLACK self.channel.broadcast_cc(self.prefix + "^Y%s^~ has swapped ^WWhite^~'s first move.\n" % self.seats[1].player.display_name) def print_board(self, player): slash_line = " " char_line = "" for x in range(self.size): msg = " " color_char = "^W" if x % 2 == 0: color_char = "^K" slash_line += color_char + "/^~ " char_line += "%s " % COL_CHARACTERS[x] for spc in range(self.size - x): msg += " " for y in range(x + 1): piece = self.board[y][x] if y == self.last_x and x == self.last_y: msg += "^I" if piece == BLACK: msg += "^Kx^~ " elif piece == WHITE: msg += "^Wo^~ " elif y % 2 == 0: msg += "^m,^~ " else: msg += "^M.^~ " msg += str(x + 1) + "\n" player.tell_cc(msg) player.tell_cc(slash_line + "\n") player.tell_cc(char_line + "\n") def get_turn_str(self): if self.state.get() == "playing": if self.seats[0].color == self.turn: color_word = "^WWhite^~" name_word = "^R%s^~" % self.seats[0].player.display_name else: color_word = "^KBlack^~" name_word = "^Y%s^~" % self.seats[1].player.display_name return "It is %s's turn (%s).\n" % (name_word, color_word) else: return "The game is not currently active.\n" def send_board(self): for player in self.channel.listeners: 
self.print_board(player) def resign(self, seat): # Okay, this person can resign; it's their turn, after all. self.channel.broadcast_cc(self.prefix + "^R%s^~ is resigning from the game.\n" % seat.player.display_name) self.resigner = seat.color return True def show(self, player): self.print_board(player) player.tell_cc(self.get_turn_str()) def show_help(self, player): super(Y, self).show_help(player) player.tell_cc("\nY SETUP PHASE:\n\n") player.tell_cc(" ^!size^. <size>, ^!sz^. Set board to size <size>.\n") player.tell_cc(" ^!ready^., ^!done^., ^!r^., ^!d^. End setup phase.\n") player.tell_cc("\nY PLAY:\n\n") player.tell_cc(" ^!move^. <ln>, ^!play^., ^!mv^., ^!pl^. Make move <ln> (letter number).\n") player.tell_cc(" ^!swap^. Swap the first move (only Black, only their first).\n") player.tell_cc(" ^!resign^. Resign.\n") def handle(self, player, command_str): # Handle common commands. handled = self.handle_common_commands(player, command_str) state = self.state.get() command_bits = command_str.strip().split() primary = command_str.split()[0].lower() if state == "config": if primary in ('size', 'sz'): if len(command_bits) == 2: self.set_size(player, command_bits[1]) else: player.tell_cc(self.prefix + "Invalid size command.\n") handled = True elif primary in ('done', 'ready', 'd', 'r'): self.channel.broadcast_cc(self.prefix + "The game is now ready for players.\n") self.state.set("need_players") handled = True elif state == "need_players": # If both seats are full and the game is active, time to # play! if self.seats[0].player and self.seats[1].player and self.active: self.state.set("playing") self.channel.broadcast_cc(self.prefix + "^WWhite^~: ^R%s^~; ^KBlack^~: ^Y%s^~\n" % (self.seats[0].player.display_name, self.seats[1].player.display_name)) self.turn = WHITE self.turn_number = 1 self.send_board() self.channel.broadcast_cc(self.prefix + self.get_turn_str()) elif state == "playing": made_move = False # For all move types, don't bother if it's not this player's turn. 
if primary in ('move', 'mv', 'play', 'pl', 'swap', 'resign'): seat = self.get_seat_of_player(player) if not seat: player.tell_cc(self.prefix + "You can't move; you're not playing!\n") return elif seat.color != self.turn: player.tell_cc(self.prefix + "You must wait for your turn to move.\n") return if primary in ('move', 'mv', 'play', 'pl'): if len(command_bits) == 2: success = self.move(seat, command_bits[1]) if success: move = success made_move = True else: player.tell_cc(self.prefix + "Unsuccessful move.\n") else: player.tell_cc(self.prefix + "Unsuccessful move.\n") handled = True elif primary in ('swap',): if self.turn_number == 2 and seat.player == player: self.swap() move = "swap" made_move = True else: player.tell_cc(self.prefix + "Unsuccessful swap.\n") handled = True elif primary in ('resign',): if self.resign(seat): move = "resign" made_move = True handled = True if made_move: self.send_board() self.move_list.append(move) self.turn_number += 1 winner = self.find_winner() if winner: self.resolve(winner) self.finish() else: if self.turn == WHITE: self.turn = BLACK else: self.turn = WHITE self.channel.broadcast_cc(self.prefix + self.get_turn_str()) if not handled: player.tell_cc(self.prefix + "Invalid command.\n") def find_winner(self): # First, check resignations; that's a fast bail. if self.resigner: if self.resigner == WHITE: return self.seats[1].player elif self.resigner == BLACK: return self.seats[0].player else: self.server.log.log(self.log_prefix + "Weirdness; a resign that's not a player.") return None # Well, darn, we have to do actual work. Time for recursion! # To calculate a winner: # - Pick a side. # - For each piece on that side, see if it's connected to # both other sides. If so, that player is a winner. # - If not, there is no winner (as winners must connect all # three sides). self.found_winner = None self.adjacency = [] # Set up our adjacency checker. 
for i in range(self.size): self.adjacency.append([None] * self.size) # For each piece on the left side of the board... for i in range(self.size): if self.board[0][i]: # We're not touching the other two sides yet. self.touch_bottom = False self.touch_right = False self.update_adjacency(0, i, self.board[0][i]) if self.found_winner == WHITE: return self.seats[0].player elif self.found_winner == BLACK: return self.seats[1].player # No winner yet. return None def update_adjacency(self, x, y, color): # Skip work if a winner's already found. if self.found_winner: return # Skip work if we're off the board. if (x < 0 or x > y or y >= self.size): return # Skip work if we've been here already. if self.adjacency[x][y]: return # Skip work if it's empty or for the other player. this_cell = self.board[x][y] if this_cell != color: return # All right, it's this player's cell. Mark it visited. self.adjacency[x][y] = color # If we're on either the bottom or right edges, mark that. if (y == self.size - 1): self.touch_bottom = True if (x == y): self.touch_right = True # Bail if we've met both win conditions. if self.touch_bottom and self.touch_right: self.found_winner = color # Okay, no winner yet. Recurse on the six adjacent cells. for x_delta, y_delta in Y_DELTAS: self.update_adjacency(x + x_delta, y + y_delta, color) def resolve(self, winner): self.channel.broadcast_cc(self.prefix + "^C%s^~ wins!\n" % (winner.display_name)) Y: Use right size limits. They were hardcoded; use the constants. # Giles: y.py # Copyright 2012 Phil Bordelon # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from giles.state import State from giles.games.game import Game from giles.games.seat import Seat # What are the minimum and maximum sizes for the board? Y_MIN_SIZE = 2 Y_MAX_SIZE = 26 # . 0 # . . 1 # . . . 2 # . . . . 3 # 0 1 2 3 # # (1, 2) is adjacent to (1, 1), (2, 2), (0, 2), (1, 3), (2, 3), and (0, 1). Y_DELTAS = ((0, -1), (0, 1), (-1, 0), (1, 0), (1, 1), (-1, -1)) # Because we're lazy and use a square board despite the shape of the Y, we # fill the rest of the square with invalid characters that match neither # side. Define white and black here too. INVALID = "invalid" WHITE = "white" BLACK = "black" COL_CHARACTERS="abcdefghijklmnopqrstuvwxyz" class Y(Game): """A Y game table implementation. Invented by Claude Shannon. Adapted from my Volity implementation. """ def __init__(self, server, table_name): super(Y, self).__init__(server, table_name) self.game_display_name = "Y" self.game_name = "y" self.seats = [ Seat("White"), Seat("Black"), ] self.min_players = 2 self.max_players = 2 self.state = State("config") self.prefix = "(^RY^~): " self.log_prefix = "%s/%s " % (self.game_display_name, self.game_name) self.debug = True # Y-specific guff. self.seats[0].color = WHITE self.seats[0].color_code = "^W" self.seats[1].color = BLACK self.seats[1].color_code = "^K" self.board = None self.size = 19 self.turn = None self.turn_number = 0 self.move_list = [] self.resigner = None self.last_x = None self.last_y = None self.init_board() def init_board(self): self.board = [] # We're going to be lazy and build a square board, then fill the # half that doesn't make the proper shape with invalid marks. 
for x in range(self.size): self.board.append([None] * self.size) # Looking at the grid above, you can see that for a given column, # all row values less than that value are invalid. for y in range(x): self.board[x][y] = INVALID # That's it! def set_size(self, player, size_str): if not size_str.isdigit(): player.tell_cc(self.prefix + "You didn't even send a number!\n") return False new_size = int(size_str) if new_size < Y_MIN_SIZE or new_size > Y_MAX_SIZE: player.tell_cc(self.prefix + "Too small or large. Must be %s to %s inclusive.\n" % (Y_MIN_SIZE, Y_MAX_SIZE)) return False # Got a valid size. self.size = new_size self.init_board() self.channel.broadcast_cc(self.prefix + "^M%s^~ has changed the size of the board to ^C%s^~.\n" % (player.display_name, str(new_size))) return True def move_to_values(self, move_str): # All valid moves are of the form g22, J15, etc. Ditch blatantly # invalid moves. if type(move_str) != str or len(move_str) < 2 or len(move_str) > 3: return None # First character must be in COL_CHARACTERS. col_char = move_str[0].lower() if col_char not in COL_CHARACTERS: return None else: x = COL_CHARACTERS.index(col_char) # Next one or two must be digits. row_chars = move_str[1:] if not row_chars.isdigit(): return None else: y = int(row_chars) - 1 # Now verify that these are even in range for this board. Remember # that column values greater than the row value are invalid; that # provides a bound on the column value, so we only then need to # check the row against the upper bound. if (x < 0 or x > y or y >= self.size): return None # Valid! return (x, y) def move(self, seat, move_str): # Get the actual values of the move. values = self.move_to_values(move_str) if not values: seat.player.tell_cc(self.prefix + "Invalid move.\n") return None x, y = values if self.board[x][y]: seat.player.tell_cc(self.prefix + "That space is already occupied.\n") return None # Okay, it's an unoccupied space! Let's make the move. 
self.board[x][y] = seat.color self.channel.broadcast_cc(self.prefix + seat.color_code + "%s^~ has moved to ^C%s^~.\n" % (seat.player.display_name, move_str)) self.last_x = x self.last_y = y return (x, y) def swap(self): # This is an easy one. Take the first move and change the piece # on the board from white to black. self.board[self.move_list[0][0]][self.move_list[0][1]] = BLACK self.channel.broadcast_cc(self.prefix + "^Y%s^~ has swapped ^WWhite^~'s first move.\n" % self.seats[1].player.display_name) def print_board(self, player): slash_line = " " char_line = "" for x in range(self.size): msg = " " color_char = "^W" if x % 2 == 0: color_char = "^K" slash_line += color_char + "/^~ " char_line += "%s " % COL_CHARACTERS[x] for spc in range(self.size - x): msg += " " for y in range(x + 1): piece = self.board[y][x] if y == self.last_x and x == self.last_y: msg += "^I" if piece == BLACK: msg += "^Kx^~ " elif piece == WHITE: msg += "^Wo^~ " elif y % 2 == 0: msg += "^m,^~ " else: msg += "^M.^~ " msg += str(x + 1) + "\n" player.tell_cc(msg) player.tell_cc(slash_line + "\n") player.tell_cc(char_line + "\n") def get_turn_str(self): if self.state.get() == "playing": if self.seats[0].color == self.turn: color_word = "^WWhite^~" name_word = "^R%s^~" % self.seats[0].player.display_name else: color_word = "^KBlack^~" name_word = "^Y%s^~" % self.seats[1].player.display_name return "It is %s's turn (%s).\n" % (name_word, color_word) else: return "The game is not currently active.\n" def send_board(self): for player in self.channel.listeners: self.print_board(player) def resign(self, seat): # Okay, this person can resign; it's their turn, after all. 
self.channel.broadcast_cc(self.prefix + "^R%s^~ is resigning from the game.\n" % seat.player.display_name) self.resigner = seat.color return True def show(self, player): self.print_board(player) player.tell_cc(self.get_turn_str()) def show_help(self, player): super(Y, self).show_help(player) player.tell_cc("\nY SETUP PHASE:\n\n") player.tell_cc(" ^!size^. <size>, ^!sz^. Set board to size <size>.\n") player.tell_cc(" ^!ready^., ^!done^., ^!r^., ^!d^. End setup phase.\n") player.tell_cc("\nY PLAY:\n\n") player.tell_cc(" ^!move^. <ln>, ^!play^., ^!mv^., ^!pl^. Make move <ln> (letter number).\n") player.tell_cc(" ^!swap^. Swap the first move (only Black, only their first).\n") player.tell_cc(" ^!resign^. Resign.\n") def handle(self, player, command_str): # Handle common commands. handled = self.handle_common_commands(player, command_str) state = self.state.get() command_bits = command_str.strip().split() primary = command_str.split()[0].lower() if state == "config": if primary in ('size', 'sz'): if len(command_bits) == 2: self.set_size(player, command_bits[1]) else: player.tell_cc(self.prefix + "Invalid size command.\n") handled = True elif primary in ('done', 'ready', 'd', 'r'): self.channel.broadcast_cc(self.prefix + "The game is now ready for players.\n") self.state.set("need_players") handled = True elif state == "need_players": # If both seats are full and the game is active, time to # play! if self.seats[0].player and self.seats[1].player and self.active: self.state.set("playing") self.channel.broadcast_cc(self.prefix + "^WWhite^~: ^R%s^~; ^KBlack^~: ^Y%s^~\n" % (self.seats[0].player.display_name, self.seats[1].player.display_name)) self.turn = WHITE self.turn_number = 1 self.send_board() self.channel.broadcast_cc(self.prefix + self.get_turn_str()) elif state == "playing": made_move = False # For all move types, don't bother if it's not this player's turn. 
if primary in ('move', 'mv', 'play', 'pl', 'swap', 'resign'): seat = self.get_seat_of_player(player) if not seat: player.tell_cc(self.prefix + "You can't move; you're not playing!\n") return elif seat.color != self.turn: player.tell_cc(self.prefix + "You must wait for your turn to move.\n") return if primary in ('move', 'mv', 'play', 'pl'): if len(command_bits) == 2: success = self.move(seat, command_bits[1]) if success: move = success made_move = True else: player.tell_cc(self.prefix + "Unsuccessful move.\n") else: player.tell_cc(self.prefix + "Unsuccessful move.\n") handled = True elif primary in ('swap',): if self.turn_number == 2 and seat.player == player: self.swap() move = "swap" made_move = True else: player.tell_cc(self.prefix + "Unsuccessful swap.\n") handled = True elif primary in ('resign',): if self.resign(seat): move = "resign" made_move = True handled = True if made_move: self.send_board() self.move_list.append(move) self.turn_number += 1 winner = self.find_winner() if winner: self.resolve(winner) self.finish() else: if self.turn == WHITE: self.turn = BLACK else: self.turn = WHITE self.channel.broadcast_cc(self.prefix + self.get_turn_str()) if not handled: player.tell_cc(self.prefix + "Invalid command.\n") def find_winner(self): # First, check resignations; that's a fast bail. if self.resigner: if self.resigner == WHITE: return self.seats[1].player elif self.resigner == BLACK: return self.seats[0].player else: self.server.log.log(self.log_prefix + "Weirdness; a resign that's not a player.") return None # Well, darn, we have to do actual work. Time for recursion! # To calculate a winner: # - Pick a side. # - For each piece on that side, see if it's connected to # both other sides. If so, that player is a winner. # - If not, there is no winner (as winners must connect all # three sides). self.found_winner = None self.adjacency = [] # Set up our adjacency checker. 
for i in range(self.size): self.adjacency.append([None] * self.size) # For each piece on the left side of the board... for i in range(self.size): if self.board[0][i]: # We're not touching the other two sides yet. self.touch_bottom = False self.touch_right = False self.update_adjacency(0, i, self.board[0][i]) if self.found_winner == WHITE: return self.seats[0].player elif self.found_winner == BLACK: return self.seats[1].player # No winner yet. return None def update_adjacency(self, x, y, color): # Skip work if a winner's already found. if self.found_winner: return # Skip work if we're off the board. if (x < 0 or x > y or y >= self.size): return # Skip work if we've been here already. if self.adjacency[x][y]: return # Skip work if it's empty or for the other player. this_cell = self.board[x][y] if this_cell != color: return # All right, it's this player's cell. Mark it visited. self.adjacency[x][y] = color # If we're on either the bottom or right edges, mark that. if (y == self.size - 1): self.touch_bottom = True if (x == y): self.touch_right = True # Bail if we've met both win conditions. if self.touch_bottom and self.touch_right: self.found_winner = color # Okay, no winner yet. Recurse on the six adjacent cells. for x_delta, y_delta in Y_DELTAS: self.update_adjacency(x + x_delta, y + y_delta, color) def resolve(self, winner): self.channel.broadcast_cc(self.prefix + "^C%s^~ wins!\n" % (winner.display_name))
# The Hazard Library # Copyright (C) 2016-2017 GEM Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import unittest import numpy import numpy.testing as npt from openquake.baselib.general import DictArray from openquake.hazardlib.source import NonParametricSeismicSource from openquake.hazardlib.source.rupture import BaseRupture from openquake.hazardlib.sourceconverter import SourceConverter from openquake.hazardlib.const import TRT from openquake.hazardlib.geo.surface import PlanarSurface, SimpleFaultSurface from openquake.hazardlib.geo import Point, Line from openquake.hazardlib.geo.geodetic import point_at from openquake.hazardlib.calc.filters import SourceFilter from openquake.hazardlib.calc.hazard_curve import calc_hazard_curves from openquake.hazardlib.calc.hazard_curve import classical from openquake.hazardlib.gsim.sadigh_1997 import SadighEtAl1997 from openquake.hazardlib.gsim.si_midorikawa_1999 import SiMidorikawa1999SInter from openquake.hazardlib.gsim.campbell_2003 import Campbell2003 from openquake.hazardlib.site import Site, SiteCollection from openquake.hazardlib.pmf import PMF from openquake.hazardlib.sourceconverter import SourceGroup from openquake.hazardlib import nrml def _create_rupture(distance, magnitude, tectonic_region_type=TRT.ACTIVE_SHALLOW_CRUST): # Return a rupture with a fixed geometry located at a given r_jb distance # 
from a site located at (0.0, 0.0). # parameter float distance: # Joyner and Boore rupture-site distance # parameter float magnitude: # Rupture magnitude # Find the point at a given distance lonp, latp = point_at(0.0, 0.0, 90., distance) mag = magnitude rake = 0.0 tectonic_region_type = tectonic_region_type hypocenter = Point(lonp, latp, 2.5) surface = PlanarSurface.from_corner_points(0.01, Point(lonp, -1, 0.), Point(lonp, +1, 0.), Point(lonp, +1, 5.), Point(lonp, -1, 5.)) surface = SimpleFaultSurface.from_fault_data( fault_trace=Line([Point(lonp, -1), Point(lonp, 1)]), upper_seismogenic_depth=0.0, lower_seismogenic_depth=5.0, dip=90.0, mesh_spacing=1.0) # check effective rupture-site distance from openquake.hazardlib.geo.mesh import Mesh mesh = Mesh(numpy.array([0.0]), numpy.array([0.0])) assert abs(surface.get_joyner_boore_distance(mesh)-distance) < 1e-2 return BaseRupture(mag, rake, tectonic_region_type, hypocenter, surface, NonParametricSeismicSource) def _create_non_param_sourceA(rjb, magnitude, pmf, tectonic_region_type=TRT.ACTIVE_SHALLOW_CRUST): # Create a non-parametric source rupture = _create_rupture(rjb, magnitude) pmf = pmf data = [(rupture, pmf)] return NonParametricSeismicSource('0', 'test', tectonic_region_type, data) class HazardCurvesTestCase01(unittest.TestCase): def setUp(self): self.src1 = _create_non_param_sourceA(15., 6.3, PMF([(0.6, 0), (0.4, 1)])) self.src2 = _create_non_param_sourceA(10., 6.0, PMF([(0.7, 0), (0.3, 1)])) self.src3 = _create_non_param_sourceA(10., 6.0, PMF([(0.7, 0), (0.3, 1)]), TRT.GEOTHERMAL) site = Site(Point(0.0, 0.0), 800, True, z1pt0=100., z2pt5=1.) 
s_filter = SourceFilter(SiteCollection([site]), {}) self.sites = s_filter self.imtls = DictArray({'PGA': [0.01, 0.1, 0.3]}) gsim = SadighEtAl1997() gsim.minimum_distance = 12 # test minimum_distance self.gsim_by_trt = {TRT.ACTIVE_SHALLOW_CRUST: gsim} def test_hazard_curve_X(self): # Test the former calculator curves = calc_hazard_curves([self.src2], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] self.assertAlmostEqual(0.3, crv[0]) def test_hazard_curve_A(self): # Test back-compatibility # Classical case i.e. independent sources in a list instance curves = calc_hazard_curves([self.src2], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] npt.assert_almost_equal(numpy.array([0.30000, 0.2646, 0.0625]), crv, decimal=4) def test_hazard_curve_B(self): # Test simple calculation group = SourceGroup( TRT.ACTIVE_SHALLOW_CRUST, [self.src2], 'test', 'indep', 'indep') groups = [group] curves = calc_hazard_curves(groups, self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) npt.assert_almost_equal(numpy.array([0.30000, 0.2646, 0.0625]), curves[0][0], decimal=4) class HazardCurvePerGroupTest(HazardCurvesTestCase01): def test_mutually_exclusive_ruptures(self): # Test the calculation of hazard curves using mutually exclusive # ruptures for a single source gsim_by_trt = [SadighEtAl1997()] rupture = _create_rupture(10., 6.) 
data = [(rupture, PMF([(0.7, 0), (0.3, 1)])), (rupture, PMF([(0.6, 0), (0.4, 1)]))] src = NonParametricSeismicSource('0', 'test', TRT.ACTIVE_SHALLOW_CRUST, data) src.src_group_id = [0] group = SourceGroup( src.tectonic_region_type, [src], 'test', 'indep', 'mutex') param = dict(imtls=self.imtls) crv = classical(group, self.sites, gsim_by_trt, param)[0] npt.assert_almost_equal(numpy.array([0.35000, 0.32497, 0.10398]), crv[0].array[:, 0], decimal=4) def test_raise_error_non_uniform_group(self): # Test that the uniformity of a group (in terms of tectonic region) # is correctly checked self.assertRaises( AssertionError, SourceGroup, TRT.ACTIVE_SHALLOW_CRUST, [self.src1, self.src3], 'test', 'indep', 'indep') class HazardCurvesTestCase02(HazardCurvesTestCase01): def test_hazard_curve_A(self): # Test classical case i.e. independent sources in a list instance curves = calc_hazard_curves([self.src1], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] npt.assert_almost_equal(numpy.array([0.40000, 0.36088, 0.07703]), crv, decimal=4) def test_hazard_curve_B(self): # Test classical case i.e. independent sources in a list instance curves = calc_hazard_curves([self.src1, self.src2], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] npt.assert_almost_equal(numpy.array([0.58000, 0.53, 0.1347]), crv, decimal=4) class NankaiTestCase(unittest.TestCase): # use a source model for the Nankai region provided by M. Pagani def test(self): source_model = os.path.join(os.path.dirname(__file__), 'nankai.xml') groups = nrml.parse(source_model, SourceConverter( investigation_time=50., rupture_mesh_spacing=2.)) site = Site(Point(135.68, 35.68), 800, True, z1pt0=100., z2pt5=1.) 
s_filter = SourceFilter(SiteCollection([site]), {}) imtls = DictArray({'PGV': [20, 40, 80]}) gsim_by_trt = {'Subduction Interface': SiMidorikawa1999SInter()} hcurves = calc_hazard_curves(groups, s_filter, imtls, gsim_by_trt) npt.assert_almost_equal( [1.11315443e-01, 3.92180097e-03, 3.02064427e-05], hcurves['PGV'][0]) class MultiPointTestCase(unittest.TestCase): def test(self): d = os.path.dirname(os.path.dirname(__file__)) source_model = os.path.join(d, 'source_model/multi-point-source.xml') groups = nrml.parse(source_model, SourceConverter( investigation_time=50., rupture_mesh_spacing=2.)) site = Site(Point(0.1, 0.1), 800, True, z1pt0=100., z2pt5=1.) sitecol = SiteCollection([site]) imtls = DictArray({'PGA': [0.01, 0.02, 0.04, 0.08, 0.16]}) gsim_by_trt = {'Stable Continental Crust': Campbell2003()} hcurves = calc_hazard_curves(groups, sitecol, imtls, gsim_by_trt) expected = [0.99999778, 0.9084039, 0.148975348, 0.0036909656, 2.76326e-05] npt.assert_almost_equal(hcurves['PGA'][0], expected) # splitting in point sources [[mps1, mps2]] = groups psources = list(mps1) + list(mps2) hcurves = calc_hazard_curves(psources, sitecol, imtls, gsim_by_trt) npt.assert_almost_equal(hcurves['PGA'][0], expected) Fixed hazard tests Former-commit-id: e9cf892d1c7eca2b126f78367fb669f9f420d5a5 # The Hazard Library # Copyright (C) 2016-2017 GEM Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. 
If not, see <http://www.gnu.org/licenses/>. import os import unittest import numpy import numpy.testing as npt from openquake.baselib.general import DictArray from openquake.hazardlib.source import NonParametricSeismicSource from openquake.hazardlib.source.rupture import BaseRupture from openquake.hazardlib.sourceconverter import SourceConverter from openquake.hazardlib.const import TRT from openquake.hazardlib.geo.surface import PlanarSurface, SimpleFaultSurface from openquake.hazardlib.geo import Point, Line from openquake.hazardlib.geo.geodetic import point_at from openquake.hazardlib.calc.filters import SourceFilter from openquake.hazardlib.calc.hazard_curve import calc_hazard_curves from openquake.hazardlib.calc.hazard_curve import classical from openquake.hazardlib.gsim.sadigh_1997 import SadighEtAl1997 from openquake.hazardlib.gsim.si_midorikawa_1999 import SiMidorikawa1999SInter from openquake.hazardlib.gsim.campbell_2003 import Campbell2003 from openquake.hazardlib.site import Site, SiteCollection from openquake.hazardlib.pmf import PMF from openquake.hazardlib.sourceconverter import SourceGroup from openquake.hazardlib import nrml def _create_rupture(distance, magnitude, tectonic_region_type=TRT.ACTIVE_SHALLOW_CRUST): # Return a rupture with a fixed geometry located at a given r_jb distance # from a site located at (0.0, 0.0). 
# parameter float distance: # Joyner and Boore rupture-site distance # parameter float magnitude: # Rupture magnitude # Find the point at a given distance lonp, latp = point_at(0.0, 0.0, 90., distance) mag = magnitude rake = 0.0 tectonic_region_type = tectonic_region_type hypocenter = Point(lonp, latp, 2.5) surface = PlanarSurface.from_corner_points(0.01, Point(lonp, -1, 0.), Point(lonp, +1, 0.), Point(lonp, +1, 5.), Point(lonp, -1, 5.)) surface = SimpleFaultSurface.from_fault_data( fault_trace=Line([Point(lonp, -1), Point(lonp, 1)]), upper_seismogenic_depth=0.0, lower_seismogenic_depth=5.0, dip=90.0, mesh_spacing=1.0) # check effective rupture-site distance from openquake.hazardlib.geo.mesh import Mesh mesh = Mesh(numpy.array([0.0]), numpy.array([0.0])) assert abs(surface.get_joyner_boore_distance(mesh)-distance) < 1e-2 return BaseRupture(mag, rake, tectonic_region_type, hypocenter, surface, NonParametricSeismicSource) def _create_non_param_sourceA(rjb, magnitude, pmf, tectonic_region_type=TRT.ACTIVE_SHALLOW_CRUST): # Create a non-parametric source rupture = _create_rupture(rjb, magnitude) pmf = pmf data = [(rupture, pmf)] return NonParametricSeismicSource('0', 'test', tectonic_region_type, data) class HazardCurvesTestCase01(unittest.TestCase): def setUp(self): self.src1 = _create_non_param_sourceA(15., 6.3, PMF([(0.6, 0), (0.4, 1)])) self.src2 = _create_non_param_sourceA(10., 6.0, PMF([(0.7, 0), (0.3, 1)])) self.src3 = _create_non_param_sourceA(10., 6.0, PMF([(0.7, 0), (0.3, 1)]), TRT.GEOTHERMAL) site = Site(Point(0.0, 0.0), 800, True, z1pt0=100., z2pt5=1.) 
s_filter = SourceFilter(SiteCollection([site]), {}) self.sites = s_filter self.imtls = DictArray({'PGA': [0.01, 0.1, 0.3]}) gsim = SadighEtAl1997() gsim.minimum_distance = 12 # test minimum_distance self.gsim_by_trt = {TRT.ACTIVE_SHALLOW_CRUST: gsim} def test_hazard_curve_X(self): # Test the former calculator curves = calc_hazard_curves([self.src2], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] self.assertAlmostEqual(0.3, crv[0]) def test_hazard_curve_A(self): # Test back-compatibility # Classical case i.e. independent sources in a list instance curves = calc_hazard_curves([self.src2], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] npt.assert_almost_equal(numpy.array([0.30000, 0.2646, 0.0625]), crv, decimal=4) def test_hazard_curve_B(self): # Test simple calculation group = SourceGroup( TRT.ACTIVE_SHALLOW_CRUST, [self.src2], 'test', 'indep', 'indep') groups = [group] curves = calc_hazard_curves(groups, self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) npt.assert_almost_equal(numpy.array([0.30000, 0.2646, 0.0625]), curves[0][0], decimal=4) class HazardCurvePerGroupTest(HazardCurvesTestCase01): def test_mutually_exclusive_ruptures(self): # Test the calculation of hazard curves using mutually exclusive # ruptures for a single source gsim_by_trt = [SadighEtAl1997()] rupture = _create_rupture(10., 6.) 
data = [(rupture, PMF([(0.7, 0), (0.3, 1)])), (rupture, PMF([(0.6, 0), (0.4, 1)]))] src = NonParametricSeismicSource('0', 'test', TRT.ACTIVE_SHALLOW_CRUST, data) src.src_group_id = [0] group = SourceGroup( src.tectonic_region_type, [src], 'test', 'mutex', 'mutex') param = dict(imtls=self.imtls) crv = classical(group, self.sites, gsim_by_trt, param)[0] npt.assert_almost_equal(numpy.array([0.35000, 0.32497, 0.10398]), crv[0].array[:, 0], decimal=4) def test_raise_error_non_uniform_group(self): # Test that the uniformity of a group (in terms of tectonic region) # is correctly checked self.assertRaises( AssertionError, SourceGroup, TRT.ACTIVE_SHALLOW_CRUST, [self.src1, self.src3], 'test', 'indep', 'indep') class HazardCurvesTestCase02(HazardCurvesTestCase01): def test_hazard_curve_A(self): # Test classical case i.e. independent sources in a list instance curves = calc_hazard_curves([self.src1], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] npt.assert_almost_equal(numpy.array([0.40000, 0.36088, 0.07703]), crv, decimal=4) def test_hazard_curve_B(self): # Test classical case i.e. independent sources in a list instance curves = calc_hazard_curves([self.src1, self.src2], self.sites, self.imtls, self.gsim_by_trt, truncation_level=None) crv = curves[0][0] npt.assert_almost_equal(numpy.array([0.58000, 0.53, 0.1347]), crv, decimal=4) class NankaiTestCase(unittest.TestCase): # use a source model for the Nankai region provided by M. Pagani def test(self): source_model = os.path.join(os.path.dirname(__file__), 'nankai.xml') groups = nrml.parse(source_model, SourceConverter( investigation_time=50., rupture_mesh_spacing=2.)) site = Site(Point(135.68, 35.68), 800, True, z1pt0=100., z2pt5=1.) 
s_filter = SourceFilter(SiteCollection([site]), {}) imtls = DictArray({'PGV': [20, 40, 80]}) gsim_by_trt = {'Subduction Interface': SiMidorikawa1999SInter()} hcurves = calc_hazard_curves(groups, s_filter, imtls, gsim_by_trt) npt.assert_almost_equal( [1.11315443e-01, 3.92180097e-03, 3.02064427e-05], hcurves['PGV'][0]) class MultiPointTestCase(unittest.TestCase): def test(self): d = os.path.dirname(os.path.dirname(__file__)) source_model = os.path.join(d, 'source_model/multi-point-source.xml') groups = nrml.parse(source_model, SourceConverter( investigation_time=50., rupture_mesh_spacing=2.)) site = Site(Point(0.1, 0.1), 800, True, z1pt0=100., z2pt5=1.) sitecol = SiteCollection([site]) imtls = DictArray({'PGA': [0.01, 0.02, 0.04, 0.08, 0.16]}) gsim_by_trt = {'Stable Continental Crust': Campbell2003()} hcurves = calc_hazard_curves(groups, sitecol, imtls, gsim_by_trt) expected = [0.99999778, 0.9084039, 0.148975348, 0.0036909656, 2.76326e-05] npt.assert_almost_equal(hcurves['PGA'][0], expected) # splitting in point sources [[mps1, mps2]] = groups psources = list(mps1) + list(mps2) hcurves = calc_hazard_curves(psources, sitecol, imtls, gsim_by_trt) npt.assert_almost_equal(hcurves['PGA'][0], expected)
import signal import sys import socket import struct from tools.debug import Debug UDP_IP = "127.0.0.1" UDP_PORT = 20777 sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind((UDP_IP, UDP_PORT)) def main(): index = 1 value = None while True: data, address = sock.recvfrom(512) if not data: continue stats = struct.unpack('66f', data[0:264]) new_value = "%.14f;%.14f;%d" % (stats[64], stats[63], int(stats[65])) if new_value != value: value = new_value print(index, value) index += 1 # noinspection PyUnusedLocal def exit_gracefully(signum, frame): Debug.warn('Process killed (%s). Exiting gracefully' % signum) sock.close() sys.exit(0) if __name__ == '__main__': signal.signal(signal.SIGINT, exit_gracefully) signal.signal(signal.SIGTERM, exit_gracefully) signal.signal(signal.SIGABRT, exit_gracefully) main() fix car scanner dependencies import signal import sys import socket import struct from debug import Debug UDP_IP = "127.0.0.1" UDP_PORT = 20777 sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind((UDP_IP, UDP_PORT)) def main(): index = 1 value = None while True: data, address = sock.recvfrom(512) if not data: continue stats = struct.unpack('66f', data[0:264]) new_value = "%.14f;%.14f;%d" % (stats[64], stats[63], int(stats[65])) if new_value != value: value = new_value print(index, value) index += 1 # noinspection PyUnusedLocal def exit_gracefully(signum, frame): Debug.warn('Process killed (%s). Exiting gracefully' % signum) sock.close() sys.exit(0) if __name__ == '__main__': signal.signal(signal.SIGINT, exit_gracefully) signal.signal(signal.SIGTERM, exit_gracefully) signal.signal(signal.SIGABRT, exit_gracefully) main()
import os import sys import json import requests from ArgumentParser import parser # Processing # ---------- def process_files(args): """ :param args: The arguments parsed by argparse :returns: A dict containing file_names as keys and a dict containing a key `content` as the value Example return: { "file_name": { "content": { # file contents } } } """ files = [os.path.abspath(file) for file in args.files] file_contents = {} for file in files: try: f = open(file) file_contents[os.path.split(file)[1]] = dict(content=f.read()) f.close() except FileNotFoundError: print('File "{}"\n\tdoes not exist'.format(file)) should_create = input('Create the gist without this file [Y/n]: ') or 'Y' if not should_create == 'Y': sys.exit("gistey: exiting ...") return file_contents def create_gist(data): """ :param data: The JSON data to be posted to the API :returns: request object of the POST request made to create the gist """ end_point = 'https://api.github.com/gists' rq = requests.post(end_point, json=data) return rq def construct_data(args): """ :param args: The arguments parsed by argparse :returns: `data` dict to be passed to crete the POST request """ data = { "public": not args.secret, "description": args.description if args.description else '', "files": process_files(args) } return data # Execution # --------- def main(): args = parser.parse_args() response = create_gist(construct_data(args)) print("URL of the gist created: ", response.json()["html_url"]) if __name__ == "__main__": main() gistey.py: Use `.` for relative imports because of packaging import os import sys import json import requests from .ArgumentParser import parser # Processing # ---------- def process_files(args): """ :param args: The arguments parsed by argparse :returns: A dict containing file_names as keys and a dict containing a key `content` as the value Example return: { "file_name": { "content": { # file contents } } } """ files = [os.path.abspath(file) for file in args.files] file_contents = {} for 
file in files: try: f = open(file) file_contents[os.path.split(file)[1]] = dict(content=f.read()) f.close() except FileNotFoundError: print('File "{}"\n\tdoes not exist'.format(file)) should_create = input('Create the gist without this file [Y/n]: ') or 'Y' if not should_create == 'Y': sys.exit("gistey: exiting ...") return file_contents def create_gist(data): """ :param data: The JSON data to be posted to the API :returns: request object of the POST request made to create the gist """ end_point = 'https://api.github.com/gists' rq = requests.post(end_point, json=data) return rq def construct_data(args): """ :param args: The arguments parsed by argparse :returns: `data` dict to be passed to crete the POST request """ data = { "public": not args.secret, "description": args.description if args.description else '', "files": process_files(args) } return data # Execution # --------- def main(): args = parser.parse_args() response = create_gist(construct_data(args)) print("URL of the gist created: ", response.json()["html_url"]) if __name__ == "__main__": main()
# Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com> # <isaku.yamahata at gmail com> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.linux import bridge_lib from neutron.tests import base class BridgeLibTest(base.BaseTestCase): """A test suite to exercise the bridge libraries """ _NAMESPACE = 'test-namespace' _BR_NAME = 'test-br' _IF_NAME = 'test-if' def setUp(self): super(BridgeLibTest, self).setUp() ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start() self.execute = ip_wrapper.return_value.netns.execute def _verify_bridge_mock(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True) self.execute.reset_mock() def _verify_bridge_sysctl_mock(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True, log_fail_as_error=True) self.execute.reset_mock() def test_is_bridged_interface(self): exists = lambda path: path == "/sys/class/net/tapOK/brport" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(bridge_lib.is_bridged_interface("tapOK")) self.assertFalse(bridge_lib.is_bridged_interface("tapKO")) def test_get_interface_bridge(self): with mock.patch('os.readlink', side_effect=["prefix/br0", OSError()]): br = bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsInstance(br, bridge_lib.BridgeDevice) self.assertEqual("br0", br.name) br = bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsNone(br) 
def _test_br(self, namespace=None): br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace) self.assertEqual(namespace, br.namespace) self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME]) br.setfd(0) self._verify_bridge_mock(['brctl', 'setfd', self._BR_NAME, '0']) br.disable_stp() self._verify_bridge_mock(['brctl', 'stp', self._BR_NAME, 'off']) br.disable_ipv6() cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % self._BR_NAME self._verify_bridge_sysctl_mock(['sysctl', '-w', cmd]) br.addif(self._IF_NAME) self._verify_bridge_mock( ['brctl', 'addif', self._BR_NAME, self._IF_NAME]) br.delif(self._IF_NAME) self._verify_bridge_mock( ['brctl', 'delif', self._BR_NAME, self._IF_NAME]) br.delbr() self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME]) def test_addbr_with_namespace(self): self._test_br(self._NAMESPACE) def test_addbr_without_namespace(self): self._test_br() def test_addbr_exists(self): self.execute.side_effect = RuntimeError() with mock.patch.object(bridge_lib.BridgeDevice, 'exists', return_value=True): bridge_lib.BridgeDevice.addbr(self._BR_NAME) bridge_lib.BridgeDevice.addbr(self._BR_NAME) def test_owns_interface(self): br = bridge_lib.BridgeDevice('br-int') exists = lambda path: path == "/sys/class/net/br-int/brif/abc" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(br.owns_interface("abc")) self.assertFalse(br.owns_interface("def")) def test_get_interfaces(self): br = bridge_lib.BridgeDevice('br-int') interfaces = ["tap1", "tap2"] with mock.patch('os.listdir', side_effect=[interfaces, OSError()]): self.assertEqual(interfaces, br.get_interfaces()) self.assertEqual([], br.get_interfaces()) Fix UT BridgeLibTest when IPv6 is disabled There was missing mock of ipv6_utils.is_enabled_and_bind_by_default() in BridgeLibTest unit tests and that cause failing some of tests from this module when tests are running on host with disabled IPv6. Now it's mocked and tests are running properly and are testing what they should test. 
Closes-Bug: #1773818 Change-Id: I9144450ce85e020c0e33c5214a2178acbbbf5f54 # Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com> # <isaku.yamahata at gmail com> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.linux import bridge_lib from neutron.tests import base class BridgeLibTest(base.BaseTestCase): """A test suite to exercise the bridge libraries """ _NAMESPACE = 'test-namespace' _BR_NAME = 'test-br' _IF_NAME = 'test-if' def setUp(self): super(BridgeLibTest, self).setUp() mock.patch( 'neutron.common.ipv6_utils.is_enabled_and_bind_by_default', return_value=True).start() ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start() self.execute = ip_wrapper.return_value.netns.execute def _verify_bridge_mock(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True) self.execute.reset_mock() def _verify_bridge_sysctl_mock(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True, log_fail_as_error=True) self.execute.reset_mock() def test_is_bridged_interface(self): exists = lambda path: path == "/sys/class/net/tapOK/brport" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(bridge_lib.is_bridged_interface("tapOK")) self.assertFalse(bridge_lib.is_bridged_interface("tapKO")) def test_get_interface_bridge(self): with mock.patch('os.readlink', side_effect=["prefix/br0", OSError()]): br = 
bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsInstance(br, bridge_lib.BridgeDevice) self.assertEqual("br0", br.name) br = bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsNone(br) def _test_br(self, namespace=None): br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace) self.assertEqual(namespace, br.namespace) self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME]) br.setfd(0) self._verify_bridge_mock(['brctl', 'setfd', self._BR_NAME, '0']) br.disable_stp() self._verify_bridge_mock(['brctl', 'stp', self._BR_NAME, 'off']) br.disable_ipv6() cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % self._BR_NAME self._verify_bridge_sysctl_mock(['sysctl', '-w', cmd]) br.addif(self._IF_NAME) self._verify_bridge_mock( ['brctl', 'addif', self._BR_NAME, self._IF_NAME]) br.delif(self._IF_NAME) self._verify_bridge_mock( ['brctl', 'delif', self._BR_NAME, self._IF_NAME]) br.delbr() self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME]) def test_addbr_with_namespace(self): self._test_br(self._NAMESPACE) def test_addbr_without_namespace(self): self._test_br() def test_addbr_exists(self): self.execute.side_effect = RuntimeError() with mock.patch.object(bridge_lib.BridgeDevice, 'exists', return_value=True): bridge_lib.BridgeDevice.addbr(self._BR_NAME) bridge_lib.BridgeDevice.addbr(self._BR_NAME) def test_owns_interface(self): br = bridge_lib.BridgeDevice('br-int') exists = lambda path: path == "/sys/class/net/br-int/brif/abc" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(br.owns_interface("abc")) self.assertFalse(br.owns_interface("def")) def test_get_interfaces(self): br = bridge_lib.BridgeDevice('br-int') interfaces = ["tap1", "tap2"] with mock.patch('os.listdir', side_effect=[interfaces, OSError()]): self.assertEqual(interfaces, br.get_interfaces()) self.assertEqual([], br.get_interfaces())
import sublime import sublime_plugin import subprocess import os import re class GitManager: def __init__(self, view): self.view = view s = sublime.load_settings("Git-StatusBar.sublime-settings") self.git = s.get("git", "git") self.prefix = s.get("prefix", "") def run_git(self, cmd, cwd=None): plat = sublime.platform() if not cwd: cwd = self.getcwd() if cwd: if type(cmd) == str: cmd = [cmd] cmd = [self.git] + cmd if plat == "windows": # make sure console does not come up startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, startupinfo=startupinfo) else: my_env = os.environ.copy() my_env["PATH"] = "/usr/local/bin/:" + my_env["PATH"] p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, env=my_env) p.wait() stdoutdata, _ = p.communicate() return stdoutdata.decode('utf-8') def getcwd(self): f = self.view.file_name() cwd = None if f: cwd = os.path.dirname(f) if not cwd: window = self.view.window() if window: pd = window.project_data() if pd: cwd = pd.get("folders")[0].get("path") return cwd def branch(self): ret = self.run_git(["symbolic-ref", "HEAD", "--short"]) if ret: ret = ret.strip() else: output = self.run_git("branch") if output: m = re.search(r"\* *\(detached from (.*?)\)", output, flags=re.MULTILINE) ret = m.group(1) return ret def is_dirty(self): output = self.run_git("status") ret = "working directory clean" not in output return ret def unpushed_info(self): branch = self.branch() a, b = 0, 0 if branch: output = self.run_git(["branch", "-v"]) if output: m = re.search(r"\* .*?\[behind ([0-9])+\]", output, flags=re.MULTILINE) if m: a = int(m.group(1)) m = re.search(r"\* .*?\[ahead ([0-9])+\]", output, flags=re.MULTILINE) if m: b = int(m.group(1)) return (a, b) def badge(self): branch = self.branch() if not branch: return "" ret = branch if self.is_dirty(): ret = ret + "*" a, b = self.unpushed_info() if a: ret = 
ret + "-%d" % a if b: ret = ret + "+%d" % b return self.prefix + ret class GitStatusBarHandler(sublime_plugin.EventListener): def update_status_bar(self, view): if view.is_scratch() or view.settings().get('is_widget'): return gm = GitManager(view) badge = gm.badge() if badge: view.set_status("git-statusbar", badge) else: view.erase_status("git-statusbar") def on_new(self, view): self.update_status_bar(view) def on_load(self, view): self.update_status_bar(view) def on_activated(self, view): self.update_status_bar(view) def on_deactivated(self, view): self.update_status_bar(view) def on_post_save(self, view): self.update_status_bar(view) def on_pre_close(self, view): self.update_status_bar(view) def on_window_command(self, window, command_name, args): if command_name == "hide_panel": self.update_status_bar(window.active_view()) check if re.search returns import sublime import sublime_plugin import subprocess import os import re class GitManager: def __init__(self, view): self.view = view s = sublime.load_settings("Git-StatusBar.sublime-settings") self.git = s.get("git", "git") self.prefix = s.get("prefix", "") def run_git(self, cmd, cwd=None): plat = sublime.platform() if not cwd: cwd = self.getcwd() if cwd: if type(cmd) == str: cmd = [cmd] cmd = [self.git] + cmd if plat == "windows": # make sure console does not come up startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, startupinfo=startupinfo) else: my_env = os.environ.copy() my_env["PATH"] = "/usr/local/bin/:" + my_env["PATH"] p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, env=my_env) p.wait() stdoutdata, _ = p.communicate() return stdoutdata.decode('utf-8') def getcwd(self): f = self.view.file_name() cwd = None if f: cwd = os.path.dirname(f) if not cwd: window = self.view.window() if window: pd = window.project_data() if pd: cwd = 
pd.get("folders")[0].get("path") return cwd def branch(self): ret = self.run_git(["symbolic-ref", "HEAD", "--short"]) if ret: ret = ret.strip() else: output = self.run_git("branch") if output: m = re.search(r"\* *\(detached from (.*?)\)", output, flags=re.MULTILINE) if m: ret = m.group(1) return ret def is_dirty(self): output = self.run_git("status") ret = "working directory clean" not in output return ret def unpushed_info(self): branch = self.branch() a, b = 0, 0 if branch: output = self.run_git(["branch", "-v"]) if output: m = re.search(r"\* .*?\[behind ([0-9])+\]", output, flags=re.MULTILINE) if m: a = int(m.group(1)) m = re.search(r"\* .*?\[ahead ([0-9])+\]", output, flags=re.MULTILINE) if m: b = int(m.group(1)) return (a, b) def badge(self): branch = self.branch() if not branch: return "" ret = branch if self.is_dirty(): ret = ret + "*" a, b = self.unpushed_info() if a: ret = ret + "-%d" % a if b: ret = ret + "+%d" % b return self.prefix + ret class GitStatusBarHandler(sublime_plugin.EventListener): def update_status_bar(self, view): if view.is_scratch() or view.settings().get('is_widget'): return gm = GitManager(view) badge = gm.badge() if badge: view.set_status("git-statusbar", badge) else: view.erase_status("git-statusbar") def on_new(self, view): self.update_status_bar(view) def on_load(self, view): self.update_status_bar(view) def on_activated(self, view): self.update_status_bar(view) def on_deactivated(self, view): self.update_status_bar(view) def on_post_save(self, view): self.update_status_bar(view) def on_pre_close(self, view): self.update_status_bar(view) def on_window_command(self, window, command_name, args): if command_name == "hide_panel": self.update_status_bar(window.active_view())
""" History related magics and functionality """ #----------------------------------------------------------------------------- # Copyright (C) 2010 The IPython Development Team. # # Distributed under the terms of the BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Stdlib imports import atexit import datetime import os import re import sqlite3 import threading # Our own packages from IPython.config.configurable import Configurable from IPython.testing.skipdoctest import skip_doctest from IPython.utils import io from IPython.utils.traitlets import Bool, Dict, Instance, Int, List, Unicode from IPython.utils.warn import warn #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class HistoryManager(Configurable): """A class to organize all history-related functionality in one place. """ # Public interface # An instance of the IPython shell we are attached to shell = Instance('IPython.core.interactiveshell.InteractiveShellABC') # Lists to hold processed and raw history. These start with a blank entry # so that we can index them starting from 1 input_hist_parsed = List([""]) input_hist_raw = List([""]) # A list of directories visited during session dir_hist = List() def _dir_hist_default(self): try: return [os.getcwdu()] except OSError: return [] # A dict of output history, keyed with ints from the shell's # execution count. output_hist = Dict() # The text/plain repr of outputs. 
output_hist_reprs = Dict() # String holding the path to the history file hist_file = Unicode(config=True) # The SQLite database db = Instance(sqlite3.Connection) # The number of the current session in the history database session_number = Int() # Should we log output to the database? (default no) db_log_output = Bool(False, config=True) # Write to database every x commands (higher values save disk access & power) # Values of 1 or less effectively disable caching. db_cache_size = Int(0, config=True) # The input and output caches db_input_cache = List() db_output_cache = List() # History saving in separate thread save_thread = Instance('IPython.core.history.HistorySavingThread') # N.B. Event is a function returning an instance of _Event. save_flag = Instance(threading._Event) # Private interface # Variables used to store the three last inputs from the user. On each new # history update, we populate the user's namespace with these, shifted as # necessary. _i00 = Unicode(u'') _i = Unicode(u'') _ii = Unicode(u'') _iii = Unicode(u'') # A regex matching all forms of the exit command, so that we don't store # them in the history (it's annoying to rewind the first entry and land on # an exit call). _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$") def __init__(self, shell, config=None, **traits): """Create a new history manager associated with a shell instance. """ # We need a pointer back to the shell for various tasks. super(HistoryManager, self).__init__(shell=shell, config=config, **traits) if self.hist_file == u'': # No one has set the hist_file, yet. histfname = 'history' self.hist_file = os.path.join(shell.profile_dir.location, histfname + '.sqlite') try: self.init_db() except sqlite3.DatabaseError: if os.path.isfile(self.hist_file): # Try to move the file out of the way. newpath = os.path.join(self.shell.profile_dir.location, "hist-corrupt.sqlite") os.rename(self.hist_file, newpath) print("ERROR! 
History file wasn't a valid SQLite database.", "It was moved to %s" % newpath, "and a new file created.") self.init_db() else: # The hist_file is probably :memory: or something else. raise self.save_flag = threading.Event() self.db_input_cache_lock = threading.Lock() self.db_output_cache_lock = threading.Lock() self.save_thread = HistorySavingThread(self) self.save_thread.start() self.new_session() def init_db(self): """Connect to the database, and create tables if necessary.""" # use detect_types so that timestamps return datetime objects self.db = sqlite3.connect(self.hist_file, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer primary key autoincrement, start timestamp, end timestamp, num_cmds integer, remark text)""") self.db.execute("""CREATE TABLE IF NOT EXISTS history (session integer, line integer, source text, source_raw text, PRIMARY KEY (session, line))""") # Output history is optional, but ensure the table's there so it can be # enabled later. self.db.execute("""CREATE TABLE IF NOT EXISTS output_history (session integer, line integer, output text, PRIMARY KEY (session, line))""") self.db.commit() def new_session(self, conn=None): """Get a new session number.""" if conn is None: conn = self.db with conn: cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL, NULL, "") """, (datetime.datetime.now(),)) self.session_number = cur.lastrowid def end_session(self): """Close the database session, filling in the end time and line count.""" self.writeout_cache() with self.db: self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE session==?""", (datetime.datetime.now(), len(self.input_hist_parsed)-1, self.session_number)) self.session_number = 0 def name_session(self, name): """Give the current session a name in the history database.""" with self.db: self.db.execute("UPDATE sessions SET remark=? 
WHERE session==?", (name, self.session_number)) def reset(self, new_session=True): """Clear the session history, releasing all object references, and optionally open a new session.""" self.output_hist.clear() # The directory history can't be completely empty self.dir_hist[:] = [os.getcwdu()] if new_session: if self.session_number: self.end_session() self.input_hist_parsed[:] = [""] self.input_hist_raw[:] = [""] self.new_session() ## ------------------------------- ## Methods for retrieving history: ## ------------------------------- def _run_sql(self, sql, params, raw=True, output=False): """Prepares and runs an SQL query for the history database. Parameters ---------- sql : str Any filtering expressions to go after SELECT ... FROM ... params : tuple Parameters passed to the SQL query (to replace "?") raw, output : bool See :meth:`get_range` Returns ------- Tuples as :meth:`get_range` """ toget = 'source_raw' if raw else 'source' sqlfrom = "history" if output: sqlfrom = "history LEFT JOIN output_history USING (session, line)" toget = "history.%s, output_history.output" % toget cur = self.db.execute("SELECT session, line, %s FROM %s " %\ (toget, sqlfrom) + sql, params) if output: # Regroup into 3-tuples, and parse JSON return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur) return cur def get_session_info(self, session=0): """get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`. """ if session <= 0: session += self.session_number query = "SELECT * from sessions where session == ?" 
return self.db.execute(query, (session,)).fetchone() def get_tail(self, n=10, raw=True, output=False, include_latest=False): """Get the last n lines from the history database. Parameters ---------- n : int The number of lines to get raw, output : bool See :meth:`get_range` include_latest : bool If False (default), n+1 lines are fetched, and the latest one is discarded. This is intended to be used where the function is called by a user command, which it should not return. Returns ------- Tuples as :meth:`get_range` """ self.writeout_cache() if not include_latest: n += 1 cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?", (n,), raw=raw, output=output) if not include_latest: return reversed(list(cur)[1:]) return reversed(list(cur)) def search(self, pattern="*", raw=True, search_raw=True, output=False): """Search the database using unix glob-style matching (wildcards * and ?). Parameters ---------- pattern : str The wildcarded pattern to match when searching search_raw : bool If True, search the raw input, otherwise, the parsed input raw, output : bool See :meth:`get_range` Returns ------- Tuples as :meth:`get_range` """ tosearch = "source_raw" if search_raw else "source" if output: tosearch = "history." + tosearch self.writeout_cache() return self._run_sql("WHERE %s GLOB ?" % tosearch, (pattern,), raw=raw, output=output) def _get_range_session(self, start=1, stop=None, raw=True, output=False): """Get input and output history from the current session. Called by get_range, and takes similar parameters.""" input_hist = self.input_hist_raw if raw else self.input_hist_parsed n = len(input_hist) if start < 0: start += n if not stop: stop = n elif stop < 0: stop += n for i in range(start, stop): if output: line = (input_hist[i], self.output_hist_reprs.get(i)) else: line = input_hist[i] yield (0, i, line) def get_range(self, session=0, start=1, stop=None, raw=True,output=False): """Retrieve input by session. 
Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. start : int First line to retrieve. stop : int End of line range (excluded from output itself). If None, retrieve to the end of the session. raw : bool If True, return untranslated input output : bool If True, attempt to include output. This will be 'real' Python objects for the current session, or text reprs from previous sessions if db_log_output was enabled at the time. Where no output is found, None is used. Returns ------- An iterator over the desired lines. Each line is a 3-tuple, either (session, line, input) if output is False, or (session, line, (input, output)) if output is True. """ if session == 0 or session==self.session_number: # Current session return self._get_range_session(start, stop, raw, output) if session < 0: session += self.session_number if stop: lineclause = "line >= ? AND line < ?" params = (session, start, stop) else: lineclause = "line>=?" params = (session, start) return self._run_sql("WHERE session==? AND %s""" % lineclause, params, raw=raw, output=output) def get_range_by_str(self, rangestr, raw=True, output=False): """Get lines of history from a string of ranges, as used by magic commands %hist, %save, %macro, etc. Parameters ---------- rangestr : str A string specifying ranges, e.g. "5 ~2/1-4". See :func:`magic_history` for full details. raw, output : bool As :meth:`get_range` Returns ------- Tuples as :meth:`get_range` """ for sess, s, e in extract_hist_ranges(rangestr): for line in self.get_range(sess, s, e, raw=raw, output=output): yield line ## ---------------------------- ## Methods for storing history: ## ---------------------------- def store_inputs(self, line_num, source, source_raw=None): """Store source and raw input in history and create input cache variables _i*. Parameters ---------- line_num : int The prompt number of this input. 
source : str Python input. source_raw : str, optional If given, this is the raw input without any IPython transformations applied to it. If not given, ``source`` is used. """ if source_raw is None: source_raw = source source = source.rstrip('\n') source_raw = source_raw.rstrip('\n') # do not store exit/quit commands if self._exit_re.match(source_raw.strip()): return self.input_hist_parsed.append(source) self.input_hist_raw.append(source_raw) with self.db_input_cache_lock: self.db_input_cache.append((line_num, source, source_raw)) # Trigger to flush cache and write to DB. if len(self.db_input_cache) >= self.db_cache_size: self.save_flag.set() # update the auto _i variables self._iii = self._ii self._ii = self._i self._i = self._i00 self._i00 = source_raw # hackish access to user namespace to create _i1,_i2... dynamically new_i = '_i%s' % line_num to_main = {'_i': self._i, '_ii': self._ii, '_iii': self._iii, new_i : self._i00 } self.shell.user_ns.update(to_main) def store_output(self, line_num): """If database output logging is enabled, this saves all the outputs from the indicated prompt number to the database. It's called by run_cell after code has been executed. 
Parameters ---------- line_num : int The line number from which to save outputs """ if (not self.db_log_output) or (line_num not in self.output_hist_reprs): return output = self.output_hist_reprs[line_num] with self.db_output_cache_lock: self.db_output_cache.append((line_num, output)) if self.db_cache_size <= 1: self.save_flag.set() def _writeout_input_cache(self, conn): with conn: for line in self.db_input_cache: conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)", (self.session_number,)+line) def _writeout_output_cache(self, conn): with conn: for line in self.db_output_cache: conn.execute("INSERT INTO output_history VALUES (?, ?, ?)", (self.session_number,)+line) def writeout_cache(self, conn=None): """Write any entries in the cache to the database.""" if conn is None: conn = self.db with self.db_input_cache_lock: try: self._writeout_input_cache(conn) except sqlite3.IntegrityError: self.new_session(conn) print("ERROR! Session/line number was not unique in", "database. History logging moved to new session", self.session_number) try: # Try writing to the new session. If this fails, don't recurse self._writeout_input_cache(conn) except sqlite3.IntegrityError: pass finally: self.db_input_cache = [] with self.db_output_cache_lock: try: self._writeout_output_cache(conn) except sqlite3.IntegrityError: print("!! Session/line number for output was not unique", "in database. Output will not be stored.") finally: self.db_output_cache = [] class HistorySavingThread(threading.Thread): """This thread takes care of writing history to the database, so that the UI isn't held up while that happens. It waits for the HistoryManager's save_flag to be set, then writes out the history cache. 
The main thread is responsible for setting the flag when the cache size reaches a defined threshold.""" daemon = True stop_now = False def __init__(self, history_manager): super(HistorySavingThread, self).__init__() self.history_manager = history_manager atexit.register(self.stop) def run(self): # We need a separate db connection per thread: try: self.db = sqlite3.connect(self.history_manager.hist_file) while True: self.history_manager.save_flag.wait() if self.stop_now: return self.history_manager.save_flag.clear() self.history_manager.writeout_cache(self.db) except Exception as e: print(("The history saving thread hit an unexpected error (%s)." "History will not be written to the database.") % repr(e)) def stop(self): """This can be called from the main thread to safely stop this thread. Note that it does not attempt to write out remaining history before exiting. That should be done by calling the HistoryManager's end_session method.""" self.stop_now = True self.history_manager.save_flag.set() self.join() # To match, e.g. ~5/8-~2/3 range_re = re.compile(r""" ((?P<startsess>~?\d+)/)? (?P<start>\d+) # Only the start line num is compulsory ((?P<sep>[\-:]) ((?P<endsess>~?\d+)/)? (?P<end>\d+))? $""", re.VERBOSE) def extract_hist_ranges(ranges_str): """Turn a string of history ranges into 3-tuples of (session, start, stop). 
Examples -------- list(extract_input_ranges("~8/5-~7/4 2")) [(-8, 5, None), (-7, 1, 4), (0, 2, 3)] """ for range_str in ranges_str.split(): rmatch = range_re.match(range_str) if not rmatch: continue start = int(rmatch.group("start")) end = rmatch.group("end") end = int(end) if end else start+1 # If no end specified, get (a, a+1) if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3] end += 1 startsess = rmatch.group("startsess") or "0" endsess = rmatch.group("endsess") or startsess startsess = int(startsess.replace("~","-")) endsess = int(endsess.replace("~","-")) assert endsess >= startsess if endsess == startsess: yield (startsess, start, end) continue # Multiple sessions in one range: yield (startsess, start, None) for sess in range(startsess+1, endsess): yield (sess, 1, None) yield (endsess, 1, end) def _format_lineno(session, line): """Helper function to format line numbers properly.""" if session == 0: return str(line) return "%s#%s" % (session, line) @skip_doctest def magic_history(self, parameter_s = ''): """Print input history (_i<n> variables), with most recent last. %history -> print at most 40 inputs (some may be multi-line)\\ %history n -> print at most n inputs\\ %history n1 n2 -> print inputs between n1 and n2 (n2 not included)\\ By default, input history is printed without line numbers so it can be directly pasted into an editor. Use -n to show them. Ranges of history can be indicated using the syntax: 4 : Line 4, current session 4-6 : Lines 4-6, current session 243/1-5: Lines 1-5, session 243 ~2/7 : Line 7, session 2 before current ~8/1-~6/5 : From the first line of 8 sessions ago, to the fifth line of 6 sessions ago. Multiple ranges can be entered, separated by spaces The same syntax is used by %macro, %save, %edit, %rerun Options: -n: print line numbers for each input. This feature is only available if numbered prompts are in use. -o: also print outputs for each input. -p: print classic '>>>' python prompts before each input. 
This is useful for making documentation, and in conjunction with -o, for producing doctest-ready output. -r: (default) print the 'raw' history, i.e. the actual commands you typed. -t: print the 'translated' history, as IPython understands it. IPython filters your input and converts it all into valid Python source before executing it (things like magics or aliases are turned into function calls, for example). With this option, you'll see the native history instead of the user-entered version: '%cd /' will be seen as 'get_ipython().magic("%cd /")' instead of '%cd /'. -g: treat the arg as a pattern to grep for in (full) history. This includes the saved history (almost all commands ever written). Use '%hist -g' to show full saved history (may be very long). -l: get the last n lines from all sessions. Specify n as a single arg, or the default is the last 10 lines. -f FILENAME: instead of printing the output to the screen, redirect it to the given file. The file is always overwritten, though IPython asks for confirmation first if it already exists. Examples -------- :: In [6]: %hist -n 4 6 4:a = 12 5:print a**2 """ if not self.shell.displayhook.do_full_cache: print('This feature is only available if numbered prompts are in use.') return opts,args = self.parse_options(parameter_s,'noprtglf:',mode='string') # For brevity history_manager = self.shell.history_manager def _format_lineno(session, line): """Helper function to format line numbers properly.""" if session in (0, history_manager.session_number): return str(line) return "%s/%s" % (session, line) # Check if output to specific file was requested. try: outfname = opts['f'] except KeyError: outfile = io.stdout # default # We don't want to close stdout at the end! close_at_end = False else: if os.path.exists(outfname): if not io.ask_yes_no("File %r exists. Overwrite?" 
% outfname): print('Aborting.') return outfile = open(outfname,'w') close_at_end = True print_nums = 'n' in opts get_output = 'o' in opts pyprompts = 'p' in opts # Raw history is the default raw = not('t' in opts) default_length = 40 pattern = None if 'g' in opts: # Glob search pattern = "*" + args + "*" if args else "*" hist = history_manager.search(pattern, raw=raw, output=get_output) print_nums = True elif 'l' in opts: # Get 'tail' try: n = int(args) except ValueError, IndexError: n = 10 hist = history_manager.get_tail(n, raw=raw, output=get_output) else: if args: # Get history by ranges hist = history_manager.get_range_by_str(args, raw, get_output) else: # Just get history for the current session hist = history_manager.get_range(raw=raw, output=get_output) # We could be displaying the entire history, so let's not try to pull it # into a list in memory. Anything that needs more space will just misalign. width = 4 for session, lineno, inline in hist: # Print user history with tabs expanded to 4 spaces. The GUI clients # use hard tabs for easier usability in auto-indented code, but we want # to produce PEP-8 compliant history for safe pasting into an editor. if get_output: inline, output = inline inline = inline.expandtabs(4).rstrip() multiline = "\n" in inline line_sep = '\n' if multiline else ' ' if print_nums: print('%s:%s' % (_format_lineno(session, lineno).rjust(width), line_sep), file=outfile, end='') if pyprompts: print(">>> ", end="", file=outfile) if multiline: inline = "\n... ".join(inline.splitlines()) + "\n..." print(inline, file=outfile) if get_output and output: print(output, file=outfile) if close_at_end: outfile.close() def magic_rep(self, arg): r"""Repeat a command, or get command to input line for editing. %recall and %rep are equivalent. - %recall (no arguments): Place a string version of last computation result (stored in the special '_' variable) to the next input prompt. 
Allows you to create elaborate command lines without using copy-paste:: In[1]: l = ["hei", "vaan"] In[2]: "".join(l) Out[2]: heivaan In[3]: %rep In[4]: heivaan_ <== cursor blinking %recall 45 Place history line 45 on the next input prompt. Use %hist to find out the number. %recall 1-4 Combine the specified lines into one cell, and place it on the next input prompt. See %history for the slice syntax. %recall foo+bar If foo+bar can be evaluated in the user namespace, the result is placed at the next input prompt. Otherwise, the history is searched for lines which contain that substring, and the most recent one is placed at the next input prompt. """ if not arg: # Last output self.set_next_input(str(self.shell.user_ns["_"])) return # Get history range histlines = self.history_manager.get_range_by_str(arg) cmd = "\n".join(x[2] for x in histlines) if cmd: self.set_next_input(cmd.rstrip()) return try: # Variable in user namespace cmd = str(eval(arg, self.shell.user_ns)) except Exception: # Search for term in history histlines = self.history_manager.search("*"+arg+"*") for h in reversed([x[2] for x in histlines]): if 'rep' in h: continue self.set_next_input(h.rstrip()) return else: self.set_next_input(cmd.rstrip()) print("Couldn't evaluate or find in history:", arg) def magic_rerun(self, parameter_s=''): """Re-run previous input By default, you can specify ranges of input history to be repeated (as with %history). With no arguments, it will repeat the last line. Options: -l <n> : Repeat the last n lines of input, not including the current command. 
-g foo : Repeat the most recent line which contains foo """ opts, args = self.parse_options(parameter_s, 'l:g:', mode='string') if "l" in opts: # Last n lines n = int(opts['l']) hist = self.history_manager.get_tail(n) elif "g" in opts: # Search p = "*"+opts['g']+"*" hist = list(self.history_manager.search(p)) for l in reversed(hist): if "rerun" not in l[2]: hist = [l] # The last match which isn't a %rerun break else: hist = [] # No matches except %rerun elif args: # Specify history ranges hist = self.history_manager.get_range_by_str(args) else: # Last line hist = self.history_manager.get_tail(1) hist = [x[2] for x in hist] if not hist: print("No lines in history match specification") return histlines = "\n".join(hist) print("=== Executing: ===") print(histlines) print("=== Output: ===") self.run_cell("\n".join(hist), store_history=False) def init_ipython(ip): ip.define_magic("rep", magic_rep) ip.define_magic("recall", magic_rep) ip.define_magic("rerun", magic_rerun) ip.define_magic("hist",magic_history) # Alternative name ip.define_magic("history",magic_history) # XXX - ipy_completers are in quarantine, need to be updated to new apis #import ipy_completers #ipy_completers.quick_completer('%hist' ,'-g -t -r -n') Use casting trait for session number in history, so IPython will start on PyPy. Closes gh-719 """ History related magics and functionality """ #----------------------------------------------------------------------------- # Copyright (C) 2010 The IPython Development Team. # # Distributed under the terms of the BSD License. # # The full license is in the file COPYING.txt, distributed with this software. 
#----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Stdlib imports import atexit import datetime import os import re import sqlite3 import threading # Our own packages from IPython.config.configurable import Configurable from IPython.testing.skipdoctest import skip_doctest from IPython.utils import io from IPython.utils.traitlets import Bool, Dict, Instance, Int, CInt, List, Unicode from IPython.utils.warn import warn #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class HistoryManager(Configurable): """A class to organize all history-related functionality in one place. """ # Public interface # An instance of the IPython shell we are attached to shell = Instance('IPython.core.interactiveshell.InteractiveShellABC') # Lists to hold processed and raw history. These start with a blank entry # so that we can index them starting from 1 input_hist_parsed = List([""]) input_hist_raw = List([""]) # A list of directories visited during session dir_hist = List() def _dir_hist_default(self): try: return [os.getcwdu()] except OSError: return [] # A dict of output history, keyed with ints from the shell's # execution count. output_hist = Dict() # The text/plain repr of outputs. output_hist_reprs = Dict() # String holding the path to the history file hist_file = Unicode(config=True) # The SQLite database db = Instance(sqlite3.Connection) # The number of the current session in the history database session_number = CInt() # Should we log output to the database? 
(default no) db_log_output = Bool(False, config=True) # Write to database every x commands (higher values save disk access & power) # Values of 1 or less effectively disable caching. db_cache_size = Int(0, config=True) # The input and output caches db_input_cache = List() db_output_cache = List() # History saving in separate thread save_thread = Instance('IPython.core.history.HistorySavingThread') # N.B. Event is a function returning an instance of _Event. save_flag = Instance(threading._Event) # Private interface # Variables used to store the three last inputs from the user. On each new # history update, we populate the user's namespace with these, shifted as # necessary. _i00 = Unicode(u'') _i = Unicode(u'') _ii = Unicode(u'') _iii = Unicode(u'') # A regex matching all forms of the exit command, so that we don't store # them in the history (it's annoying to rewind the first entry and land on # an exit call). _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$") def __init__(self, shell, config=None, **traits): """Create a new history manager associated with a shell instance. """ # We need a pointer back to the shell for various tasks. super(HistoryManager, self).__init__(shell=shell, config=config, **traits) if self.hist_file == u'': # No one has set the hist_file, yet. histfname = 'history' self.hist_file = os.path.join(shell.profile_dir.location, histfname + '.sqlite') try: self.init_db() except sqlite3.DatabaseError: if os.path.isfile(self.hist_file): # Try to move the file out of the way. newpath = os.path.join(self.shell.profile_dir.location, "hist-corrupt.sqlite") os.rename(self.hist_file, newpath) print("ERROR! History file wasn't a valid SQLite database.", "It was moved to %s" % newpath, "and a new file created.") self.init_db() else: # The hist_file is probably :memory: or something else. 
raise self.save_flag = threading.Event() self.db_input_cache_lock = threading.Lock() self.db_output_cache_lock = threading.Lock() self.save_thread = HistorySavingThread(self) self.save_thread.start() self.new_session() def init_db(self): """Connect to the database, and create tables if necessary.""" # use detect_types so that timestamps return datetime objects self.db = sqlite3.connect(self.hist_file, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer primary key autoincrement, start timestamp, end timestamp, num_cmds integer, remark text)""") self.db.execute("""CREATE TABLE IF NOT EXISTS history (session integer, line integer, source text, source_raw text, PRIMARY KEY (session, line))""") # Output history is optional, but ensure the table's there so it can be # enabled later. self.db.execute("""CREATE TABLE IF NOT EXISTS output_history (session integer, line integer, output text, PRIMARY KEY (session, line))""") self.db.commit() def new_session(self, conn=None): """Get a new session number.""" if conn is None: conn = self.db with conn: cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL, NULL, "") """, (datetime.datetime.now(),)) self.session_number = cur.lastrowid def end_session(self): """Close the database session, filling in the end time and line count.""" self.writeout_cache() with self.db: self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE session==?""", (datetime.datetime.now(), len(self.input_hist_parsed)-1, self.session_number)) self.session_number = 0 def name_session(self, name): """Give the current session a name in the history database.""" with self.db: self.db.execute("UPDATE sessions SET remark=? 
WHERE session==?", (name, self.session_number)) def reset(self, new_session=True): """Clear the session history, releasing all object references, and optionally open a new session.""" self.output_hist.clear() # The directory history can't be completely empty self.dir_hist[:] = [os.getcwdu()] if new_session: if self.session_number: self.end_session() self.input_hist_parsed[:] = [""] self.input_hist_raw[:] = [""] self.new_session() ## ------------------------------- ## Methods for retrieving history: ## ------------------------------- def _run_sql(self, sql, params, raw=True, output=False): """Prepares and runs an SQL query for the history database. Parameters ---------- sql : str Any filtering expressions to go after SELECT ... FROM ... params : tuple Parameters passed to the SQL query (to replace "?") raw, output : bool See :meth:`get_range` Returns ------- Tuples as :meth:`get_range` """ toget = 'source_raw' if raw else 'source' sqlfrom = "history" if output: sqlfrom = "history LEFT JOIN output_history USING (session, line)" toget = "history.%s, output_history.output" % toget cur = self.db.execute("SELECT session, line, %s FROM %s " %\ (toget, sqlfrom) + sql, params) if output: # Regroup into 3-tuples, and parse JSON return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur) return cur def get_session_info(self, session=0): """get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`. """ if session <= 0: session += self.session_number query = "SELECT * from sessions where session == ?" 
        return self.db.execute(query, (session,)).fetchone()

    def get_tail(self, n=10, raw=True, output=False, include_latest=False):
        """Get the last n lines from the history database.

        Parameters
        ----------
        n : int
          The number of lines to get
        raw, output : bool
          See :meth:`get_range`
        include_latest : bool
          If False (default), n+1 lines are fetched, and the latest one
          is discarded. This is intended to be used where the function
          is called by a user command, which it should not return.

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        # Flush any cached input/output first so the SQL query sees the
        # most recent lines.
        self.writeout_cache()
        if not include_latest:
            # Fetch one extra row so the current command can be dropped below.
            n += 1
        cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?", (n,),
                                raw=raw, output=output)
        if not include_latest:
            # Rows come back newest-first: drop the newest, then reverse so
            # callers receive them in chronological order.
            return reversed(list(cur)[1:])
        return reversed(list(cur))

    def search(self, pattern="*", raw=True, search_raw=True,
                output=False):
        """Search the database using unix glob-style matching (wildcards
        * and ?).

        Parameters
        ----------
        pattern : str
          The wildcarded pattern to match when searching
        search_raw : bool
          If True, search the raw input, otherwise, the parsed input
        raw, output : bool
          See :meth:`get_range`

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        tosearch = "source_raw" if search_raw else "source"
        if output:
            # _run_sql joins history with output_history in this case, so the
            # column must be qualified with its table name.
            tosearch = "history." + tosearch
        # Make sure uncommitted lines are in the database before querying.
        self.writeout_cache()
        return self._run_sql("WHERE %s GLOB ?" % tosearch, (pattern,),
                                    raw=raw, output=output)

    def _get_range_session(self, start=1, stop=None, raw=True, output=False):
        """Get input and output history from the current session. Called by
        get_range, and takes similar parameters."""
        input_hist = self.input_hist_raw if raw else self.input_hist_parsed

        # Negative indices count back from the end, as with list slicing.
        # NOTE: `if not stop` also treats stop=0 as "to the end of session".
        n = len(input_hist)
        if start < 0:
            start += n
        if not stop:
            stop = n
        elif stop < 0:
            stop += n

        for i in range(start, stop):
            if output:
                # Real output objects may not have a stored repr; .get()
                # yields None where no output was recorded.
                line = (input_hist[i], self.output_hist_reprs.get(i))
            else:
                line = input_hist[i]
            # Session 0 conventionally means "current session" in the
            # (session, line, content) tuples this module yields.
            yield (0, i, line)

    def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
        """Retrieve input by session.
Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. start : int First line to retrieve. stop : int End of line range (excluded from output itself). If None, retrieve to the end of the session. raw : bool If True, return untranslated input output : bool If True, attempt to include output. This will be 'real' Python objects for the current session, or text reprs from previous sessions if db_log_output was enabled at the time. Where no output is found, None is used. Returns ------- An iterator over the desired lines. Each line is a 3-tuple, either (session, line, input) if output is False, or (session, line, (input, output)) if output is True. """ if session == 0 or session==self.session_number: # Current session return self._get_range_session(start, stop, raw, output) if session < 0: session += self.session_number if stop: lineclause = "line >= ? AND line < ?" params = (session, start, stop) else: lineclause = "line>=?" params = (session, start) return self._run_sql("WHERE session==? AND %s""" % lineclause, params, raw=raw, output=output) def get_range_by_str(self, rangestr, raw=True, output=False): """Get lines of history from a string of ranges, as used by magic commands %hist, %save, %macro, etc. Parameters ---------- rangestr : str A string specifying ranges, e.g. "5 ~2/1-4". See :func:`magic_history` for full details. raw, output : bool As :meth:`get_range` Returns ------- Tuples as :meth:`get_range` """ for sess, s, e in extract_hist_ranges(rangestr): for line in self.get_range(sess, s, e, raw=raw, output=output): yield line ## ---------------------------- ## Methods for storing history: ## ---------------------------- def store_inputs(self, line_num, source, source_raw=None): """Store source and raw input in history and create input cache variables _i*. Parameters ---------- line_num : int The prompt number of this input. 
source : str Python input. source_raw : str, optional If given, this is the raw input without any IPython transformations applied to it. If not given, ``source`` is used. """ if source_raw is None: source_raw = source source = source.rstrip('\n') source_raw = source_raw.rstrip('\n') # do not store exit/quit commands if self._exit_re.match(source_raw.strip()): return self.input_hist_parsed.append(source) self.input_hist_raw.append(source_raw) with self.db_input_cache_lock: self.db_input_cache.append((line_num, source, source_raw)) # Trigger to flush cache and write to DB. if len(self.db_input_cache) >= self.db_cache_size: self.save_flag.set() # update the auto _i variables self._iii = self._ii self._ii = self._i self._i = self._i00 self._i00 = source_raw # hackish access to user namespace to create _i1,_i2... dynamically new_i = '_i%s' % line_num to_main = {'_i': self._i, '_ii': self._ii, '_iii': self._iii, new_i : self._i00 } self.shell.user_ns.update(to_main) def store_output(self, line_num): """If database output logging is enabled, this saves all the outputs from the indicated prompt number to the database. It's called by run_cell after code has been executed. 
Parameters ---------- line_num : int The line number from which to save outputs """ if (not self.db_log_output) or (line_num not in self.output_hist_reprs): return output = self.output_hist_reprs[line_num] with self.db_output_cache_lock: self.db_output_cache.append((line_num, output)) if self.db_cache_size <= 1: self.save_flag.set() def _writeout_input_cache(self, conn): with conn: for line in self.db_input_cache: conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)", (self.session_number,)+line) def _writeout_output_cache(self, conn): with conn: for line in self.db_output_cache: conn.execute("INSERT INTO output_history VALUES (?, ?, ?)", (self.session_number,)+line) def writeout_cache(self, conn=None): """Write any entries in the cache to the database.""" if conn is None: conn = self.db with self.db_input_cache_lock: try: self._writeout_input_cache(conn) except sqlite3.IntegrityError: self.new_session(conn) print("ERROR! Session/line number was not unique in", "database. History logging moved to new session", self.session_number) try: # Try writing to the new session. If this fails, don't recurse self._writeout_input_cache(conn) except sqlite3.IntegrityError: pass finally: self.db_input_cache = [] with self.db_output_cache_lock: try: self._writeout_output_cache(conn) except sqlite3.IntegrityError: print("!! Session/line number for output was not unique", "in database. Output will not be stored.") finally: self.db_output_cache = [] class HistorySavingThread(threading.Thread): """This thread takes care of writing history to the database, so that the UI isn't held up while that happens. It waits for the HistoryManager's save_flag to be set, then writes out the history cache. 
The main thread is responsible for setting the flag when the cache size reaches a defined threshold.""" daemon = True stop_now = False def __init__(self, history_manager): super(HistorySavingThread, self).__init__() self.history_manager = history_manager atexit.register(self.stop) def run(self): # We need a separate db connection per thread: try: self.db = sqlite3.connect(self.history_manager.hist_file) while True: self.history_manager.save_flag.wait() if self.stop_now: return self.history_manager.save_flag.clear() self.history_manager.writeout_cache(self.db) except Exception as e: print(("The history saving thread hit an unexpected error (%s)." "History will not be written to the database.") % repr(e)) def stop(self): """This can be called from the main thread to safely stop this thread. Note that it does not attempt to write out remaining history before exiting. That should be done by calling the HistoryManager's end_session method.""" self.stop_now = True self.history_manager.save_flag.set() self.join() # To match, e.g. ~5/8-~2/3 range_re = re.compile(r""" ((?P<startsess>~?\d+)/)? (?P<start>\d+) # Only the start line num is compulsory ((?P<sep>[\-:]) ((?P<endsess>~?\d+)/)? (?P<end>\d+))? $""", re.VERBOSE) def extract_hist_ranges(ranges_str): """Turn a string of history ranges into 3-tuples of (session, start, stop). 
Examples -------- list(extract_input_ranges("~8/5-~7/4 2")) [(-8, 5, None), (-7, 1, 4), (0, 2, 3)] """ for range_str in ranges_str.split(): rmatch = range_re.match(range_str) if not rmatch: continue start = int(rmatch.group("start")) end = rmatch.group("end") end = int(end) if end else start+1 # If no end specified, get (a, a+1) if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3] end += 1 startsess = rmatch.group("startsess") or "0" endsess = rmatch.group("endsess") or startsess startsess = int(startsess.replace("~","-")) endsess = int(endsess.replace("~","-")) assert endsess >= startsess if endsess == startsess: yield (startsess, start, end) continue # Multiple sessions in one range: yield (startsess, start, None) for sess in range(startsess+1, endsess): yield (sess, 1, None) yield (endsess, 1, end) def _format_lineno(session, line): """Helper function to format line numbers properly.""" if session == 0: return str(line) return "%s#%s" % (session, line) @skip_doctest def magic_history(self, parameter_s = ''): """Print input history (_i<n> variables), with most recent last. %history -> print at most 40 inputs (some may be multi-line)\\ %history n -> print at most n inputs\\ %history n1 n2 -> print inputs between n1 and n2 (n2 not included)\\ By default, input history is printed without line numbers so it can be directly pasted into an editor. Use -n to show them. Ranges of history can be indicated using the syntax: 4 : Line 4, current session 4-6 : Lines 4-6, current session 243/1-5: Lines 1-5, session 243 ~2/7 : Line 7, session 2 before current ~8/1-~6/5 : From the first line of 8 sessions ago, to the fifth line of 6 sessions ago. Multiple ranges can be entered, separated by spaces The same syntax is used by %macro, %save, %edit, %rerun Options: -n: print line numbers for each input. This feature is only available if numbered prompts are in use. -o: also print outputs for each input. -p: print classic '>>>' python prompts before each input. 
This is useful for making documentation, and in conjunction with -o, for producing doctest-ready output. -r: (default) print the 'raw' history, i.e. the actual commands you typed. -t: print the 'translated' history, as IPython understands it. IPython filters your input and converts it all into valid Python source before executing it (things like magics or aliases are turned into function calls, for example). With this option, you'll see the native history instead of the user-entered version: '%cd /' will be seen as 'get_ipython().magic("%cd /")' instead of '%cd /'. -g: treat the arg as a pattern to grep for in (full) history. This includes the saved history (almost all commands ever written). Use '%hist -g' to show full saved history (may be very long). -l: get the last n lines from all sessions. Specify n as a single arg, or the default is the last 10 lines. -f FILENAME: instead of printing the output to the screen, redirect it to the given file. The file is always overwritten, though IPython asks for confirmation first if it already exists. Examples -------- :: In [6]: %hist -n 4 6 4:a = 12 5:print a**2 """ if not self.shell.displayhook.do_full_cache: print('This feature is only available if numbered prompts are in use.') return opts,args = self.parse_options(parameter_s,'noprtglf:',mode='string') # For brevity history_manager = self.shell.history_manager def _format_lineno(session, line): """Helper function to format line numbers properly.""" if session in (0, history_manager.session_number): return str(line) return "%s/%s" % (session, line) # Check if output to specific file was requested. try: outfname = opts['f'] except KeyError: outfile = io.stdout # default # We don't want to close stdout at the end! close_at_end = False else: if os.path.exists(outfname): if not io.ask_yes_no("File %r exists. Overwrite?" 
% outfname): print('Aborting.') return outfile = open(outfname,'w') close_at_end = True print_nums = 'n' in opts get_output = 'o' in opts pyprompts = 'p' in opts # Raw history is the default raw = not('t' in opts) default_length = 40 pattern = None if 'g' in opts: # Glob search pattern = "*" + args + "*" if args else "*" hist = history_manager.search(pattern, raw=raw, output=get_output) print_nums = True elif 'l' in opts: # Get 'tail' try: n = int(args) except ValueError, IndexError: n = 10 hist = history_manager.get_tail(n, raw=raw, output=get_output) else: if args: # Get history by ranges hist = history_manager.get_range_by_str(args, raw, get_output) else: # Just get history for the current session hist = history_manager.get_range(raw=raw, output=get_output) # We could be displaying the entire history, so let's not try to pull it # into a list in memory. Anything that needs more space will just misalign. width = 4 for session, lineno, inline in hist: # Print user history with tabs expanded to 4 spaces. The GUI clients # use hard tabs for easier usability in auto-indented code, but we want # to produce PEP-8 compliant history for safe pasting into an editor. if get_output: inline, output = inline inline = inline.expandtabs(4).rstrip() multiline = "\n" in inline line_sep = '\n' if multiline else ' ' if print_nums: print('%s:%s' % (_format_lineno(session, lineno).rjust(width), line_sep), file=outfile, end='') if pyprompts: print(">>> ", end="", file=outfile) if multiline: inline = "\n... ".join(inline.splitlines()) + "\n..." print(inline, file=outfile) if get_output and output: print(output, file=outfile) if close_at_end: outfile.close() def magic_rep(self, arg): r"""Repeat a command, or get command to input line for editing. %recall and %rep are equivalent. - %recall (no arguments): Place a string version of last computation result (stored in the special '_' variable) to the next input prompt. 
Allows you to create elaborate command lines without using copy-paste:: In[1]: l = ["hei", "vaan"] In[2]: "".join(l) Out[2]: heivaan In[3]: %rep In[4]: heivaan_ <== cursor blinking %recall 45 Place history line 45 on the next input prompt. Use %hist to find out the number. %recall 1-4 Combine the specified lines into one cell, and place it on the next input prompt. See %history for the slice syntax. %recall foo+bar If foo+bar can be evaluated in the user namespace, the result is placed at the next input prompt. Otherwise, the history is searched for lines which contain that substring, and the most recent one is placed at the next input prompt. """ if not arg: # Last output self.set_next_input(str(self.shell.user_ns["_"])) return # Get history range histlines = self.history_manager.get_range_by_str(arg) cmd = "\n".join(x[2] for x in histlines) if cmd: self.set_next_input(cmd.rstrip()) return try: # Variable in user namespace cmd = str(eval(arg, self.shell.user_ns)) except Exception: # Search for term in history histlines = self.history_manager.search("*"+arg+"*") for h in reversed([x[2] for x in histlines]): if 'rep' in h: continue self.set_next_input(h.rstrip()) return else: self.set_next_input(cmd.rstrip()) print("Couldn't evaluate or find in history:", arg) def magic_rerun(self, parameter_s=''): """Re-run previous input By default, you can specify ranges of input history to be repeated (as with %history). With no arguments, it will repeat the last line. Options: -l <n> : Repeat the last n lines of input, not including the current command. 
-g foo : Repeat the most recent line which contains foo """ opts, args = self.parse_options(parameter_s, 'l:g:', mode='string') if "l" in opts: # Last n lines n = int(opts['l']) hist = self.history_manager.get_tail(n) elif "g" in opts: # Search p = "*"+opts['g']+"*" hist = list(self.history_manager.search(p)) for l in reversed(hist): if "rerun" not in l[2]: hist = [l] # The last match which isn't a %rerun break else: hist = [] # No matches except %rerun elif args: # Specify history ranges hist = self.history_manager.get_range_by_str(args) else: # Last line hist = self.history_manager.get_tail(1) hist = [x[2] for x in hist] if not hist: print("No lines in history match specification") return histlines = "\n".join(hist) print("=== Executing: ===") print(histlines) print("=== Output: ===") self.run_cell("\n".join(hist), store_history=False) def init_ipython(ip): ip.define_magic("rep", magic_rep) ip.define_magic("recall", magic_rep) ip.define_magic("rerun", magic_rerun) ip.define_magic("hist",magic_history) # Alternative name ip.define_magic("history",magic_history) # XXX - ipy_completers are in quarantine, need to be updated to new apis #import ipy_completers #ipy_completers.quick_completer('%hist' ,'-g -t -r -n')
# -*- encoding: iso-8859-1 -*- ############################################################################## # # Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved. # # $Id$ # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # ############################################################################## # . 
# Fields:
#   - simple
#   - relations (one2many, many2one, many2many)
#   - function
#
# Fields Attributes:
#   _classic_read: is a classic sql fields
#   _type : field type
#   readonly
#   required
#   size
#
import string
import netsvc
import psycopg
import warnings
import tools

def _symbol_set(symb):
    # Convert a Python value into its SQL parameter form: None and False
    # both map to SQL NULL, unicode is utf-8 encoded, anything else is
    # stringified.
    if symb==None or symb==False:
        return None
    if isinstance(symb, unicode):
        return symb.encode('utf-8')
    return str(symb)

class _column(object):
    # Common ancestor of every field descriptor.  Subclasses override the
    # class attributes below and/or the get()/set()/search() hooks.
    _classic_read = True        # stored in the table, read with plain SQL
    _classic_write = True       # written with plain SQL
    _properties = False
    _type = 'unknown'
    _obj = None                 # target model name, for relational fields
    _symbol_c = '%s'            # SQL placeholder used when building queries
    _symbol_f = _symbol_set     # converter applied to the Python value
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None

    def __init__(self, string='unknown', required=False, readonly=False, domain=None, context='', states=None, priority=0, change_default=False, size=None, ondelete="set null", translate=False, select=False, **args):
        self.states = states or {}
        self.string = string
        self.readonly = readonly
        self.required = required
        self.size = size
        self.help = args.get('help', '')
        self.priority = priority
        self.change_default = change_default
        self.ondelete = ondelete
        self.translate = translate
        self._domain = domain or []
        self.relate = False
        self._context = context
        self.group_name = False
        self.view_load = 0
        self.select = select
        # Any extra keyword argument with a truthy value becomes an attribute.
        for key in args:
            if args[key]:
                setattr(self, key, args[key])
        if self.relate:
            warnings.warn("The relate attribute doesn't work anymore, use act_window tag instead", DeprecationWarning)

    def restart(self):
        # Hook for subclasses that keep per-database caches (see property).
        pass

    def set(self, cr, obj, id, name, value, user=None, context=None):
        # Default writer: plain SQL UPDATE of one column for one record.
        cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%d', (self._symbol_set[1](value), id))

    def get(self, cr, obj, ids, name, context=None, values=None):
        # Non classic-read fields must provide their own reader.
        raise Exception('undefined get method !')
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None): ids = obj.search(cr, uid, args+self._domain+[(name,'ilike',value)], offset, limit) res = obj.read(cr, uid, ids, [name]) return [x[name] for x in res] # --------------------------------------------------------- # Simple fields # --------------------------------------------------------- class boolean(_column): _type = 'boolean' _symbol_c = '%s' _symbol_f = lambda x: x and 'True' or 'False' _symbol_set = (_symbol_c, _symbol_f) class integer(_column): _type = 'integer' _symbol_c = '%d' _symbol_f = lambda x: int(x or 0) _symbol_set = (_symbol_c, _symbol_f) class reference(_column): _type = 'reference' def __init__(self, string, selection, size, **args): _column.__init__(self, string=string, size=size, selection=selection, **args) class char(_column): _type = 'char' def __init__(self, string, size, **args): _column.__init__(self, string=string, size=size, **args) self._symbol_set = (self._symbol_c, self._symbol_set_char) # takes a string (encoded in utf8) and returns a string (encoded in utf8) def _symbol_set_char(self, symb): #TODO: # * we need to remove the "symb==False" from the next line BUT # for now too many things rely on this broken behavior # * the symb==None test should be common to all data types if symb==None or symb==False: return None # we need to convert the string to a unicode object to be able # to evaluate its length (and possibly truncate it) reliably if isinstance(symb, str): u_symb = unicode(symb, 'utf8') elif isinstance(symb, unicode): u_symb = symb else: u_symb = unicode(symb) if len(u_symb) > self.size: return u_symb[:self.size-3].encode('utf8') + '...' 
else: return u_symb.encode('utf8') class text(_column): _type = 'text' import __builtin__ class float(_column): _type = 'float' _symbol_c = '%f' _symbol_f = lambda x: __builtin__.float(x or 0.0) _symbol_set = (_symbol_c, _symbol_f) def __init__(self, string='unknown', digits=None, **args): _column.__init__(self, string=string, **args) self.digits = digits # We'll need to use decimal one day or another #try: # import decimal #except ImportError: # from tools import decimal # #class float(_column): # _type = 'float' # _symbol_c = '%f' # def __init__(self, string='unknown', digits=None, **args): # _column.__init__(self, string=string, **args) # self._symbol_set = (self._symbol_c, self._symbol_set_decimal) # self.digits = digits # if not digits: # scale = 4 # else: # scale = digits[1] # self._scale = decimal.Decimal(str(10**-scale)) # self._context = decimal.Context(prec=scale, rounding=decimal.ROUND_HALF_UP) # # def _symbol_set_decimal(self, symb): # if isinstance(symb, __builtin__.float): # return decimal.Decimal('%f' % symb) # return decimal.Decimal(symb) class date(_column): _type = 'date' class datetime(_column): _type = 'datetime' class time(_column): _type = 'time' class binary(_column): _type = 'binary' _symbol_c = '%s' _symbol_f = lambda symb: symb and psycopg.Binary(symb) or None _symbol_set = (_symbol_c, _symbol_f) class selection(_column): _type = 'selection' def __init__(self, selection, string='unknown', **args): _column.__init__(self, string=string, **args) self.selection = selection def set(self, cr, obj, id, name, value, user=None, context=None): if not context: context={} #CHECKME: a priori, ceci n'est jamais appel puisque le test ci-dessous est mauvais # la raison est que selection n'est pas en classic_write = false # a noter qu'on pourrait fournir un _symbol_set specifique, et ca suffirait if value in self.selection: raise Exception, 'BAD VALUE' _column.set(self, cr, obj, id, name, value, user=None, context=context) # 
--------------------------------------------------------- # Relationals fields # --------------------------------------------------------- # # Values: (0, 0, { fields }) create # (1, ID, { fields }) modification # (2, ID) remove (delete) # (3, ID) unlink one (target id or target of relation) # (4, ID) link # (5) unlink all (only valid for one2many) # #CHECKME: dans la pratique c'est quoi la syntaxe utilisee pour le 5? (5) ou (5, 0)? class one2one(_column): _classic_read = False _classic_write = True _type = 'one2one' def __init__(self, obj, string='unknown', **args): warnings.warn("The one2one field doesn't work anymore", DeprecationWarning) _column.__init__(self, string=string, **args) self._obj = obj def set(self, cr, obj_src, id, field, act, user=None, context=None): if not context: context={} obj = obj_src.pool.get(self._obj) self._table = obj_src.pool.get(self._obj)._table if act[0]==0: id_new = obj.create(cr, user, act[1]) cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (id_new,id)) else: cr.execute('select '+field+' from '+obj_src._table+' where id=%d', (act[0],)) id =cr.fetchone()[0] obj.write(cr, user, [id] , act[1], context=context) def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None): return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name','like',value)], offset, limit) class many2one(_column): _classic_read = False _classic_write = True _type = 'many2one' def __init__(self, obj, string='unknown', **args): _column.__init__(self, string=string, **args) self._obj = obj # # TODO: speed improvement # # name is the name of the relation field def get(self, cr, obj, ids, name, user=None, context=None, values=None): if not context: context={} if not values: values={} res = {} for r in values: res[r['id']] = r[name] for id in ids: res.setdefault(id, '') obj = obj.pool.get(self._obj) # build a dictionary of the form {'id_of_distant_resource': name_of_distant_resource} from orm import except_orm try: 
names = dict(obj.name_get(cr, user, filter(None, res.values()), context)) except except_orm: names={} for id in filter(None, res.values()): try: names[id] = dict(obj.name_get(cr, user, [id], context))[id] except except_orm, e: if e.name == 'AccessError': names[id] = "== Access denied ==" else : raise for r in res.keys(): if res[r] and res[r] in names: res[r] = (res[r], names[res[r]]) else: res[r] = False return res def set(self, cr, obj_src, id, field, values, user=None, context=None): if not context: context={} obj = obj_src.pool.get(self._obj) self._table = obj_src.pool.get(self._obj)._table if type(values)==type([]): for act in values: if act[0]==0: id_new = obj.create(cr, act[2]) cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (id_new,id)) elif act[0]==1: obj.write(cr, [act[1]], act[2], context=context) elif act[0]==2: cr.execute('delete from '+self._table+' where id=%d', (act[1],)) elif act[0]==3 or act[0]==5: cr.execute('update '+obj_src._table+' set '+field+'=null where id=%d', (id,)) elif act[0]==4: cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (act[1],id)) else: if values: cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (values,id)) else: cr.execute('update '+obj_src._table+' set '+field+'=null where id=%d', (id,)) def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None): return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name','like',value)], offset, limit) class one2many(_column): _classic_read = False _classic_write = False _type = 'one2many' def __init__(self, obj, fields_id, string='unknown', limit=None, **args): _column.__init__(self, string=string, **args) self._obj = obj self._fields_id = fields_id self._limit = limit #one2many can't be used as condition for defaults assert(self.change_default != True) def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None): if not context: context = {} if not values: values = {} res = {} 
for id in ids: res[id] = [] ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id,'in',ids)], limit=self._limit) for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'): res[r[self._fields_id]].append( r['id'] ) return res def set(self, cr, obj, id, field, values, user=None, context=None): if not context: context={} if not values: return _table = obj.pool.get(self._obj)._table obj = obj.pool.get(self._obj) for act in values: if act[0]==0: act[2][self._fields_id] = id obj.create(cr, user, act[2], context=context) elif act[0]==1: obj.write(cr, user, [act[1]] , act[2], context=context) elif act[0]==2: obj.unlink(cr, user, [act[1]], context=context) elif act[0]==3: cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%d', (act[1],)) elif act[0]==4: cr.execute('update '+_table+' set '+self._fields_id+'=%d where id=%d', (id,act[1])) elif act[0]==5: cr.execute('update '+_table+' set '+self._fields_id+'=null where '+self._fields_id+'=%d', (id,)) elif act[0]==6: if not len(act[2]): act[2] = [0] cr.execute('update '+_table+' set '+self._fields_id+'=NULL where '+self._fields_id+'=%d and id not in ('+','.join(map(str, act[2]))+')', (id,)) if len(act[2]): cr.execute('update '+_table+' set '+self._fields_id+'=%d where id in ('+','.join(map(str, act[2]))+')', (id,)) def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like'): return obj.pool.get(self._obj).name_search(cr, uid, value, self._domain, offset, limit) # # Values: (0, 0, { fields }) create # (1, ID, { fields }) modification # (2, ID) remove # (3, ID) unlink # (4, ID) link # (5, ID) unlink all # (6, ?, ids) set a list of links # class many2many(_column): _classic_read = False _classic_write = False _type = 'many2many' def __init__(self, obj, rel, id1, id2, string='unknown', limit=None, **args): _column.__init__(self, string=string, **args) self._obj = obj self._rel = rel self._id1 = id1 self._id2 = 
id2 self._limit = limit def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None): if not context: context={} if not values: values={} res = {} if not ids: return res for id in ids: res[id] = [] ids_s = ','.join(map(str,ids)) limit_str = self._limit is not None and ' limit %d' % self._limit or '' obj = obj.pool.get(self._obj) d1, d2 = obj.pool.get('ir.rule').domain_get(cr, user, obj._name) if d1: d1 = ' and '+d1 cr.execute('SELECT '+self._rel+'.'+self._id2+','+self._rel+'.'+self._id1+' \ FROM '+self._rel+' , '+obj._table+' \ WHERE '+self._rel+'.'+self._id1+' in ('+ids_s+') \ AND '+self._rel+'.'+self._id2+' = '+obj._table+'.id '+d1 +limit_str+' order by '+obj._table+'.'+obj._order+' offset %d', d2+[offset]) for r in cr.fetchall(): res[r[1]].append(r[0]) return res def set(self, cr, obj, id, name, values, user=None, context=None): if not context: context={} if not values: return obj = obj.pool.get(self._obj) for act in values: if act[0]==0: idnew = obj.create(cr, user, act[2]) cr.execute('insert into '+self._rel+' ('+self._id1+','+self._id2+') values (%d,%d)', (id,idnew)) elif act[0]==1: obj.write(cr, user, [act[1]] , act[2], context=context) elif act[0]==2: obj.unlink(cr, user, [act[1]], context=context) elif act[0]==3: cr.execute('delete from '+self._rel+' where ' + self._id1 + '=%d and '+ self._id2 + '=%d', (id,act[1])) elif act[0]==4: cr.execute('insert into '+self._rel+' ('+self._id1+','+self._id2+') values (%d,%d)', (id,act[1])) elif act[0]==5: cr.execute('update '+self._rel+' set '+self._id2+'=null where '+self._id2+'=%d', (id,)) elif act[0]==6: d1, d2 = obj.pool.get('ir.rule').domain_get(cr, user, obj._name) if d1: d1 = ' and '+d1 cr.execute('delete from '+self._rel+' where '+self._id1+'=%d AND '+self._id2+' IN (SELECT '+self._rel+'.'+self._id2+' FROM '+self._rel+', '+obj._table+' WHERE '+self._rel+'.'+self._id1+'=%d AND '+self._rel+'.'+self._id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2 ) for act_nbr in act[2]: cr.execute('insert 
into '+self._rel+' ('+self._id1+','+self._id2+') values (%d, %d)', (id, act_nbr)) # # TODO: use a name_search # def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like'): return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name',operator,value)], offset, limit) # --------------------------------------------------------- # Function fields # --------------------------------------------------------- class function(_column): _classic_read = False _classic_write = False _type = 'function' _properties = True def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, method=False, store=False, **args): _column.__init__(self, **args) self._obj = obj self._method = method self._fnct = fnct self._fnct_inv = fnct_inv self._arg = arg if 'relation' in args: self._obj = args['relation'] self._fnct_inv_arg = fnct_inv_arg if not fnct_inv: self.readonly = 1 self._type = type self._fnct_search = fnct_search self.store = store if type == 'float': self._symbol_c = '%f' self._symbol_f = lambda x: __builtin__.float(x or 0.0) self._symbol_set = (self._symbol_c, self._symbol_f) def search(self, cr, uid, obj, name, args): if not self._fnct_search: #CHECKME: should raise an exception return [] return self._fnct_search(obj, cr, uid, obj, name, args) def get(self, cr, obj, ids, name, user=None, context=None, values=None): if not context: context={} if not values: values={} res = {} table = obj._table if self._method: # TODO get HAS to receive uid for permissions ! 
            return self._fnct(obj, cr, user, ids, name, self._arg, context)
        else:
            return self._fnct(cr, table, ids, name, self._arg, context)

    def set(self, cr, obj, id, name, value, user=None, context=None):
        # Writing is delegated to the inverse function, when one was provided.
        if not context:
            context={}
        if self._fnct_inv:
            self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)

# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
    # Field stored as text through a serialize/deserialize function pair
    # (defaults: repr / eval).
    # NOTE(review): the default deserialize_func is eval — unsafe on
    # untrusted database content; audit before reuse.
    def __init__(self, string='unknown', serialize_func=repr, deserialize_func=eval, type='text', **args):
        self._serialize_func = serialize_func
        self._deserialize_func = deserialize_func
        self._type = type
        self._symbol_set = (self._symbol_c, self._serialize_func)
        self._symbol_get = self._deserialize_func
        super(serialized, self).__init__(string=string, **args)

class property(function):
    # Company-dependent field stored in ir.property records.
    # NOTE: the first two parameters of the _fnct_* callbacks are swapped by
    # convention — ``self2`` is this field instance, ``self`` is the model
    # (the function-field protocol passes the model first).
    def _fnct_write(self2, self, cr, uid, id, prop, id_val, val, context=None):
        if not context:
            context={}
        (obj_dest,) = val
        definition_id = self2._field_get(self, cr, uid, prop)
        # local name shadows the property class; it is the ir.property model
        property = self.pool.get('ir.property')
        # drop any existing per-record property before writing the new value
        nid = property.search(cr, uid, [('fields_id','=',definition_id),('res_id','=',self._name+','+str(id))])
        while len(nid):
            cr.execute('delete from ir_property where id=%d', (nid.pop(),))
        # res_id=False holds the company-wide default value
        nid = property.search(cr, uid, [('fields_id','=',definition_id),('res_id','=',False)])
        default_val = False
        if nid:
            default_val = property.browse(cr, uid, nid[0], context).value
        company_id = self.pool.get('res.users').company_get(cr, uid, uid)
        res = False
        # values are stored as 'model,id' reference strings
        newval = (id_val and obj_dest+','+str(id_val)) or False
        # only materialize a property when it differs from the default
        if (newval != default_val) and newval:
            propdef = self.pool.get('ir.model.fields').browse(cr, uid, definition_id, context=context)
            res = property.create(cr, uid, {
                'name': propdef.name,
                'value': newval,
                'res_id': self._name+','+str(id),
                'company_id': company_id,
                'fields_id': definition_id
            }, context=context)
        return res

    def _fnct_read(self2, self, cr, uid, ids, prop, val, context=None):
        if not context:
            context={}
        property = self.pool.get('ir.property')
        definition_id = self2._field_get(self, cr, uid, prop)
        # fetch the company-wide default (res_id=False), as a target id
        nid = property.search(cr, uid, [('fields_id','=',definition_id),('res_id','=',False)])
        default_val = False
        if nid:
            d = property.browse(cr, uid, nid[0], context).value
            default_val = (d and int(d.split(',')[1])) or False
        # per-record properties override the default
        vids = map(lambda id: self._name+','+str(id), ids)
        nids = property.search(cr, uid, [('fields_id','=',definition_id),('res_id','in', vids)])
        res = {}
        for id in ids:
            res[id]= default_val
        for prop in property.browse(cr, uid, nids):
            res[int(prop.res_id.split(',')[1])] = (prop.value and int(prop.value.split(',')[1])) or False
        # convert raw target ids into (id, display_name) pairs, like many2one
        obj = self.pool.get(self2._obj)
        names = dict(obj.name_get(cr, uid, filter(None, res.values()), context))
        for r in res.keys():
            if res[r] and res[r] in names:
                res[r] = (res[r], names[res[r]])
            else:
                res[r] = False
        return res

    def _field_get(self, self2, cr, uid, prop):
        # Resolve and cache (per database) the ir.model.fields id backing
        # this property.
        if not self.field_id.get(cr.dbname):
            cr.execute('select id from ir_model_fields where name=%s and model=%s', (prop, self2._name))
            res = cr.fetchone()
            self.field_id[cr.dbname] = res and res[0]
        return self.field_id[cr.dbname]

    def __init__(self, obj_prop, **args):
        # per-database cache used by _field_get
        self.field_id = {}
        function.__init__(self, self._fnct_read, False, self._fnct_write, (obj_prop, ), **args)

    def restart(self):
        # Invalidate the per-database field cache (e.g. after registry reload).
        self.field_id = {}
Review property fields Improve coding guide lines bzr revid: ced-5871e8bfa7e312653fc4773de6649b53a68019f5
# -*- encoding: iso-8859-1 -*-
##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
# # $Id$ # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # ############################################################################## # . 
# Fields:
#   - simple
#   - relations (one2many, many2one, many2many)
#   - function
#
# Fields Attributes:
#   _classic_read: is a classic sql fields
#   _type : field type
#   readonly
#   required
#   size
#
import string
import netsvc
import psycopg
import warnings
import tools

def _symbol_set(symb):
    # Convert a Python value into its SQL parameter form: None and False
    # both map to SQL NULL, unicode is utf-8 encoded, anything else is
    # stringified.
    if symb==None or symb==False:
        return None
    if isinstance(symb, unicode):
        return symb.encode('utf-8')
    return str(symb)

class _column(object):
    # Common ancestor of every field descriptor.  Subclasses override the
    # class attributes below and/or the get()/set()/search() hooks.
    _classic_read = True        # stored in the table, read with plain SQL
    _classic_write = True       # written with plain SQL
    _properties = False
    _type = 'unknown'
    _obj = None                 # target model name, for relational fields
    _symbol_c = '%s'            # SQL placeholder used when building queries
    _symbol_f = _symbol_set     # converter applied to the Python value
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None

    def __init__(self, string='unknown', required=False, readonly=False, domain=None, context='', states=None, priority=0, change_default=False, size=None, ondelete="set null", translate=False, select=False, **args):
        self.states = states or {}
        self.string = string
        self.readonly = readonly
        self.required = required
        self.size = size
        self.help = args.get('help', '')
        self.priority = priority
        self.change_default = change_default
        self.ondelete = ondelete
        self.translate = translate
        self._domain = domain or []
        self.relate = False
        self._context = context
        self.group_name = False
        self.view_load = 0
        self.select = select
        # Any extra keyword argument with a truthy value becomes an attribute.
        for key in args:
            if args[key]:
                setattr(self, key, args[key])
        if self.relate:
            warnings.warn("The relate attribute doesn't work anymore, use act_window tag instead", DeprecationWarning)

    def restart(self):
        # Hook for subclasses that keep per-database caches (see property).
        pass

    def set(self, cr, obj, id, name, value, user=None, context=None):
        # Default writer: plain SQL UPDATE of one column for one record.
        cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%d', (self._symbol_set[1](value), id))

    def get(self, cr, obj, ids, name, context=None, values=None):
        # Non classic-read fields must provide their own reader.
        raise Exception('undefined get method !')
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None): ids = obj.search(cr, uid, args+self._domain+[(name,'ilike',value)], offset, limit) res = obj.read(cr, uid, ids, [name]) return [x[name] for x in res] # --------------------------------------------------------- # Simple fields # --------------------------------------------------------- class boolean(_column): _type = 'boolean' _symbol_c = '%s' _symbol_f = lambda x: x and 'True' or 'False' _symbol_set = (_symbol_c, _symbol_f) class integer(_column): _type = 'integer' _symbol_c = '%d' _symbol_f = lambda x: int(x or 0) _symbol_set = (_symbol_c, _symbol_f) class reference(_column): _type = 'reference' def __init__(self, string, selection, size, **args): _column.__init__(self, string=string, size=size, selection=selection, **args) class char(_column): _type = 'char' def __init__(self, string, size, **args): _column.__init__(self, string=string, size=size, **args) self._symbol_set = (self._symbol_c, self._symbol_set_char) # takes a string (encoded in utf8) and returns a string (encoded in utf8) def _symbol_set_char(self, symb): #TODO: # * we need to remove the "symb==False" from the next line BUT # for now too many things rely on this broken behavior # * the symb==None test should be common to all data types if symb==None or symb==False: return None # we need to convert the string to a unicode object to be able # to evaluate its length (and possibly truncate it) reliably if isinstance(symb, str): u_symb = unicode(symb, 'utf8') elif isinstance(symb, unicode): u_symb = symb else: u_symb = unicode(symb) if len(u_symb) > self.size: return u_symb[:self.size-3].encode('utf8') + '...' 
else: return u_symb.encode('utf8') class text(_column): _type = 'text' import __builtin__ class float(_column): _type = 'float' _symbol_c = '%f' _symbol_f = lambda x: __builtin__.float(x or 0.0) _symbol_set = (_symbol_c, _symbol_f) def __init__(self, string='unknown', digits=None, **args): _column.__init__(self, string=string, **args) self.digits = digits # We'll need to use decimal one day or another #try: # import decimal #except ImportError: # from tools import decimal # #class float(_column): # _type = 'float' # _symbol_c = '%f' # def __init__(self, string='unknown', digits=None, **args): # _column.__init__(self, string=string, **args) # self._symbol_set = (self._symbol_c, self._symbol_set_decimal) # self.digits = digits # if not digits: # scale = 4 # else: # scale = digits[1] # self._scale = decimal.Decimal(str(10**-scale)) # self._context = decimal.Context(prec=scale, rounding=decimal.ROUND_HALF_UP) # # def _symbol_set_decimal(self, symb): # if isinstance(symb, __builtin__.float): # return decimal.Decimal('%f' % symb) # return decimal.Decimal(symb) class date(_column): _type = 'date' class datetime(_column): _type = 'datetime' class time(_column): _type = 'time' class binary(_column): _type = 'binary' _symbol_c = '%s' _symbol_f = lambda symb: symb and psycopg.Binary(symb) or None _symbol_set = (_symbol_c, _symbol_f) class selection(_column): _type = 'selection' def __init__(self, selection, string='unknown', **args): _column.__init__(self, string=string, **args) self.selection = selection def set(self, cr, obj, id, name, value, user=None, context=None): if not context: context={} #CHECKME: a priori, ceci n'est jamais appel puisque le test ci-dessous est mauvais # la raison est que selection n'est pas en classic_write = false # a noter qu'on pourrait fournir un _symbol_set specifique, et ca suffirait if value in self.selection: raise Exception, 'BAD VALUE' _column.set(self, cr, obj, id, name, value, user=None, context=context) # 
--------------------------------------------------------- # Relationals fields # --------------------------------------------------------- # # Values: (0, 0, { fields }) create # (1, ID, { fields }) modification # (2, ID) remove (delete) # (3, ID) unlink one (target id or target of relation) # (4, ID) link # (5) unlink all (only valid for one2many) # #CHECKME: dans la pratique c'est quoi la syntaxe utilisee pour le 5? (5) ou (5, 0)? class one2one(_column): _classic_read = False _classic_write = True _type = 'one2one' def __init__(self, obj, string='unknown', **args): warnings.warn("The one2one field doesn't work anymore", DeprecationWarning) _column.__init__(self, string=string, **args) self._obj = obj def set(self, cr, obj_src, id, field, act, user=None, context=None): if not context: context={} obj = obj_src.pool.get(self._obj) self._table = obj_src.pool.get(self._obj)._table if act[0]==0: id_new = obj.create(cr, user, act[1]) cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (id_new,id)) else: cr.execute('select '+field+' from '+obj_src._table+' where id=%d', (act[0],)) id =cr.fetchone()[0] obj.write(cr, user, [id] , act[1], context=context) def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None): return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name','like',value)], offset, limit) class many2one(_column): _classic_read = False _classic_write = True _type = 'many2one' def __init__(self, obj, string='unknown', **args): _column.__init__(self, string=string, **args) self._obj = obj # # TODO: speed improvement # # name is the name of the relation field def get(self, cr, obj, ids, name, user=None, context=None, values=None): if not context: context={} if not values: values={} res = {} for r in values: res[r['id']] = r[name] for id in ids: res.setdefault(id, '') obj = obj.pool.get(self._obj) # build a dictionary of the form {'id_of_distant_resource': name_of_distant_resource} from orm import except_orm try: 
names = dict(obj.name_get(cr, user, filter(None, res.values()), context)) except except_orm: names={} for id in filter(None, res.values()): try: names[id] = dict(obj.name_get(cr, user, [id], context))[id] except except_orm, e: if e.name == 'AccessError': names[id] = "== Access denied ==" else : raise for r in res.keys(): if res[r] and res[r] in names: res[r] = (res[r], names[res[r]]) else: res[r] = False return res def set(self, cr, obj_src, id, field, values, user=None, context=None): if not context: context={} obj = obj_src.pool.get(self._obj) self._table = obj_src.pool.get(self._obj)._table if type(values)==type([]): for act in values: if act[0]==0: id_new = obj.create(cr, act[2]) cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (id_new,id)) elif act[0]==1: obj.write(cr, [act[1]], act[2], context=context) elif act[0]==2: cr.execute('delete from '+self._table+' where id=%d', (act[1],)) elif act[0]==3 or act[0]==5: cr.execute('update '+obj_src._table+' set '+field+'=null where id=%d', (id,)) elif act[0]==4: cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (act[1],id)) else: if values: cr.execute('update '+obj_src._table+' set '+field+'=%d where id=%d', (values,id)) else: cr.execute('update '+obj_src._table+' set '+field+'=null where id=%d', (id,)) def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None): return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name','like',value)], offset, limit) class one2many(_column): _classic_read = False _classic_write = False _type = 'one2many' def __init__(self, obj, fields_id, string='unknown', limit=None, **args): _column.__init__(self, string=string, **args) self._obj = obj self._fields_id = fields_id self._limit = limit #one2many can't be used as condition for defaults assert(self.change_default != True) def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None): if not context: context = {} if not values: values = {} res = {} 
        # (tail of one2many.get — the def line is above this view)
        # Pre-seed every requested id with an empty list so ids with no
        # children still appear in the result.
        for id in ids:
            res[id] = []
        # Find child records pointing back at any of `ids` via the FK field.
        ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id,'in',ids)], limit=self._limit)
        # Group child ids under their parent id.
        for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
            res[r[self._fields_id]].append( r['id'] )
        return res

    def set(self, cr, obj, id, field, values, user=None, context=None):
        """Apply a list of one2many commands (see the command table below)
        to the children of record `id`.
        """
        if not context:
            context={}
        if not values:
            return
        _table = obj.pool.get(self._obj)._table
        obj = obj.pool.get(self._obj)
        for act in values:
            if act[0]==0:
                # create a new child linked to `id`
                act[2][self._fields_id] = id
                obj.create(cr, user, act[2], context=context)
            elif act[0]==1:
                # update an existing child
                obj.write(cr, user, [act[1]] , act[2], context=context)
            elif act[0]==2:
                # delete the child record
                obj.unlink(cr, user, [act[1]], context=context)
            elif act[0]==3:
                # unlink (keep the record, clear its FK)
                cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%d', (act[1],))
            elif act[0]==4:
                # link an existing record to `id`
                cr.execute('update '+_table+' set '+self._fields_id+'=%d where id=%d', (id,act[1]))
            elif act[0]==5:
                # unlink all children of `id`
                cr.execute('update '+_table+' set '+self._fields_id+'=null where '+self._fields_id+'=%d', (id,))
            elif act[0]==6:
                # replace the full set of links with act[2]
                if not len(act[2]):
                    act[2] = [0]
                cr.execute('update '+_table+' set '+self._fields_id+'=NULL where '+self._fields_id+'=%d and id not in ('+','.join(map(str, act[2]))+')', (id,))
                if len(act[2]):
                    cr.execute('update '+_table+' set '+self._fields_id+'=%d where id in ('+','.join(map(str, act[2]))+')', (id,))

    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like'):
        """Delegate searching to the target model's name_search."""
        return obj.pool.get(self._obj).name_search(cr, uid, value, self._domain, offset, limit)

#
# Values: (0, 0,  { fields })    create
#         (1, ID, { fields })    modification
#         (2, ID)                remove
#         (3, ID)                unlink
#         (4, ID)                link
#         (5, ID)                unlink all
#         (6, ?, ids)            set a list of links
#
class many2many(_column):
    # Values not stored on the record table itself; read/write go through
    # the relation table `_rel` (columns `_id1` -> this model, `_id2` -> target).
    _classic_read = False
    _classic_write = False
    _type = 'many2many'

    def __init__(self, obj, rel, id1, id2, string='unknown', limit=None, **args):
        _column.__init__(self, string=string, **args)
        self._obj = obj
        self._rel = rel
        self._id1 = id1
        self._id2 = id2
        self._limit = limit

    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        """Return {record_id: [linked target ids]} for every id in `ids`,
        filtered by ir.rule access domains.
        """
        if not context:
            context={}
        if not values:
            values={}
        res = {}
        if not ids:
            return res
        for id in ids:
            res[id] = []
        ids_s = ','.join(map(str,ids))
        limit_str = self._limit is not None and ' limit %d' % self._limit or ''
        obj = obj.pool.get(self._obj)

        # d1/d2: record-rule SQL fragment and its parameters for the target model.
        d1, d2 = obj.pool.get('ir.rule').domain_get(cr, user, obj._name)
        if d1:
            d1 = ' and '+d1

        cr.execute('SELECT '+self._rel+'.'+self._id2+','+self._rel+'.'+self._id1+' \
                FROM '+self._rel+' , '+obj._table+' \
                WHERE '+self._rel+'.'+self._id1+' in ('+ids_s+') \
                AND '+self._rel+'.'+self._id2+' = '+obj._table+'.id '+d1
                +limit_str+' order by '+obj._table+'.'+obj._order+' offset %d',
                d2+[offset])
        for r in cr.fetchall():
            res[r[1]].append(r[0])
        return res

    def set(self, cr, obj, id, name, values, user=None, context=None):
        """Apply many2many commands (same command codes as one2many above)
        against the relation table.
        """
        if not context:
            context={}
        if not values:
            return
        obj = obj.pool.get(self._obj)
        for act in values:
            if act[0]==0:
                idnew = obj.create(cr, user, act[2])
                cr.execute('insert into '+self._rel+' ('+self._id1+','+self._id2+') values (%d,%d)', (id,idnew))
            elif act[0]==1:
                obj.write(cr, user, [act[1]] , act[2], context=context)
            elif act[0]==2:
                obj.unlink(cr, user, [act[1]], context=context)
            elif act[0]==3:
                cr.execute('delete from '+self._rel+' where ' + self._id1 + '=%d and '+ self._id2 + '=%d', (id,act[1]))
            elif act[0]==4:
                cr.execute('insert into '+self._rel+' ('+self._id1+','+self._id2+') values (%d,%d)', (id,act[1]))
            elif act[0]==5:
                cr.execute('update '+self._rel+' set '+self._id2+'=null where '+self._id2+'=%d', (id,))
            elif act[0]==6:
                # Replace the link set, but only delete rows the current user
                # is allowed to see (ir.rule domain applied to the subselect).
                d1, d2 = obj.pool.get('ir.rule').domain_get(cr, user, obj._name)
                if d1:
                    d1 = ' and '+d1
                cr.execute('delete from '+self._rel+' where '+self._id1+'=%d AND '+self._id2+' IN (SELECT '+self._rel+'.'+self._id2+' FROM '+self._rel+', '+obj._table+' WHERE '+self._rel+'.'+self._id1+'=%d AND '+self._rel+'.'+self._id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)

                for act_nbr in act[2]:
                    cr.execute('insert into '+self._rel+' ('+self._id1+','+self._id2+') values (%d, %d)', (id, act_nbr))

    #
    # TODO: use a name_search
    #
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like'):
        """Search target records by name through the target model's search()."""
        return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name',operator,value)], offset, limit)


# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
    # Computed field: values come from `_fnct` instead of a table column.
    _classic_read = False
    _classic_write = False
    _type = 'function'
    _properties = True

    def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, method=False, store=False, **args):
        _column.__init__(self, **args)
        self._obj = obj
        self._method = method
        self._fnct = fnct
        self._fnct_inv = fnct_inv
        self._arg = arg
        if 'relation' in args:
            self._obj = args['relation']
        self._fnct_inv_arg = fnct_inv_arg
        # No inverse function means the field cannot be written.
        if not fnct_inv:
            self.readonly = 1
        self._type = type
        self._fnct_search = fnct_search
        self.store = store
        if type == 'float':
            self._symbol_c = '%f'
            self._symbol_f = lambda x: __builtin__.float(x or 0.0)
            self._symbol_set = (self._symbol_c, self._symbol_f)

    def search(self, cr, uid, obj, name, args):
        """Delegate to the user-supplied search function, if any."""
        if not self._fnct_search:
            #CHECKME: should raise an exception
            return []
        return self._fnct_search(obj, cr, uid, obj, name, args)

    def get(self, cr, obj, ids, name, user=None, context=None, values=None):
        """Compute the field values by calling `_fnct`; the call signature
        differs depending on whether `_fnct` is a model method or a plain
        function.
        """
        if not context:
            context={}
        if not values:
            values={}
        res = {}
        table = obj._table
        if self._method:
            # TODO get HAS to receive uid for permissions !
            return self._fnct(obj, cr, user, ids, name, self._arg, context)
        else:
            return self._fnct(cr, table, ids, name, self._arg, context)

    def set(self, cr, obj, id, name, value, user=None, context=None):
        """Write through the inverse function, when one was provided."""
        if not context:
            context={}
        if self._fnct_inv:
            self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)

# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
    # NOTE(review): the default deserialize_func is `eval`, which executes
    # arbitrary expressions read from the database — a security risk if the
    # stored text can be influenced by untrusted users; flagged, not changed.
    def __init__(self, string='unknown', serialize_func=repr, deserialize_func=eval, type='text', **args):
        self._serialize_func = serialize_func
        self._deserialize_func = deserialize_func
        self._type = type
        self._symbol_set = (self._symbol_c, self._serialize_func)
        self._symbol_get = self._deserialize_func
        super(serialized, self).__init__(string=string, **args)


class property(function):
    # Company-dependent field stored in ir.property rows keyed by
    # 'model,res_id'; shadows the builtin name `property` (historical).

    def _fnct_write(self, obj, cr, uid, id, prop, id_val, val, context=None):
        """Inverse function: persist the value as an ir.property record,
        unless it equals the model-level default.
        """
        if not context:
            context={}
        (obj_dest,) = val
        definition_id = self._field_get(cr, uid, obj._name, prop)

        property = obj.pool.get('ir.property')
        # Drop any existing property rows for this record.
        nid = property.search(cr, uid, [('fields_id', '=', definition_id), ('res_id', '=', obj._name+','+str(id))])
        while len(nid):
            cr.execute('DELETE FROM ir_property WHERE id=%d', (nid.pop(),))

        # Default = the property row with no res_id.
        nid = property.search(cr, uid, [('fields_id', '=', definition_id), ('res_id', '=', False)])
        default_val = False
        if nid:
            default_val = property.browse(cr, uid, nid[0], context).value

        company_id = obj.pool.get('res.users').company_get(cr, uid, uid)
        res = False
        newval = (id_val and obj_dest+','+str(id_val)) or False
        # Only store a row when the value differs from the default.
        if (newval != default_val) and newval:
            propdef = obj.pool.get('ir.model.fields').browse(cr, uid, definition_id, context=context)
            res = property.create(cr, uid, {
                'name': propdef.name,
                'value': newval,
                'res_id': obj._name+','+str(id),
                'company_id': company_id,
                'fields_id': definition_id
            }, context=context)
        return res

    def _fnct_read(self, obj, cr, uid, ids, prop, val, context=None):
        """Read function: resolve each id's property value (falling back to
        the default row) and return many2one style (id, name) pairs.
        """
        if not context:
            context={}
        property = obj.pool.get('ir.property')
        definition_id = self._field_get(cr, uid, obj._name, prop)

        nid = property.search(cr, uid, [('fields_id', '=', definition_id), ('res_id', '=', False)])
        default_val = False
        if nid:
            d = property.browse(cr, uid, nid[0], context).value
            # value is stored as 'model,id' — keep only the numeric id
            default_val = (d and int(d.split(',')[1])) or False

        vids = [obj._name + ',' + str(id) for id in ids]
        nids = property.search(cr, uid, [('fields_id', '=', definition_id), ('res_id', 'in', vids)])

        res = {}
        for id in ids:
            res[id]= default_val
        for prop in property.browse(cr, uid, nids):
            res[int(prop.res_id.split(',')[1])] = (prop.value and \
                    int(prop.value.split(',')[1])) or False

        obj = obj.pool.get(self._obj)
        names = dict(obj.name_get(cr, uid, filter(None, res.values()), context))
        for r in res.keys():
            if res[r] and res[r] in names:
                res[r] = (res[r], names[res[r]])
            else:
                res[r] = False
        return res

    def _field_get(self, cr, uid, model_name, prop):
        """Return (and cache per database) the ir_model_fields id of `prop`."""
        if not self.field_id.get(cr.dbname):
            cr.execute('SELECT id \
                    FROM ir_model_fields \
                    WHERE name=%s AND model=%s', (prop, model_name))
            res = cr.fetchone()
            self.field_id[cr.dbname] = res and res[0]
        return self.field_id[cr.dbname]

    def __init__(self, obj_prop, **args):
        # per-dbname cache of the field definition id
        self.field_id = {}
        function.__init__(self, self._fnct_read, False, self._fnct_write, (obj_prop, ), **args)

    def restart(self):
        """Clear the per-database field-id cache."""
        self.field_id = {}
#!/usr/bin/env python """A simple interactive kernel that talks to a frontend over 0MQ. Things to do: * Implement `set_parent` logic. Right before doing exec, the Kernel should call set_parent on all the PUB objects with the message about to be executed. * Implement random port and security key logic. * Implement control messages. * Implement event loop and poll version. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Standard library imports import __builtin__ import atexit import sys import time import traceback import logging import uuid from datetime import datetime from signal import ( signal, getsignal, default_int_handler, SIGINT, SIG_IGN ) # System library imports import zmq from zmq.eventloop import ioloop from zmq.eventloop.zmqstream import ZMQStream # Local imports from IPython.core import pylabtools from IPython.config.configurable import Configurable from IPython.config.application import boolean_flag, catch_config_error, Application from IPython.core.application import ProfileDir from IPython.core.error import StdinNotImplementedError from IPython.core.shellapp import ( InteractiveShellApp, shell_flags, shell_aliases ) from IPython.utils import io from IPython.utils import py3compat from IPython.utils.frame import extract_module_locals from IPython.utils.jsonutil import json_clean from IPython.utils.traitlets import ( Any, Instance, Float, Dict, CaselessStrEnum, List, Set, Integer, Unicode ) from entry_point import base_launch_kernel from kernelapp import KernelApp, kernel_flags, kernel_aliases from serialize import serialize_object, unpack_apply_message from session import Session, Message from zmqshell import ZMQInteractiveShell #----------------------------------------------------------------------------- # Main kernel class 
#----------------------------------------------------------------------------- class Kernel(Configurable): #--------------------------------------------------------------------------- # Kernel interface #--------------------------------------------------------------------------- # attribute to override with a GUI eventloop = Any(None) def _eventloop_changed(self, name, old, new): """schedule call to eventloop from IOLoop""" loop = ioloop.IOLoop.instance() loop.add_timeout(time.time()+0.1, self.enter_eventloop) shell = Instance('IPython.core.interactiveshell.InteractiveShellABC') session = Instance(Session) profile_dir = Instance('IPython.core.profiledir.ProfileDir') shell_streams = List() control_stream = Instance(ZMQStream) iopub_socket = Instance(zmq.Socket) stdin_socket = Instance(zmq.Socket) log = Instance(logging.Logger) user_module = Any() def _user_module_changed(self, name, old, new): if self.shell is not None: self.shell.user_module = new user_ns = Dict(default_value=None) def _user_ns_changed(self, name, old, new): if self.shell is not None: self.shell.user_ns = new self.shell.init_user_ns() # identities: int_id = Integer(-1) ident = Unicode() def _ident_default(self): return unicode(uuid.uuid4()) # Private interface # Time to sleep after flushing the stdout/err buffers in each execute # cycle. While this introduces a hard limit on the minimal latency of the # execute cycle, it helps prevent output synchronization problems for # clients. # Units are in seconds. The minimum zmq latency on local host is probably # ~150 microseconds, set this to 500us for now. We may need to increase it # a little if it's not enough after more interactive testing. _execute_sleep = Float(0.0005, config=True) # Frequency of the kernel's event loop. # Units are in seconds, kernel subclasses for GUI toolkits may need to # adapt to milliseconds. 
_poll_interval = Float(0.05, config=True) # If the shutdown was requested over the network, we leave here the # necessary reply message so it can be sent by our registered atexit # handler. This ensures that the reply is only sent to clients truly at # the end of our shutdown process (which happens after the underlying # IPython shell's own shutdown). _shutdown_message = None # This is a dict of port number that the kernel is listening on. It is set # by record_ports and used by connect_request. _recorded_ports = Dict() # set of aborted msg_ids aborted = Set() def __init__(self, **kwargs): super(Kernel, self).__init__(**kwargs) # Initialize the InteractiveShell subclass self.shell = ZMQInteractiveShell.instance(config=self.config, profile_dir = self.profile_dir, user_module = self.user_module, user_ns = self.user_ns, ) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('pyout') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket # TMP - hack while developing self.shell._reply_content = None # Build dict of handlers for message types msg_types = [ 'execute_request', 'complete_request', 'object_info_request', 'history_request', 'connect_request', 'shutdown_request', 'apply_request', ] self.shell_handlers = {} for msg_type in msg_types: self.shell_handlers[msg_type] = getattr(self, msg_type) control_msg_types = msg_types + [ 'clear_request', 'abort_request' ] self.control_handlers = {} for msg_type in control_msg_types: self.control_handlers[msg_type] = getattr(self, msg_type) def dispatch_control(self, msg): """dispatch control requests""" idents,msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.unserialize(msg, content=True, copy=False) except: self.log.error("Invalid Control Message", exc_info=True) return self.log.debug("Control received: %s", msg) header = msg['header'] msg_id = header['msg_id'] 
msg_type = header['msg_type'] handler = self.control_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type) else: try: handler(self.control_stream, idents, msg) except Exception: self.log.error("Exception in control handler:", exc_info=True) def dispatch_shell(self, stream, msg): """dispatch shell requests""" # flush control requests first if self.control_stream: self.control_stream.flush() idents,msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.unserialize(msg, content=True, copy=False) except: self.log.error("Invalid Message", exc_info=True) return header = msg['header'] msg_id = header['msg_id'] msg_type = msg['header']['msg_type'] # Print some info about this message and leave a '--->' marker, so it's # easier to trace visually the message chain when debugging. Each # handler prints its message at the end. self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type) self.log.debug(' Content: %s\n --->\n ', msg['content']) if msg_id in self.aborted: self.aborted.remove(msg_id) # is it safe to assume a msg_id will not be resubmitted? 
reply_type = msg_type.split('_')[0] + '_reply' status = {'status' : 'aborted'} sub = {'engine' : self.ident} sub.update(status) reply_msg = self.session.send(stream, reply_type, subheader=sub, content=status, parent=msg, ident=idents) return handler = self.shell_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type) else: # ensure default_int_handler during handler call sig = signal(SIGINT, default_int_handler) try: handler(stream, idents, msg) except Exception: self.log.error("Exception in message handler:", exc_info=True) finally: signal(SIGINT, sig) def enter_eventloop(self): """enter eventloop""" self.log.info("entering eventloop") # restore default_int_handler signal(SIGINT, default_int_handler) while self.eventloop is not None: try: self.eventloop(self) except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel self.log.error("KeyboardInterrupt caught in kernel") continue else: # eventloop exited cleanly, this means we should stop (right?) self.eventloop = None break self.log.info("exiting eventloop") # if eventloop exits, IOLoop should stop ioloop.IOLoop.instance().stop() def start(self): """register dispatchers for streams""" self.shell.exit_now = False if self.control_stream: self.control_stream.on_recv(self.dispatch_control, copy=False) def make_dispatcher(stream): def dispatcher(msg): return self.dispatch_shell(stream, msg) return dispatcher for s in self.shell_streams: s.on_recv(make_dispatcher(s), copy=False) def do_one_iteration(self): """step eventloop just once""" if self.control_stream: self.control_stream.flush() for stream in self.shell_streams: # handle at most one request per iteration stream.flush(zmq.POLLIN, 1) stream.flush(zmq.POLLOUT) def record_ports(self, ports): """Record the ports that this kernel is using. The creator of the Kernel instance must call this methods if they want the :meth:`connect_request` method to return the port numbers. 
""" self._recorded_ports = ports #--------------------------------------------------------------------------- # Kernel request handlers #--------------------------------------------------------------------------- def _make_subheader(self): """init subheader dict, for execute/apply_reply""" return { 'dependencies_met' : True, 'engine' : self.ident, 'started': datetime.now(), } def _publish_pyin(self, code, parent, execution_count): """Publish the code request on the pyin stream.""" self.session.send(self.iopub_socket, u'pyin', {u'code':code, u'execution_count': execution_count}, parent=parent, ident=self._topic('pyin') ) def execute_request(self, stream, ident, parent): self.session.send(self.iopub_socket, u'status', {u'execution_state':u'busy'}, parent=parent, ident=self._topic('status'), ) try: content = parent[u'content'] code = content[u'code'] silent = content[u'silent'] except: self.log.error("Got bad msg: ") self.log.error("%s", parent) return sub = self._make_subheader() shell = self.shell # we'll need this a lot here # Replace raw_input. Note that is not sufficient to replace # raw_input in the user namespace. if content.get('allow_stdin', False): raw_input = lambda prompt='': self._raw_input(prompt, ident, parent) else: raw_input = lambda prompt='' : self._no_raw_input() if py3compat.PY3: __builtin__.input = raw_input else: __builtin__.raw_input = raw_input # Set the parent message of the display hook and out streams. shell.displayhook.set_parent(parent) shell.display_pub.set_parent(parent) sys.stdout.set_parent(parent) sys.stderr.set_parent(parent) # Re-broadcast our input for the benefit of listening clients, and # start computing output if not silent: self._publish_pyin(code, parent, shell.execution_count) reply_content = {} try: # FIXME: the shell calls the exception handler itself. 
shell.run_cell(code, store_history=not silent, silent=silent) except: status = u'error' # FIXME: this code right now isn't being used yet by default, # because the run_cell() call above directly fires off exception # reporting. This code, therefore, is only active in the scenario # where runlines itself has an unhandled exception. We need to # uniformize this, for all exception construction to come from a # single location in the codbase. etype, evalue, tb = sys.exc_info() tb_list = traceback.format_exception(etype, evalue, tb) reply_content.update(shell._showtraceback(etype, evalue, tb_list)) else: status = u'ok' reply_content[u'status'] = status # Return the execution counter so clients can display prompts reply_content['execution_count'] = shell.execution_count - 1 # FIXME - fish exception info out of shell, possibly left there by # runlines. We'll need to clean up this logic later. if shell._reply_content is not None: reply_content.update(shell._reply_content) # reset after use shell._reply_content = None # At this point, we can tell whether the main code execution succeeded # or not. If it did, we proceed to evaluate user_variables/expressions if reply_content['status'] == 'ok': reply_content[u'user_variables'] = \ shell.user_variables(content.get(u'user_variables', [])) reply_content[u'user_expressions'] = \ shell.user_expressions(content.get(u'user_expressions', {})) else: # If there was an error, don't even try to compute variables or # expressions reply_content[u'user_variables'] = {} reply_content[u'user_expressions'] = {} # Payloads should be retrieved regardless of outcome, so we can both # recover partial output (that could have been generated early in a # block, before an error) and clear the payload system always. reply_content[u'payload'] = shell.payload_manager.read_payload() # Be agressive about clearing the payload because we don't want # it to sit in memory until the next execute_request comes in. 
shell.payload_manager.clear_payload() # Flush output before sending the reply. sys.stdout.flush() sys.stderr.flush() # FIXME: on rare occasions, the flush doesn't seem to make it to the # clients... This seems to mitigate the problem, but we definitely need # to better understand what's going on. if self._execute_sleep: time.sleep(self._execute_sleep) # Send the reply. reply_content = json_clean(reply_content) sub['status'] = reply_content['status'] if reply_content['status'] == 'error' and \ reply_content['ename'] == 'UnmetDependency': sub['dependencies_met'] = False reply_msg = self.session.send(stream, u'execute_reply', reply_content, parent, subheader=sub, ident=ident) self.log.debug("%s", reply_msg) if not silent and reply_msg['content']['status'] == u'error': self._abort_queues() self.session.send(self.iopub_socket, u'status', {u'execution_state':u'idle'}, parent=parent, ident=self._topic('status')) def complete_request(self, stream, ident, parent): txt, matches = self._complete(parent) matches = {'matches' : matches, 'matched_text' : txt, 'status' : 'ok'} matches = json_clean(matches) completion_msg = self.session.send(stream, 'complete_reply', matches, parent, ident) self.log.debug("%s", completion_msg) def object_info_request(self, stream, ident, parent): content = parent['content'] object_info = self.shell.object_inspect(content['oname'], detail_level = content.get('detail_level', 0) ) # Before we send this object over, we scrub it for JSON usage oinfo = json_clean(object_info) msg = self.session.send(stream, 'object_info_reply', oinfo, parent, ident) self.log.debug("%s", msg) def history_request(self, stream, ident, parent): # We need to pull these out, as passing **kwargs doesn't work with # unicode keys before Python 2.6.5. 
hist_access_type = parent['content']['hist_access_type'] raw = parent['content']['raw'] output = parent['content']['output'] if hist_access_type == 'tail': n = parent['content']['n'] hist = self.shell.history_manager.get_tail(n, raw=raw, output=output, include_latest=True) elif hist_access_type == 'range': session = parent['content']['session'] start = parent['content']['start'] stop = parent['content']['stop'] hist = self.shell.history_manager.get_range(session, start, stop, raw=raw, output=output) elif hist_access_type == 'search': pattern = parent['content']['pattern'] hist = self.shell.history_manager.search(pattern, raw=raw, output=output) else: hist = [] hist = list(hist) content = {'history' : hist} content = json_clean(content) msg = self.session.send(stream, 'history_reply', content, parent, ident) self.log.debug("Sending history reply with %i entries", len(hist)) def connect_request(self, stream, ident, parent): if self._recorded_ports is not None: content = self._recorded_ports.copy() else: content = {} msg = self.session.send(stream, 'connect_reply', content, parent, ident) self.log.debug("%s", msg) def shutdown_request(self, stream, ident, parent): self.shell.exit_now = True content = dict(status='ok') content.update(parent['content']) self.session.send(stream, u'shutdown_reply', content, parent, ident=ident) # same content, but different msg_id for broadcasting on IOPub self._shutdown_message = self.session.msg(u'shutdown_reply', content, parent ) self._at_shutdown() # call sys.exit after a short delay loop = ioloop.IOLoop.instance() loop.add_timeout(time.time()+0.1, loop.stop) #--------------------------------------------------------------------------- # Engine methods #--------------------------------------------------------------------------- def apply_request(self, stream, ident, parent): try: content = parent[u'content'] bufs = parent[u'buffers'] msg_id = parent['header']['msg_id'] except: self.log.error("Got bad msg: %s", parent, exc_info=True) 
return # Set the parent message of the display hook and out streams. self.shell.displayhook.set_parent(parent) self.shell.display_pub.set_parent(parent) sys.stdout.set_parent(parent) sys.stderr.set_parent(parent) # pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent) # self.iopub_socket.send(pyin_msg) # self.session.send(self.iopub_socket, u'pyin', {u'code':code},parent=parent) sub = self._make_subheader() try: working = self.shell.user_ns prefix = "_"+str(msg_id).replace("-","")+"_" f,args,kwargs = unpack_apply_message(bufs, working, copy=False) fname = getattr(f, '__name__', 'f') fname = prefix+"f" argname = prefix+"args" kwargname = prefix+"kwargs" resultname = prefix+"result" ns = { fname : f, argname : args, kwargname : kwargs , resultname : None } # print ns working.update(ns) code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname) try: exec code in self.shell.user_global_ns, self.shell.user_ns result = working.get(resultname) finally: for key in ns.iterkeys(): working.pop(key) packed_result,buf = serialize_object(result) result_buf = [packed_result]+buf except: exc_content = self._wrap_exception('apply') # exc_msg = self.session.msg(u'pyerr', exc_content, parent) self.session.send(self.iopub_socket, u'pyerr', exc_content, parent=parent, ident=self._topic('pyerr')) reply_content = exc_content result_buf = [] if exc_content['ename'] == 'UnmetDependency': sub['dependencies_met'] = False else: reply_content = {'status' : 'ok'} # put 'ok'/'error' status in header, for scheduler introspection: sub['status'] = reply_content['status'] # flush i/o sys.stdout.flush() sys.stderr.flush() reply_msg = self.session.send(stream, u'apply_reply', reply_content, parent=parent, ident=ident,buffers=result_buf, subheader=sub) #--------------------------------------------------------------------------- # Control messages #--------------------------------------------------------------------------- def abort_request(self, stream, ident, parent): """abort a 
specifig msg by id""" msg_ids = parent['content'].get('msg_ids', None) if isinstance(msg_ids, basestring): msg_ids = [msg_ids] if not msg_ids: self.abort_queues() for mid in msg_ids: self.aborted.add(str(mid)) content = dict(status='ok') reply_msg = self.session.send(stream, 'abort_reply', content=content, parent=parent, ident=ident) self.log.debug("%s", reply_msg) def clear_request(self, stream, idents, parent): """Clear our namespace.""" self.shell.reset(False) msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent, content = dict(status='ok')) #--------------------------------------------------------------------------- # Protected interface #--------------------------------------------------------------------------- def _wrap_exception(self, method=None): # import here, because _wrap_exception is only used in parallel, # and parallel has higher min pyzmq version from IPython.parallel.error import wrap_exception e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method) content = wrap_exception(e_info) return content def _topic(self, topic): """prefixed topic for IOPub messages""" if self.int_id >= 0: base = "engine.%i" % self.int_id else: base = "kernel.%s" % self.ident return py3compat.cast_bytes("%s.%s" % (base, topic)) def _abort_queues(self): for stream in self.shell_streams: if stream: self._abort_queue(stream) def _abort_queue(self, stream): poller = zmq.Poller() poller.register(stream.socket, zmq.POLLIN) while True: idents,msg = self.session.recv(stream, zmq.NOBLOCK, content=True) if msg is None: return self.log.info("Aborting:") self.log.info("%s", msg) msg_type = msg['header']['msg_type'] reply_type = msg_type.split('_')[0] + '_reply' status = {'status' : 'aborted'} sub = {'engine' : self.ident} sub.update(status) reply_msg = self.session.send(stream, reply_type, subheader=sub, content=status, parent=msg, ident=idents) self.log.debug("%s", reply_msg) # We need to wait a bit for requests to come in. 
This can probably # be set shorter for true asynchronous clients. poller.poll(50) def _no_raw_input(self): """Raise StdinNotImplentedError if active frontend doesn't support stdin.""" raise StdinNotImplementedError("raw_input was called, but this " "frontend does not support stdin.") def _raw_input(self, prompt, ident, parent): # Flush output before making the request. sys.stderr.flush() sys.stdout.flush() # Send the input request. content = json_clean(dict(prompt=prompt)) self.session.send(self.stdin_socket, u'input_request', content, parent, ident=ident) # Await a response. while True: try: ident, reply = self.session.recv(self.stdin_socket, 0) except Exception: self.log.warn("Invalid Message:", exc_info=True) else: break try: value = reply['content']['value'] except: self.log.error("Got bad raw_input reply: ") self.log.error("%s", parent) value = '' if value == '\x04': # EOF raise EOFError return value def _complete(self, msg): c = msg['content'] try: cpos = int(c['cursor_pos']) except: # If we don't get something that we can convert to an integer, at # least attempt the completion guessing the cursor is at the end of # the text, if there's any, and otherwise of the line cpos = len(c['text']) if cpos==0: cpos = len(c['line']) return self.shell.complete(c['text'], c['line'], cpos) def _object_info(self, context): symbol, leftover = self._symbol_from_context(context) if symbol is not None and not leftover: doc = getattr(symbol, '__doc__', '') else: doc = '' object_info = dict(docstring = doc) return object_info def _symbol_from_context(self, context): if not context: return None, context base_symbol_string = context[0] symbol = self.shell.user_ns.get(base_symbol_string, None) if symbol is None: symbol = __builtin__.__dict__.get(base_symbol_string, None) if symbol is None: return None, context context = context[1:] for i, name in enumerate(context): new_symbol = getattr(symbol, name, None) if new_symbol is None: return symbol, context[i:] else: symbol = new_symbol 
        # tail of _symbol_from_context (method body starts on the previous line)
        return symbol, []

    def _at_shutdown(self):
        """Actions taken at shutdown by the kernel, called by python's atexit.
        """
        # io.rprint("Kernel at_shutdown") # dbg
        if self._shutdown_message is not None:
            # broadcast the shutdown_reply saved by shutdown_request on IOPub
            self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown'))
            self.log.debug("%s", self._shutdown_message)
        # flush any outbound messages still queued on the shell streams
        [ s.flush(zmq.POLLOUT) for s in self.shell_streams ]


#-----------------------------------------------------------------------------
# Aliases and Flags for the IPKernelApp
#-----------------------------------------------------------------------------

flags = dict(kernel_flags)
flags.update(shell_flags)

addflag = lambda *args: flags.update(boolean_flag(*args))

flags['pylab'] = (
    {'IPKernelApp' : {'pylab' : 'auto'}},
    """Pre-load matplotlib and numpy for interactive use with
    the default matplotlib backend."""
)

aliases = dict(kernel_aliases)
aliases.update(shell_aliases)

# it's possible we don't want short aliases for *all* of these:
aliases.update(dict(
    pylab='IPKernelApp.pylab',
))

#-----------------------------------------------------------------------------
# The IPKernelApp class
#-----------------------------------------------------------------------------

class IPKernelApp(KernelApp, InteractiveShellApp):
    # Application that wires a Kernel to its sockets and an interactive shell.
    name = 'ipkernel'

    aliases = Dict(aliases)
    flags = Dict(flags)
    classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
    # configurables
    pylab = CaselessStrEnum(['tk', 'qt', 'wx', 'gtk', 'osx', 'inline', 'auto'],
        config=True,
        help="""Pre-load matplotlib and numpy for interactive use,
        selecting a particular matplotlib backend and loop integration.
        """
    )

    @catch_config_error
    def initialize(self, argv=None):
        """Standard app initialization, then path/shell/extension/code setup."""
        super(IPKernelApp, self).initialize(argv)
        self.init_path()
        self.init_shell()
        self.init_extensions()
        self.init_code()

    def init_kernel(self):
        """Create the Kernel, record its ports, and optionally enable pylab."""
        shell_stream = ZMQStream(self.shell_socket)
        kernel = Kernel(config=self.config, session=self.session,
                        shell_streams=[shell_stream],
                        iopub_socket=self.iopub_socket,
                        stdin_socket=self.stdin_socket,
                        log=self.log,
                        profile_dir=self.profile_dir,
        )
        self.kernel = kernel
        kernel.record_ports(self.ports)
        shell = kernel.shell
        if self.pylab:
            try:
                gui, backend = pylabtools.find_gui_and_backend(self.pylab)
                shell.enable_pylab(gui, import_all=self.pylab_import_all)
            except Exception:
                self.log.error("Pylab initialization failed", exc_info=True)
                # print exception straight to stdout, because normally
                # _showtraceback associates the reply with an execution,
                # which means frontends will never draw it, as this exception
                # is not associated with any execute request.

                # replace pyerr-sending traceback with stdout
                _showtraceback = shell._showtraceback
                def print_tb(etype, evalue, stb):
                    print ("Error initializing pylab, pylab mode will not "
                           "be active", file=io.stderr)
                    print (shell.InteractiveTB.stb2text(stb), file=io.stdout)
                shell._showtraceback = print_tb

                # send the traceback over stdout
                shell.showtraceback(tb_offset=0)

                # restore proper _showtraceback method
                shell._showtraceback = _showtraceback

    def init_shell(self):
        """Expose the kernel's shell on the app and register as configurable."""
        self.shell = self.kernel.shell
        self.shell.configurables.append(self)


#-----------------------------------------------------------------------------
# Kernel main and launch functions
#-----------------------------------------------------------------------------

def launch_kernel(*args, **kwargs):
    """Launches a localhost IPython kernel, binding to the specified ports.

    This function simply calls entry_point.base_launch_kernel with the right
    first command to start an ipkernel.  See base_launch_kernel for arguments.

    Returns
    -------
    A tuple of form:
        (kernel_process, shell_port, iopub_port, stdin_port, hb_port)
    where kernel_process is a Popen object and the ports are integers.
    """
    return base_launch_kernel('from IPython.zmq.ipkernel import main; main()',
                              *args, **kwargs)


def embed_kernel(module=None, local_ns=None, **kwargs):
    """Embed and start an IPython kernel in a given scope.

    Parameters
    ----------
    module : ModuleType, optional
        The module to load into IPython globals (default: caller)
    local_ns : dict, optional
        The namespace to load into IPython user namespace (default: caller)

    kwargs : various, optional
        Further keyword args are relayed to the KernelApp constructor,
        allowing configuration of the Kernel.  Will only have an effect
        on the first embed_kernel call for a given process.

    """
    try:
        from IPython.parallel.apps.ipengineapp import IPEngineApp
    except ImportError:
        # IPython.parallel has higher zmq dependency that IPython.zmq
        class IPEngineApp: pass
    # get the app if it exists, or set it up if it doesn't
    # NOTE(review): per the commit message appended below, this
    # engine-special-casing branch was subsequently reverted upstream —
    # confirm against the repository history.
    if Application.initialized():
        app = Application.instance()
        if isinstance(app, IPEngineApp):
            app.listen_kernel()
            # return right away, because we embed in the Engine's
            # namespace, not the calling one.
            return
    else:
        app = IPKernelApp.instance(**kwargs)
        app.initialize([])
        # Undo unnecessary sys module mangling from init_sys_modules.
        # This would not be necessary if we could prevent it
        # in the first place by using a different InteractiveShell
        # subclass, as in the regular embed case.
        main = app.kernel.shell._orig_sys_modules_main_mod
        if main is not None:
            sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main

    # load the calling scope if not given
    (caller_module, caller_locals) = extract_module_locals(1)
    if module is None:
        module = caller_module
    if local_ns is None:
        local_ns = caller_locals

    app.kernel.user_module = module
    app.kernel.user_ns = local_ns
    app.start()

def main():
    """Run an IPKernel as an application"""
    app = IPKernelApp.instance()
    app.initialize()
    app.start()


if __name__ == '__main__':
    main()
# NOTE(review): everything below appears to be a concatenation artifact —
# a VCS commit message followed by a duplicated (older) copy of this file's
# header, cut off mid-import.  Reproduced verbatim; it is not valid Python
# and should be removed from the file after confirming against the repo.
revert embed_kernel changes that implied bind_kernel in engine #!/usr/bin/env python """A simple interactive kernel that talks to a frontend over 0MQ. Things to do: * Implement `set_parent` logic. Right before doing exec, the Kernel should call set_parent on all the PUB objects with the message about to be executed. * Implement random port and security key logic. * Implement control messages. * Implement event loop and poll version. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Standard library imports import __builtin__ import atexit import sys import time import traceback import logging import uuid from datetime import datetime from signal import ( signal, getsignal, default_int_handler, SIGINT, SIG_IGN ) # System library imports import zmq from zmq.eventloop import ioloop from zmq.eventloop.zmqstream import ZMQStream # Local imports from IPython.core import pylabtools from IPython.config.configurable import Configurable from IPython.config.application import boolean_flag, catch_config_error from IPython.core.application import ProfileDir from IPython.core.error import StdinNotImplementedError from IPython.core.shellapp import ( InteractiveShellApp, shell_flags, shell_aliases ) from IPython.utils import io from IPython.utils import py3compat from IPython.utils.frame
import extract_module_locals from IPython.utils.jsonutil import json_clean from IPython.utils.traitlets import ( Any, Instance, Float, Dict, CaselessStrEnum, List, Set, Integer, Unicode ) from entry_point import base_launch_kernel from kernelapp import KernelApp, kernel_flags, kernel_aliases from serialize import serialize_object, unpack_apply_message from session import Session, Message from zmqshell import ZMQInteractiveShell #----------------------------------------------------------------------------- # Main kernel class #----------------------------------------------------------------------------- class Kernel(Configurable): #--------------------------------------------------------------------------- # Kernel interface #--------------------------------------------------------------------------- # attribute to override with a GUI eventloop = Any(None) def _eventloop_changed(self, name, old, new): """schedule call to eventloop from IOLoop""" loop = ioloop.IOLoop.instance() loop.add_timeout(time.time()+0.1, self.enter_eventloop) shell = Instance('IPython.core.interactiveshell.InteractiveShellABC') session = Instance(Session) profile_dir = Instance('IPython.core.profiledir.ProfileDir') shell_streams = List() control_stream = Instance(ZMQStream) iopub_socket = Instance(zmq.Socket) stdin_socket = Instance(zmq.Socket) log = Instance(logging.Logger) user_module = Any() def _user_module_changed(self, name, old, new): if self.shell is not None: self.shell.user_module = new user_ns = Dict(default_value=None) def _user_ns_changed(self, name, old, new): if self.shell is not None: self.shell.user_ns = new self.shell.init_user_ns() # identities: int_id = Integer(-1) ident = Unicode() def _ident_default(self): return unicode(uuid.uuid4()) # Private interface # Time to sleep after flushing the stdout/err buffers in each execute # cycle. 
While this introduces a hard limit on the minimal latency of the # execute cycle, it helps prevent output synchronization problems for # clients. # Units are in seconds. The minimum zmq latency on local host is probably # ~150 microseconds, set this to 500us for now. We may need to increase it # a little if it's not enough after more interactive testing. _execute_sleep = Float(0.0005, config=True) # Frequency of the kernel's event loop. # Units are in seconds, kernel subclasses for GUI toolkits may need to # adapt to milliseconds. _poll_interval = Float(0.05, config=True) # If the shutdown was requested over the network, we leave here the # necessary reply message so it can be sent by our registered atexit # handler. This ensures that the reply is only sent to clients truly at # the end of our shutdown process (which happens after the underlying # IPython shell's own shutdown). _shutdown_message = None # This is a dict of port number that the kernel is listening on. It is set # by record_ports and used by connect_request. 
_recorded_ports = Dict() # set of aborted msg_ids aborted = Set() def __init__(self, **kwargs): super(Kernel, self).__init__(**kwargs) # Initialize the InteractiveShell subclass self.shell = ZMQInteractiveShell.instance(config=self.config, profile_dir = self.profile_dir, user_module = self.user_module, user_ns = self.user_ns, ) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('pyout') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket # TMP - hack while developing self.shell._reply_content = None # Build dict of handlers for message types msg_types = [ 'execute_request', 'complete_request', 'object_info_request', 'history_request', 'connect_request', 'shutdown_request', 'apply_request', ] self.shell_handlers = {} for msg_type in msg_types: self.shell_handlers[msg_type] = getattr(self, msg_type) control_msg_types = msg_types + [ 'clear_request', 'abort_request' ] self.control_handlers = {} for msg_type in control_msg_types: self.control_handlers[msg_type] = getattr(self, msg_type) def dispatch_control(self, msg): """dispatch control requests""" idents,msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.unserialize(msg, content=True, copy=False) except: self.log.error("Invalid Control Message", exc_info=True) return self.log.debug("Control received: %s", msg) header = msg['header'] msg_id = header['msg_id'] msg_type = header['msg_type'] handler = self.control_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type) else: try: handler(self.control_stream, idents, msg) except Exception: self.log.error("Exception in control handler:", exc_info=True) def dispatch_shell(self, stream, msg): """dispatch shell requests""" # flush control requests first if self.control_stream: self.control_stream.flush() idents,msg = self.session.feed_identities(msg, 
copy=False) try: msg = self.session.unserialize(msg, content=True, copy=False) except: self.log.error("Invalid Message", exc_info=True) return header = msg['header'] msg_id = header['msg_id'] msg_type = msg['header']['msg_type'] # Print some info about this message and leave a '--->' marker, so it's # easier to trace visually the message chain when debugging. Each # handler prints its message at the end. self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type) self.log.debug(' Content: %s\n --->\n ', msg['content']) if msg_id in self.aborted: self.aborted.remove(msg_id) # is it safe to assume a msg_id will not be resubmitted? reply_type = msg_type.split('_')[0] + '_reply' status = {'status' : 'aborted'} sub = {'engine' : self.ident} sub.update(status) reply_msg = self.session.send(stream, reply_type, subheader=sub, content=status, parent=msg, ident=idents) return handler = self.shell_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type) else: # ensure default_int_handler during handler call sig = signal(SIGINT, default_int_handler) try: handler(stream, idents, msg) except Exception: self.log.error("Exception in message handler:", exc_info=True) finally: signal(SIGINT, sig) def enter_eventloop(self): """enter eventloop""" self.log.info("entering eventloop") # restore default_int_handler signal(SIGINT, default_int_handler) while self.eventloop is not None: try: self.eventloop(self) except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel self.log.error("KeyboardInterrupt caught in kernel") continue else: # eventloop exited cleanly, this means we should stop (right?) 
self.eventloop = None break self.log.info("exiting eventloop") # if eventloop exits, IOLoop should stop ioloop.IOLoop.instance().stop() def start(self): """register dispatchers for streams""" self.shell.exit_now = False if self.control_stream: self.control_stream.on_recv(self.dispatch_control, copy=False) def make_dispatcher(stream): def dispatcher(msg): return self.dispatch_shell(stream, msg) return dispatcher for s in self.shell_streams: s.on_recv(make_dispatcher(s), copy=False) def do_one_iteration(self): """step eventloop just once""" if self.control_stream: self.control_stream.flush() for stream in self.shell_streams: # handle at most one request per iteration stream.flush(zmq.POLLIN, 1) stream.flush(zmq.POLLOUT) def record_ports(self, ports): """Record the ports that this kernel is using. The creator of the Kernel instance must call this methods if they want the :meth:`connect_request` method to return the port numbers. """ self._recorded_ports = ports #--------------------------------------------------------------------------- # Kernel request handlers #--------------------------------------------------------------------------- def _make_subheader(self): """init subheader dict, for execute/apply_reply""" return { 'dependencies_met' : True, 'engine' : self.ident, 'started': datetime.now(), } def _publish_pyin(self, code, parent, execution_count): """Publish the code request on the pyin stream.""" self.session.send(self.iopub_socket, u'pyin', {u'code':code, u'execution_count': execution_count}, parent=parent, ident=self._topic('pyin') ) def execute_request(self, stream, ident, parent): self.session.send(self.iopub_socket, u'status', {u'execution_state':u'busy'}, parent=parent, ident=self._topic('status'), ) try: content = parent[u'content'] code = content[u'code'] silent = content[u'silent'] except: self.log.error("Got bad msg: ") self.log.error("%s", parent) return sub = self._make_subheader() shell = self.shell # we'll need this a lot here # Replace 
raw_input. Note that is not sufficient to replace # raw_input in the user namespace. if content.get('allow_stdin', False): raw_input = lambda prompt='': self._raw_input(prompt, ident, parent) else: raw_input = lambda prompt='' : self._no_raw_input() if py3compat.PY3: __builtin__.input = raw_input else: __builtin__.raw_input = raw_input # Set the parent message of the display hook and out streams. shell.displayhook.set_parent(parent) shell.display_pub.set_parent(parent) sys.stdout.set_parent(parent) sys.stderr.set_parent(parent) # Re-broadcast our input for the benefit of listening clients, and # start computing output if not silent: self._publish_pyin(code, parent, shell.execution_count) reply_content = {} try: # FIXME: the shell calls the exception handler itself. shell.run_cell(code, store_history=not silent, silent=silent) except: status = u'error' # FIXME: this code right now isn't being used yet by default, # because the run_cell() call above directly fires off exception # reporting. This code, therefore, is only active in the scenario # where runlines itself has an unhandled exception. We need to # uniformize this, for all exception construction to come from a # single location in the codbase. etype, evalue, tb = sys.exc_info() tb_list = traceback.format_exception(etype, evalue, tb) reply_content.update(shell._showtraceback(etype, evalue, tb_list)) else: status = u'ok' reply_content[u'status'] = status # Return the execution counter so clients can display prompts reply_content['execution_count'] = shell.execution_count - 1 # FIXME - fish exception info out of shell, possibly left there by # runlines. We'll need to clean up this logic later. if shell._reply_content is not None: reply_content.update(shell._reply_content) # reset after use shell._reply_content = None # At this point, we can tell whether the main code execution succeeded # or not. 
If it did, we proceed to evaluate user_variables/expressions if reply_content['status'] == 'ok': reply_content[u'user_variables'] = \ shell.user_variables(content.get(u'user_variables', [])) reply_content[u'user_expressions'] = \ shell.user_expressions(content.get(u'user_expressions', {})) else: # If there was an error, don't even try to compute variables or # expressions reply_content[u'user_variables'] = {} reply_content[u'user_expressions'] = {} # Payloads should be retrieved regardless of outcome, so we can both # recover partial output (that could have been generated early in a # block, before an error) and clear the payload system always. reply_content[u'payload'] = shell.payload_manager.read_payload() # Be agressive about clearing the payload because we don't want # it to sit in memory until the next execute_request comes in. shell.payload_manager.clear_payload() # Flush output before sending the reply. sys.stdout.flush() sys.stderr.flush() # FIXME: on rare occasions, the flush doesn't seem to make it to the # clients... This seems to mitigate the problem, but we definitely need # to better understand what's going on. if self._execute_sleep: time.sleep(self._execute_sleep) # Send the reply. 
reply_content = json_clean(reply_content) sub['status'] = reply_content['status'] if reply_content['status'] == 'error' and \ reply_content['ename'] == 'UnmetDependency': sub['dependencies_met'] = False reply_msg = self.session.send(stream, u'execute_reply', reply_content, parent, subheader=sub, ident=ident) self.log.debug("%s", reply_msg) if not silent and reply_msg['content']['status'] == u'error': self._abort_queues() self.session.send(self.iopub_socket, u'status', {u'execution_state':u'idle'}, parent=parent, ident=self._topic('status')) def complete_request(self, stream, ident, parent): txt, matches = self._complete(parent) matches = {'matches' : matches, 'matched_text' : txt, 'status' : 'ok'} matches = json_clean(matches) completion_msg = self.session.send(stream, 'complete_reply', matches, parent, ident) self.log.debug("%s", completion_msg) def object_info_request(self, stream, ident, parent): content = parent['content'] object_info = self.shell.object_inspect(content['oname'], detail_level = content.get('detail_level', 0) ) # Before we send this object over, we scrub it for JSON usage oinfo = json_clean(object_info) msg = self.session.send(stream, 'object_info_reply', oinfo, parent, ident) self.log.debug("%s", msg) def history_request(self, stream, ident, parent): # We need to pull these out, as passing **kwargs doesn't work with # unicode keys before Python 2.6.5. 
hist_access_type = parent['content']['hist_access_type'] raw = parent['content']['raw'] output = parent['content']['output'] if hist_access_type == 'tail': n = parent['content']['n'] hist = self.shell.history_manager.get_tail(n, raw=raw, output=output, include_latest=True) elif hist_access_type == 'range': session = parent['content']['session'] start = parent['content']['start'] stop = parent['content']['stop'] hist = self.shell.history_manager.get_range(session, start, stop, raw=raw, output=output) elif hist_access_type == 'search': pattern = parent['content']['pattern'] hist = self.shell.history_manager.search(pattern, raw=raw, output=output) else: hist = [] hist = list(hist) content = {'history' : hist} content = json_clean(content) msg = self.session.send(stream, 'history_reply', content, parent, ident) self.log.debug("Sending history reply with %i entries", len(hist)) def connect_request(self, stream, ident, parent): if self._recorded_ports is not None: content = self._recorded_ports.copy() else: content = {} msg = self.session.send(stream, 'connect_reply', content, parent, ident) self.log.debug("%s", msg) def shutdown_request(self, stream, ident, parent): self.shell.exit_now = True content = dict(status='ok') content.update(parent['content']) self.session.send(stream, u'shutdown_reply', content, parent, ident=ident) # same content, but different msg_id for broadcasting on IOPub self._shutdown_message = self.session.msg(u'shutdown_reply', content, parent ) self._at_shutdown() # call sys.exit after a short delay loop = ioloop.IOLoop.instance() loop.add_timeout(time.time()+0.1, loop.stop) #--------------------------------------------------------------------------- # Engine methods #--------------------------------------------------------------------------- def apply_request(self, stream, ident, parent): try: content = parent[u'content'] bufs = parent[u'buffers'] msg_id = parent['header']['msg_id'] except: self.log.error("Got bad msg: %s", parent, exc_info=True) 
return # Set the parent message of the display hook and out streams. self.shell.displayhook.set_parent(parent) self.shell.display_pub.set_parent(parent) sys.stdout.set_parent(parent) sys.stderr.set_parent(parent) # pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent) # self.iopub_socket.send(pyin_msg) # self.session.send(self.iopub_socket, u'pyin', {u'code':code},parent=parent) sub = self._make_subheader() try: working = self.shell.user_ns prefix = "_"+str(msg_id).replace("-","")+"_" f,args,kwargs = unpack_apply_message(bufs, working, copy=False) fname = getattr(f, '__name__', 'f') fname = prefix+"f" argname = prefix+"args" kwargname = prefix+"kwargs" resultname = prefix+"result" ns = { fname : f, argname : args, kwargname : kwargs , resultname : None } # print ns working.update(ns) code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname) try: exec code in self.shell.user_global_ns, self.shell.user_ns result = working.get(resultname) finally: for key in ns.iterkeys(): working.pop(key) packed_result,buf = serialize_object(result) result_buf = [packed_result]+buf except: exc_content = self._wrap_exception('apply') # exc_msg = self.session.msg(u'pyerr', exc_content, parent) self.session.send(self.iopub_socket, u'pyerr', exc_content, parent=parent, ident=self._topic('pyerr')) reply_content = exc_content result_buf = [] if exc_content['ename'] == 'UnmetDependency': sub['dependencies_met'] = False else: reply_content = {'status' : 'ok'} # put 'ok'/'error' status in header, for scheduler introspection: sub['status'] = reply_content['status'] # flush i/o sys.stdout.flush() sys.stderr.flush() reply_msg = self.session.send(stream, u'apply_reply', reply_content, parent=parent, ident=ident,buffers=result_buf, subheader=sub) #--------------------------------------------------------------------------- # Control messages #--------------------------------------------------------------------------- def abort_request(self, stream, ident, parent): """abort a 
specifig msg by id""" msg_ids = parent['content'].get('msg_ids', None) if isinstance(msg_ids, basestring): msg_ids = [msg_ids] if not msg_ids: self.abort_queues() for mid in msg_ids: self.aborted.add(str(mid)) content = dict(status='ok') reply_msg = self.session.send(stream, 'abort_reply', content=content, parent=parent, ident=ident) self.log.debug("%s", reply_msg) def clear_request(self, stream, idents, parent): """Clear our namespace.""" self.shell.reset(False) msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent, content = dict(status='ok')) #--------------------------------------------------------------------------- # Protected interface #--------------------------------------------------------------------------- def _wrap_exception(self, method=None): # import here, because _wrap_exception is only used in parallel, # and parallel has higher min pyzmq version from IPython.parallel.error import wrap_exception e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method) content = wrap_exception(e_info) return content def _topic(self, topic): """prefixed topic for IOPub messages""" if self.int_id >= 0: base = "engine.%i" % self.int_id else: base = "kernel.%s" % self.ident return py3compat.cast_bytes("%s.%s" % (base, topic)) def _abort_queues(self): for stream in self.shell_streams: if stream: self._abort_queue(stream) def _abort_queue(self, stream): poller = zmq.Poller() poller.register(stream.socket, zmq.POLLIN) while True: idents,msg = self.session.recv(stream, zmq.NOBLOCK, content=True) if msg is None: return self.log.info("Aborting:") self.log.info("%s", msg) msg_type = msg['header']['msg_type'] reply_type = msg_type.split('_')[0] + '_reply' status = {'status' : 'aborted'} sub = {'engine' : self.ident} sub.update(status) reply_msg = self.session.send(stream, reply_type, subheader=sub, content=status, parent=msg, ident=idents) self.log.debug("%s", reply_msg) # We need to wait a bit for requests to come in. 
This can probably # be set shorter for true asynchronous clients. poller.poll(50) def _no_raw_input(self): """Raise StdinNotImplentedError if active frontend doesn't support stdin.""" raise StdinNotImplementedError("raw_input was called, but this " "frontend does not support stdin.") def _raw_input(self, prompt, ident, parent): # Flush output before making the request. sys.stderr.flush() sys.stdout.flush() # Send the input request. content = json_clean(dict(prompt=prompt)) self.session.send(self.stdin_socket, u'input_request', content, parent, ident=ident) # Await a response. while True: try: ident, reply = self.session.recv(self.stdin_socket, 0) except Exception: self.log.warn("Invalid Message:", exc_info=True) else: break try: value = reply['content']['value'] except: self.log.error("Got bad raw_input reply: ") self.log.error("%s", parent) value = '' if value == '\x04': # EOF raise EOFError return value def _complete(self, msg): c = msg['content'] try: cpos = int(c['cursor_pos']) except: # If we don't get something that we can convert to an integer, at # least attempt the completion guessing the cursor is at the end of # the text, if there's any, and otherwise of the line cpos = len(c['text']) if cpos==0: cpos = len(c['line']) return self.shell.complete(c['text'], c['line'], cpos) def _object_info(self, context): symbol, leftover = self._symbol_from_context(context) if symbol is not None and not leftover: doc = getattr(symbol, '__doc__', '') else: doc = '' object_info = dict(docstring = doc) return object_info def _symbol_from_context(self, context): if not context: return None, context base_symbol_string = context[0] symbol = self.shell.user_ns.get(base_symbol_string, None) if symbol is None: symbol = __builtin__.__dict__.get(base_symbol_string, None) if symbol is None: return None, context context = context[1:] for i, name in enumerate(context): new_symbol = getattr(symbol, name, None) if new_symbol is None: return symbol, context[i:] else: symbol = new_symbol 
return symbol, [] def _at_shutdown(self): """Actions taken at shutdown by the kernel, called by python's atexit. """ # io.rprint("Kernel at_shutdown") # dbg if self._shutdown_message is not None: self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown')) self.log.debug("%s", self._shutdown_message) [ s.flush(zmq.POLLOUT) for s in self.shell_streams ] #----------------------------------------------------------------------------- # Aliases and Flags for the IPKernelApp #----------------------------------------------------------------------------- flags = dict(kernel_flags) flags.update(shell_flags) addflag = lambda *args: flags.update(boolean_flag(*args)) flags['pylab'] = ( {'IPKernelApp' : {'pylab' : 'auto'}}, """Pre-load matplotlib and numpy for interactive use with the default matplotlib backend.""" ) aliases = dict(kernel_aliases) aliases.update(shell_aliases) # it's possible we don't want short aliases for *all* of these: aliases.update(dict( pylab='IPKernelApp.pylab', )) #----------------------------------------------------------------------------- # The IPKernelApp class #----------------------------------------------------------------------------- class IPKernelApp(KernelApp, InteractiveShellApp): name = 'ipkernel' aliases = Dict(aliases) flags = Dict(flags) classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session] # configurables pylab = CaselessStrEnum(['tk', 'qt', 'wx', 'gtk', 'osx', 'inline', 'auto'], config=True, help="""Pre-load matplotlib and numpy for interactive use, selecting a particular matplotlib backend and loop integration. 
""" ) @catch_config_error def initialize(self, argv=None): super(IPKernelApp, self).initialize(argv) self.init_path() self.init_shell() self.init_extensions() self.init_code() def init_kernel(self): shell_stream = ZMQStream(self.shell_socket) kernel = Kernel(config=self.config, session=self.session, shell_streams=[shell_stream], iopub_socket=self.iopub_socket, stdin_socket=self.stdin_socket, log=self.log, profile_dir=self.profile_dir, ) self.kernel = kernel kernel.record_ports(self.ports) shell = kernel.shell if self.pylab: try: gui, backend = pylabtools.find_gui_and_backend(self.pylab) shell.enable_pylab(gui, import_all=self.pylab_import_all) except Exception: self.log.error("Pylab initialization failed", exc_info=True) # print exception straight to stdout, because normally # _showtraceback associates the reply with an execution, # which means frontends will never draw it, as this exception # is not associated with any execute request. # replace pyerr-sending traceback with stdout _showtraceback = shell._showtraceback def print_tb(etype, evalue, stb): print ("Error initializing pylab, pylab mode will not " "be active", file=io.stderr) print (shell.InteractiveTB.stb2text(stb), file=io.stdout) shell._showtraceback = print_tb # send the traceback over stdout shell.showtraceback(tb_offset=0) # restore proper _showtraceback method shell._showtraceback = _showtraceback def init_shell(self): self.shell = self.kernel.shell self.shell.configurables.append(self) #----------------------------------------------------------------------------- # Kernel main and launch functions #----------------------------------------------------------------------------- def launch_kernel(*args, **kwargs): """Launches a localhost IPython kernel, binding to the specified ports. This function simply calls entry_point.base_launch_kernel with the right first command to start an ipkernel. See base_launch_kernel for arguments. 
Returns ------- A tuple of form: (kernel_process, shell_port, iopub_port, stdin_port, hb_port) where kernel_process is a Popen object and the ports are integers. """ return base_launch_kernel('from IPython.zmq.ipkernel import main; main()', *args, **kwargs) def embed_kernel(module=None, local_ns=None, **kwargs): """Embed and start an IPython kernel in a given scope. Parameters ---------- module : ModuleType, optional The module to load into IPython globals (default: caller) local_ns : dict, optional The namespace to load into IPython user namespace (default: caller) kwargs : various, optional Further keyword args are relayed to the KernelApp constructor, allowing configuration of the Kernel. Will only have an effect on the first embed_kernel call for a given process. """ # get the app if it exists, or set it up if it doesn't if IPKernelApp.initialized(): app = IPKernelApp.instance() else: app = IPKernelApp.instance(**kwargs) app.initialize([]) # Undo unnecessary sys module mangling from init_sys_modules. # This would not be necessary if we could prevent it # in the first place by using a different InteractiveShell # subclass, as in the regular embed case. main = app.kernel.shell._orig_sys_modules_main_mod if main is not None: sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main # load the calling scope if not given (caller_module, caller_locals) = extract_module_locals(1) if module is None: module = caller_module if local_ns is None: local_ns = caller_locals app.kernel.user_module = module app.kernel.user_ns = local_ns app.start() def main(): """Run an IPKernel as an application""" app = IPKernelApp.instance() app.initialize() app.start() if __name__ == '__main__': main()
# Testing for the whole ISgeneTK package import mods.gel_visualizer as gv import mods.SeqProp as sp import mods.plasmid_builder as pb import mods.MSM as msm def setup_module(module): print ("") # this is to get a newline after the dots print ("Initializing GeneTK Test...") def teardown_module(module): print ("Finalizing GeneTK Test...") def is_setup_function(): print ("Setting up data tests") def is_teardown_function(): print ("Tearing down data tests") @with_setup(is_setup_function, is_teardown_function) def test_data(): print 'Checking test data' # assert multiply(3,4) == 12 <--- Do some assertions with all test data class TestGene: def setup(self): print ("Setting up next test") def teardown(self): print ("Tearing down test") @classmethod def setup(self): print ("Setting up test") @classmethod def teardown(self): print ("Tearing down test") def test_GelViz(self): print ("Testing Gel.Viz...") # Testing that digestSeq() works max_lengths, lengths_lists = gv.digestSeq() # Fill in with inputs assert len(max_lengths) =< len(length_lists), "Please make sure that your inputs are structured correctly." 
for length in max_length: assert isinstance (length, int), "Please make sure input is formatted correctly" for lengths in lengths_list: for lenght in lengths: assert isinstance (length, int), "Please make sure input is formatted correctly" # Testing that bigDraw() works # Testing that smallDraw() works def test_SeqProp(self): print ("Testing SeqProp") # break up into smaller functions and then assert for each # if no re sites, start, stop, or if tm range bad print warning def test_BUILDR(self): print ("Testing BUILDR") # Wait until BUILDR is finished def test_MSM(self): print ("Testing MSM") # Wait until MSM is finished Added SeqProp test script # Testing for the whole ISgeneTK package import mods.gel_visualizer as gv import mods.SeqProp as sp import mods.plasmid_builder as pb import mods.MSM as msm def setup_module(module): print ("") # this is to get a newline after the dots print ("Initializing GeneTK Test...") def teardown_module(module): print ("Finalizing GeneTK Test...") def is_setup_function(): print ("Setting up data tests") def is_teardown_function(): print ("Tearing down data tests") @with_setup(is_setup_function, is_teardown_function) def test_data(): print 'Checking test data' # assert multiply(3,4) == 12 <--- Do some assertions with all test data class TestGene: def setup(self): print ("Setting up next test") def teardown(self): print ("Tearing down test") @classmethod def setup(self): print ("Setting up test") @classmethod def teardown(self): print ("Tearing down test") def test_GelViz(self): print ("Testing Gel.Viz...") # Testing that digestSeq() works max_lengths, lengths_lists = gv.digestSeq() # NEED SAMPLE SEQUENCE assert len(max_lengths) ==< len(length_lists), "Please make sure that your inputs are structured correctly." for length in max_length: assert isinstance (length, int), "Please make sure input is formatted correctly." 
for lengths in lengths_list: for length in lengths: assert isinstance (length, int), "Please make sure input is formatted correctly." # TEST THAT LENGTHS CORRECT FOR TEST DATA assert lengths_lists == [[val, val], [val, val], [val, val]], "Restriction lengths do match correct lengths." # Testing that bigDraw() works # Testing that smallDraw() works def test_SeqProp(self): print ("Testing SeqProp...") # Testing GC, Tm outputs # NEED SAMPLE SEQUENCES X 2 (one small < 14, one big > 15) gcc_small, gcn_small = sp.getGC(small_seq) # SMALL SEQUENCE HERE assert gcc_small == Ns, "Small oligomer GC content does not match." # KNOWN GCC gcc_big, gcn_big = sp.getGC(big_seq) # BIG SEQ HERE assert gcc_big == Nb, "Large oligomer GC content does not match." # KNOWN GCC Tm = sp.getTm(big_seq, gcc_big) assert Tm == val, "Melting temperature does not match." # Reverse Complement assert len(big_seq) == len(sp.getRevComp(big_seq)), "Reverse complement length does not match." # Testing Start, Stop, Exon, and Re Site Indices and Warnings starts, stops = sp.getStartStop(big_seq) assert len(starts) == val, "Number of start codon indices do not match." assert len(stops) == val, "Number of stop codon indices do not match." exons = sp.getExons(big_seq, starts, stops) assert len(exons) == min([len(starts), len(stops)]), "Error with exon detection; number of exons does not match min(codons)" # READ RE SITES HERE re_sites = sp.getRe(sequence, re_sites) assert len(re_sites) == val, "Number of restriction sites do not match." def test_BUILDR(self): print ("Testing BUILDR...") # Wait until BUILDR is finished def test_MSM(self): print ("Testing MSM...") # Wait until MSM is finished
import hashlib try: from urllib.request import urlopen except ImportError: # Python 2 from urllib2 import urlopen from django.core.exceptions import ImproperlyConfigured from django.core.files.base import ContentFile from django.core.files.storage import FileSystemStorage from django.contrib.staticfiles.finders import BaseFinder from django.contrib.staticfiles.utils import matches_patterns from django.conf import settings hash_func_map = { 'md5': hashlib.md5, 'sha1': hashlib.sha1, 'sha224': hashlib.sha224, 'sha256': hashlib.sha256, 'sha384': hashlib.sha384, 'sha512': hashlib.sha512, } class RemoteFinder(BaseFinder): def __init__(self): self.cache_dir = settings.REMOTE_FINDER_CACHE_DIR self.storage = FileSystemStorage(self.cache_dir) resources_setting = settings.REMOTE_FINDER_RESOURCES if not isinstance(resources_setting, (list, tuple)): raise ImproperlyConfigured("settings.REMOTE_FINDER_RESOURCES must be a list or tuple") resources = {} for resource in resources_setting: try: path, url, cksm = resource except ValueError: raise ImproperlyConfigured("Each item in settings.REMOTE_FINDER_RESOURCES must be a tuple of three elements (path, url, cksm).") try: hash_type, expected_hexdigest = cksm.split(':') except ValueError: raise ImproperlyConfigured("RemoteFinder checksum `%s` is not in `hash_type:hexdigest` format." 
% cksm) try: hash_func = hash_func_map[hash_type] except KeyError: raise ImproperlyConfigured("RemoteFinder: hash type `%s` unknown" % hash_type) try: expected_digest = bytearray.fromhex(expected_hexdigest) except ValueError: raise ImproperlyConfigured("Cannot parse hex string in settings.REMOTE_FINDER_RESOURCES: `%s`" % expected_hexdigest) if len(expected_digest) != hash_func().digest_size: raise ImproperlyConfigured("settings.REMOTE_FINDER_RESOURCES: %s digest expected %d bytes but %d provided: `%s`" % (hash_type, hash_func().digest_size, len(expected_digest), expected_hexdigest)) resources[path] = (url, hash_func, expected_digest) self.resources = resources def find(self, path, all=False): try: fetch_info = self.resources[path] except KeyError: return [] self.fetch(path, fetch_info) match = self.storage.path(path) if all: return [match] else: return match def fetch(self, path, fetch_info): if self.storage.exists(path): return url, hash_func, expected_digest = fetch_info # download the file f = urlopen(url) try: content = f.read() finally: f.close() # check its hash digest = hash_func(content).digest() if digest != expected_digest: raise Exception("Digest does not match!") # save it name = self.storage.save(path, ContentFile(content)) if name != path: print("Warning: %r != %r" % (name, path)) def list(self, ignore_patterns): for path, fetch_info in self.resources.items(): if matches_patterns(path, ignore_patterns): continue self.fetch(path, fetch_info) yield path, self.storage # fixme: make a way to verify all hashes are correct, either on the cmdline or # all the time additional errors if required settings are not defined import hashlib import logging try: from urllib.request import urlopen except ImportError: # Python 2 from urllib2 import urlopen from django.core.exceptions import ImproperlyConfigured from django.core.files.base import ContentFile from django.core.files.storage import FileSystemStorage from django.contrib.staticfiles.finders import BaseFinder 
from django.contrib.staticfiles.utils import matches_patterns from django.conf import settings logger = logging.getLogger(__name__) hash_func_map = { 'md5': hashlib.md5, 'sha1': hashlib.sha1, 'sha224': hashlib.sha224, 'sha256': hashlib.sha256, 'sha384': hashlib.sha384, 'sha512': hashlib.sha512, } class RemoteFinder(BaseFinder): def __init__(self): self.cache_dir = getattr(settings, "REMOTE_FINDER_CACHE_DIR", None) if not self.cache_dir: raise ImproperlyConfigured("settings.REMOTE_FINDER_CACHE_DIR must point to a cache directory.") self.storage = FileSystemStorage(self.cache_dir) try: resources_setting = settings.REMOTE_FINDER_RESOURCES except AttributeError: logger.warning("RemoteFinder is enabled, but settings.REMOTE_FINDER_RESOURCES is not defined.") resources_setting = () if not isinstance(resources_setting, (list, tuple)): raise ImproperlyConfigured("settings.REMOTE_FINDER_RESOURCES must be a list or tuple") resources = {} for resource in resources_setting: try: path, url, cksm = resource except ValueError: raise ImproperlyConfigured("Each item in settings.REMOTE_FINDER_RESOURCES must be a tuple of three elements (path, url, cksm).") try: hash_type, expected_hexdigest = cksm.split(':') except ValueError: raise ImproperlyConfigured("RemoteFinder checksum `%s` is not in `hash_type:hexdigest` format." 
% cksm) try: hash_func = hash_func_map[hash_type] except KeyError: raise ImproperlyConfigured("RemoteFinder: hash type `%s` unknown" % hash_type) try: expected_digest = bytearray.fromhex(expected_hexdigest) except ValueError: raise ImproperlyConfigured("Cannot parse hex string in settings.REMOTE_FINDER_RESOURCES: `%s`" % expected_hexdigest) if len(expected_digest) != hash_func().digest_size: raise ImproperlyConfigured("settings.REMOTE_FINDER_RESOURCES: %s digest expected %d bytes but %d provided: `%s`" % (hash_type, hash_func().digest_size, len(expected_digest), expected_hexdigest)) resources[path] = (url, hash_func, expected_digest) self.resources = resources def find(self, path, all=False): try: fetch_info = self.resources[path] except KeyError: return [] self.fetch(path, fetch_info) match = self.storage.path(path) if all: return [match] else: return match def fetch(self, path, fetch_info): if self.storage.exists(path): return url, hash_func, expected_digest = fetch_info # download the file f = urlopen(url) try: content = f.read() finally: f.close() # check its hash digest = hash_func(content).digest() if digest != expected_digest: raise Exception("Digest does not match!") # save it name = self.storage.save(path, ContentFile(content)) if name != path: print("Warning: %r != %r" % (name, path)) def list(self, ignore_patterns): for path, fetch_info in self.resources.items(): if matches_patterns(path, ignore_patterns): continue self.fetch(path, fetch_info) yield path, self.storage # fixme: make a way to verify all hashes are correct, either on the cmdline or # all the time
# # Copyright (C) 2012 Ash (Tuxdude) <tuxdude.github@gmail.com> # # This file is part of repobuddy. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program. If not, see # <http://www.gnu.org/licenses/>. # import os as _os import shlex as _shlex import shutil as _shutil import subprocess as _subprocess import sys as _sys import traceback as _traceback if _sys.version_info < (2, 7): # pylint: disable=F0401 import cStringIO as _io import ordereddict as _collections import unittest2 as _unittest else: # pylint: disable=F0401 if _sys.version_info < (3, 0): import cStringIO as _io else: import io as _io import collections as _collections import unittest as _unittest from repobuddy.utils import RepoBuddyBaseException, Logger class ShellError(RepoBuddyBaseException): def __init__(self, error_str): super(ShellError, self).__init__(error_str) return class ShellHelper: # pylint: disable=W0232 @classmethod def exec_command(cls, command, base_dir, debug_output=True): Logger.msg('>> ' + ' '.join(command)) try: kwargs = {} return_code = None if not debug_output: kwargs['stdout'] = open(_os.devnull, 'w') kwargs['stderr'] = _subprocess.STDOUT proc = _subprocess.Popen(command, # pylint: disable=W0142 cwd=base_dir, **kwargs) try: proc.communicate() except: proc.kill() proc.wait() raise return_code = proc.poll() if return_code != 0: raise ShellError('Command \'%s\' failed!' 
% command) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def append_text_to_file(cls, text, filename, base_dir): try: with open(_os.path.join(base_dir, filename), 'a') as file_handle: file_handle.write(text) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def remove_file(cls, filename): try: _os.unlink(filename) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def make_dir(cls, dirname, create_parent_dirs=False, only_if_not_exists=False): if not create_parent_dirs: try: if not only_if_not_exists or not _os.path.exists(dirname): _os.mkdir(dirname) except (OSError, IOError) as err: raise ShellError(str(err)) else: try: if not only_if_not_exists or not _os.path.exists(dirname): _os.makedirs(dirname) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def remove_dir(cls, dirname): if _os.path.isdir(dirname): try: _shutil.rmtree(dirname, ignore_errors=False) except (OSError, IOError) as err: raise ShellError(str(err)) return class TestCommon: # pylint: disable=W0232 @classmethod def _git_append_add_commit(cls, text, filename, commit_log, exec_dir): ShellHelper.append_text_to_file(text, filename, exec_dir) ShellHelper.exec_command( _shlex.split('git add %s' % filename), exec_dir) ShellHelper.exec_command( _shlex.split('git commit -m "%s"' % commit_log), exec_dir) return @classmethod def setup_test_repos(cls, base_dir): # Cleanup and create an empty directory ShellHelper.remove_dir(base_dir) ShellHelper.make_dir(base_dir) # Set up the origin and clone repo paths origin_repo_url = _os.path.join(base_dir, 'repo-origin') clone_repo1 = _os.path.join(base_dir, 'clone1') clone_repo2 = _os.path.join(base_dir, 'clone2') # Set up the origin as a bare repo ShellHelper.make_dir(origin_repo_url) ShellHelper.exec_command( _shlex.split('git init --bare'), origin_repo_url) # Create Clone1 from the origin ShellHelper.exec_command( _shlex.split('git clone %s %s' 
% (origin_repo_url, clone_repo1)), base_dir) # Create some content in clone1 cls._git_append_add_commit( 'First content...\n', 'README', 'First commit.', clone_repo1) cls._git_append_add_commit( 'Hardly useful...\n', 'dummy', 'Here we go.', clone_repo1) cls._git_append_add_commit( 'More content...\n', 'README', 'Appending to README.', clone_repo1) # Push the changes to origin ShellHelper.exec_command( _shlex.split('git push origin master'), clone_repo1) # Make some more changes, but do not push yet cls._git_append_add_commit( 'Another line...\n', 'README', 'One more to README.', clone_repo1) cls._git_append_add_commit( 'Dummy2 in place...\n', 'dummy2', 'Creating dummy2.', clone_repo1) # Create clone2 from the origin ShellHelper.exec_command( _shlex.split('git clone %s %s' % (origin_repo_url, clone_repo2)), base_dir) # Add and commit changes in clone2 cls._git_append_add_commit( 'Another line...\n', 'dummy', 'One more to dummy.', clone_repo2) cls._git_append_add_commit( 'More dummy...\n', 'dummy', 'More dummy.', clone_repo2) # Create a new branch in clone2 ShellHelper.exec_command( _shlex.split('git branch new-branch'), clone_repo2) ShellHelper.exec_command( _shlex.split('git checkout new-branch'), clone_repo2) # Add some more changes in clone2's new-branch cls._git_append_add_commit( 'More lines...\n', 'dummy', 'Another line to dummy.', clone_repo2) cls._git_append_add_commit( 'Just keep it coming...\n', 'dummy', 'Again :D', clone_repo2) # Switch back to master in clone2 ShellHelper.exec_command( _shlex.split('git checkout master'), clone_repo2) # Push all branches to origin ShellHelper.exec_command( _shlex.split('git push origin --all'), clone_repo2) # Pull changes from origin into clone1 ShellHelper.exec_command( _shlex.split('git fetch origin'), clone_repo1) ShellHelper.exec_command( _shlex.split( 'git merge --commit -m "Merge origin into clone1" ' + 'origin/master'), clone_repo1) # Now push the merges back to origin ShellHelper.exec_command( _shlex.split('git 
push origin master'), clone_repo1) # Get the changes from origin into clone2 ShellHelper.exec_command( _shlex.split('git fetch origin'), clone_repo2) ShellHelper.exec_command( _shlex.split( 'git merge --commit -m "Merge origin into clone2" ' + 'origin/master'), clone_repo2) # Now push the merges back to origin ShellHelper.exec_command( _shlex.split('git push origin --all'), clone_repo2) # Now get rid of the clone repos, we only need the origin ShellHelper.remove_dir(clone_repo1) ShellHelper.remove_dir(clone_repo2) return class TestCaseBase(_unittest.TestCase): def _set_tear_down_cb(self, method, *args, **kwargs): self._tear_down_cb = method self._tear_down_cb_args = args self._tear_down_cb_kwargs = kwargs return def _clear_tear_down_cb(self): self._tear_down_cb = None self._tear_down_cb_args = None self._tear_down_cb_kwargs = None return def __init__(self, methodName='runTest'): super(TestCaseBase, self).__init__(methodName) self._tear_down_cb = None self._tear_down_cb_args = None self._tear_down_cb_kwargs = None if _sys.version_info >= (3, 2): # pylint: disable=E1101 self._count_equal = self.assertCountEqual else: # pylint: disable=E1101 self._count_equal = self.assertItemsEqual return def setUp(self): return def tearDown(self): if not self._tear_down_cb is None: self._tear_down_cb(*self._tear_down_cb_args, **self._tear_down_cb_kwargs) self._clear_tear_down_cb() return class TestResult(_unittest.TestResult): PASSED = 0 ERROR = 1 FAILED = 2 SKIPPED = 3 EXPECTED_FAILURE = 4 UNEXPECTED_SUCCESS = 5 _result_str = {PASSED: 'PASSED', ERROR: 'ERROR', FAILED: 'FAILED', SKIPPED: 'SKIPPED', EXPECTED_FAILURE: 'EXPECTED FAILURE', UNEXPECTED_SUCCESS: 'UNEXPECTED_SUCCESS'} def _update_result(self, test, err, result_type): module_test_results = [] test_id = test.id().split('.') if test_id[-2] not in self.test_results: self.test_results[test_id[-2]] = module_test_results else: module_test_results = self.test_results[test_id[-2]] result = {} result['test_case'] = test_id[-1] 
result['description'] = str(test.shortDescription()) result['result'] = result_type if not err is None: result['formated_traceback'] = \ ''.join(_traceback.format_exception(err[0], err[1], err[2])) module_test_results.append(result) return def __init__(self): super(TestResult, self).__init__() self.test_results = _collections.OrderedDict() self.has_errors = False self.has_failures = False self.has_unexpected_success = False return @classmethod def get_result_str(cls, result): return cls._result_str[result] def addError(self, test, err): self._update_result(test, err, type(self).ERROR) self.has_errors = True return def addFailure(self, test, err): self._update_result(test, err, type(self).FAILED) self.has_failures = True return def addSuccess(self, test): self._update_result(test, None, type(self).PASSED) return def addSkip(self, test, reason): self._update_result(test, None, type(self).SKIPPED) return def addExpectedFailure(self, test, err): self._update_result(test, err, type(self).EXPECTED_FAILURE) return def addUnexpectedSuccess(self, test): self._update_result(test, None, type(self).UNEXPECTED_SUCCESS) self.has_unexpected_success = True return class TestRunner(_unittest.TextTestRunner): def __init__(self, stream=_sys.stderr, descriptions=True, verbosity=1): super(TestRunner, self).__init__(stream, descriptions, verbosity) self._test_result = None return def _makeResult(self): self._test_result = TestResult() return self._test_result def get_test_result(self): return self._test_result class TestSuiteManager(object): _base_dir = None @classmethod def get_base_dir(cls): return cls._base_dir def __init__(self, base_dir): if not _os.path.isdir(base_dir): ShellHelper.make_dir(base_dir) type(self)._base_dir = base_dir self._test_suite = None self._output = _io.StringIO() self._test_result = None return def add_test_suite(self, test_suite): if self._test_suite is None: self._test_suite = test_suite else: self._test_suite.addTest(test_suite) return def run(self): runner 
= TestRunner( stream=self._output, verbosity=0) runner.run(self._test_suite) self._test_result = runner.get_test_result() return def show_results(self): Logger.msg('\n') Logger.msg('*' * 120) Logger.msg('{0:^120}'.format('Test Summary')) Logger.msg('*' * 120) Logger.msg(self._output.getvalue()) Logger.msg('-' * 120 + '\n\n') error_traces_str = '' failure_traces_str = '' for test_suite, results in self._test_result.test_results.items(): Logger.msg('TestSuite: %s' % test_suite) Logger.msg('#' * 120) Logger.msg('{0:48} {1:56} {2:16}'.format( 'TestCase', 'Description', 'Result')) Logger.msg('#' * 120) for result in results: Logger.msg('{0:48} {1:56} {2:16}'.format( result['test_case'], result['description'], TestResult.get_result_str(result['result']))) if result['result'] == TestResult.ERROR: error_traces_str += \ '%s::%s\n%s\n' % (test_suite, result['test_case'], result['formated_traceback']) elif result['result'] == TestResult.FAILED: failure_traces_str += \ '%s::%s\n%s\n' % (test_suite, result['test_case'], result['formated_traceback']) Logger.msg('-' * 120 + '\n\n') if self._test_result.has_errors: Logger.msg('#' * 120) Logger.msg('Errors') Logger.msg('#' * 120 + '\n') Logger.msg(error_traces_str + '-' * 120 + '\n') if self._test_result.has_failures: Logger.msg('#' * 120) Logger.msg('Failures') Logger.msg('#' * 120 + '\n') Logger.msg(failure_traces_str + '-' * 120 + '\n') Logger.msg('Tests Run: ' + str(self._test_result.testsRun)) return def was_successful(self): return (not self._test_result.has_errors) and \ (not self._test_result.has_failures) and \ (not self._test_result.has_unexpected_success) Add a ShellHelper method to read a file as a string. # # Copyright (C) 2012 Ash (Tuxdude) <tuxdude.github@gmail.com> # # This file is part of repobuddy. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program. If not, see # <http://www.gnu.org/licenses/>. # import os as _os import shlex as _shlex import shutil as _shutil import subprocess as _subprocess import sys as _sys import traceback as _traceback if _sys.version_info < (2, 7): # pylint: disable=F0401 import cStringIO as _io import ordereddict as _collections import unittest2 as _unittest else: # pylint: disable=F0401 if _sys.version_info < (3, 0): import cStringIO as _io else: import io as _io import collections as _collections import unittest as _unittest from repobuddy.utils import RepoBuddyBaseException, Logger class ShellError(RepoBuddyBaseException): def __init__(self, error_str): super(ShellError, self).__init__(error_str) return class ShellHelper: # pylint: disable=W0232 @classmethod def exec_command(cls, command, base_dir, debug_output=True): Logger.msg('>> ' + ' '.join(command)) try: kwargs = {} return_code = None if not debug_output: kwargs['stdout'] = open(_os.devnull, 'w') kwargs['stderr'] = _subprocess.STDOUT proc = _subprocess.Popen(command, # pylint: disable=W0142 cwd=base_dir, **kwargs) try: proc.communicate() except: proc.kill() proc.wait() raise return_code = proc.poll() if return_code != 0: raise ShellError('Command \'%s\' failed!' 
% command) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def read_file_as_string(cls, filename): try: with open(filename, 'r') as file_handle: data = file_handle.read() except (OSError, IOError) as err: raise ShellError(str(err)) return data @classmethod def append_text_to_file(cls, text, filename, base_dir): try: with open(_os.path.join(base_dir, filename), 'a') as file_handle: file_handle.write(text) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def remove_file(cls, filename): try: _os.unlink(filename) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def make_dir(cls, dirname, create_parent_dirs=False, only_if_not_exists=False): if not create_parent_dirs: try: if not only_if_not_exists or not _os.path.exists(dirname): _os.mkdir(dirname) except (OSError, IOError) as err: raise ShellError(str(err)) else: try: if not only_if_not_exists or not _os.path.exists(dirname): _os.makedirs(dirname) except (OSError, IOError) as err: raise ShellError(str(err)) return @classmethod def remove_dir(cls, dirname): if _os.path.isdir(dirname): try: _shutil.rmtree(dirname, ignore_errors=False) except (OSError, IOError) as err: raise ShellError(str(err)) return class TestCommon: # pylint: disable=W0232 @classmethod def _git_append_add_commit(cls, text, filename, commit_log, exec_dir): ShellHelper.append_text_to_file(text, filename, exec_dir) ShellHelper.exec_command( _shlex.split('git add %s' % filename), exec_dir) ShellHelper.exec_command( _shlex.split('git commit -m "%s"' % commit_log), exec_dir) return @classmethod def setup_test_repos(cls, base_dir): # Cleanup and create an empty directory ShellHelper.remove_dir(base_dir) ShellHelper.make_dir(base_dir) # Set up the origin and clone repo paths origin_repo_url = _os.path.join(base_dir, 'repo-origin') clone_repo1 = _os.path.join(base_dir, 'clone1') clone_repo2 = _os.path.join(base_dir, 'clone2') # Set up the origin as a bare repo 
ShellHelper.make_dir(origin_repo_url) ShellHelper.exec_command( _shlex.split('git init --bare'), origin_repo_url) # Create Clone1 from the origin ShellHelper.exec_command( _shlex.split('git clone %s %s' % (origin_repo_url, clone_repo1)), base_dir) # Create some content in clone1 cls._git_append_add_commit( 'First content...\n', 'README', 'First commit.', clone_repo1) cls._git_append_add_commit( 'Hardly useful...\n', 'dummy', 'Here we go.', clone_repo1) cls._git_append_add_commit( 'More content...\n', 'README', 'Appending to README.', clone_repo1) # Push the changes to origin ShellHelper.exec_command( _shlex.split('git push origin master'), clone_repo1) # Make some more changes, but do not push yet cls._git_append_add_commit( 'Another line...\n', 'README', 'One more to README.', clone_repo1) cls._git_append_add_commit( 'Dummy2 in place...\n', 'dummy2', 'Creating dummy2.', clone_repo1) # Create clone2 from the origin ShellHelper.exec_command( _shlex.split('git clone %s %s' % (origin_repo_url, clone_repo2)), base_dir) # Add and commit changes in clone2 cls._git_append_add_commit( 'Another line...\n', 'dummy', 'One more to dummy.', clone_repo2) cls._git_append_add_commit( 'More dummy...\n', 'dummy', 'More dummy.', clone_repo2) # Create a new branch in clone2 ShellHelper.exec_command( _shlex.split('git branch new-branch'), clone_repo2) ShellHelper.exec_command( _shlex.split('git checkout new-branch'), clone_repo2) # Add some more changes in clone2's new-branch cls._git_append_add_commit( 'More lines...\n', 'dummy', 'Another line to dummy.', clone_repo2) cls._git_append_add_commit( 'Just keep it coming...\n', 'dummy', 'Again :D', clone_repo2) # Switch back to master in clone2 ShellHelper.exec_command( _shlex.split('git checkout master'), clone_repo2) # Push all branches to origin ShellHelper.exec_command( _shlex.split('git push origin --all'), clone_repo2) # Pull changes from origin into clone1 ShellHelper.exec_command( _shlex.split('git fetch origin'), clone_repo1) 
ShellHelper.exec_command( _shlex.split( 'git merge --commit -m "Merge origin into clone1" ' + 'origin/master'), clone_repo1) # Now push the merges back to origin ShellHelper.exec_command( _shlex.split('git push origin master'), clone_repo1) # Get the changes from origin into clone2 ShellHelper.exec_command( _shlex.split('git fetch origin'), clone_repo2) ShellHelper.exec_command( _shlex.split( 'git merge --commit -m "Merge origin into clone2" ' + 'origin/master'), clone_repo2) # Now push the merges back to origin ShellHelper.exec_command( _shlex.split('git push origin --all'), clone_repo2) # Now get rid of the clone repos, we only need the origin ShellHelper.remove_dir(clone_repo1) ShellHelper.remove_dir(clone_repo2) return class TestCaseBase(_unittest.TestCase): def _set_tear_down_cb(self, method, *args, **kwargs): self._tear_down_cb = method self._tear_down_cb_args = args self._tear_down_cb_kwargs = kwargs return def _clear_tear_down_cb(self): self._tear_down_cb = None self._tear_down_cb_args = None self._tear_down_cb_kwargs = None return def __init__(self, methodName='runTest'): super(TestCaseBase, self).__init__(methodName) self._tear_down_cb = None self._tear_down_cb_args = None self._tear_down_cb_kwargs = None if _sys.version_info >= (3, 2): # pylint: disable=E1101 self._count_equal = self.assertCountEqual else: # pylint: disable=E1101 self._count_equal = self.assertItemsEqual return def setUp(self): return def tearDown(self): if not self._tear_down_cb is None: self._tear_down_cb(*self._tear_down_cb_args, **self._tear_down_cb_kwargs) self._clear_tear_down_cb() return class TestResult(_unittest.TestResult): PASSED = 0 ERROR = 1 FAILED = 2 SKIPPED = 3 EXPECTED_FAILURE = 4 UNEXPECTED_SUCCESS = 5 _result_str = {PASSED: 'PASSED', ERROR: 'ERROR', FAILED: 'FAILED', SKIPPED: 'SKIPPED', EXPECTED_FAILURE: 'EXPECTED FAILURE', UNEXPECTED_SUCCESS: 'UNEXPECTED_SUCCESS'} def _update_result(self, test, err, result_type): module_test_results = [] test_id = test.id().split('.') 
if test_id[-2] not in self.test_results: self.test_results[test_id[-2]] = module_test_results else: module_test_results = self.test_results[test_id[-2]] result = {} result['test_case'] = test_id[-1] result['description'] = str(test.shortDescription()) result['result'] = result_type if not err is None: result['formated_traceback'] = \ ''.join(_traceback.format_exception(err[0], err[1], err[2])) module_test_results.append(result) return def __init__(self): super(TestResult, self).__init__() self.test_results = _collections.OrderedDict() self.has_errors = False self.has_failures = False self.has_unexpected_success = False return @classmethod def get_result_str(cls, result): return cls._result_str[result] def addError(self, test, err): self._update_result(test, err, type(self).ERROR) self.has_errors = True return def addFailure(self, test, err): self._update_result(test, err, type(self).FAILED) self.has_failures = True return def addSuccess(self, test): self._update_result(test, None, type(self).PASSED) return def addSkip(self, test, reason): self._update_result(test, None, type(self).SKIPPED) return def addExpectedFailure(self, test, err): self._update_result(test, err, type(self).EXPECTED_FAILURE) return def addUnexpectedSuccess(self, test): self._update_result(test, None, type(self).UNEXPECTED_SUCCESS) self.has_unexpected_success = True return class TestRunner(_unittest.TextTestRunner): def __init__(self, stream=_sys.stderr, descriptions=True, verbosity=1): super(TestRunner, self).__init__(stream, descriptions, verbosity) self._test_result = None return def _makeResult(self): self._test_result = TestResult() return self._test_result def get_test_result(self): return self._test_result class TestSuiteManager(object): _base_dir = None @classmethod def get_base_dir(cls): return cls._base_dir def __init__(self, base_dir): if not _os.path.isdir(base_dir): ShellHelper.make_dir(base_dir) type(self)._base_dir = base_dir self._test_suite = None self._output = _io.StringIO() 
self._test_result = None return def add_test_suite(self, test_suite): if self._test_suite is None: self._test_suite = test_suite else: self._test_suite.addTest(test_suite) return def run(self): runner = TestRunner( stream=self._output, verbosity=0) runner.run(self._test_suite) self._test_result = runner.get_test_result() return def show_results(self): Logger.msg('\n') Logger.msg('*' * 120) Logger.msg('{0:^120}'.format('Test Summary')) Logger.msg('*' * 120) Logger.msg(self._output.getvalue()) Logger.msg('-' * 120 + '\n\n') error_traces_str = '' failure_traces_str = '' for test_suite, results in self._test_result.test_results.items(): Logger.msg('TestSuite: %s' % test_suite) Logger.msg('#' * 120) Logger.msg('{0:48} {1:56} {2:16}'.format( 'TestCase', 'Description', 'Result')) Logger.msg('#' * 120) for result in results: Logger.msg('{0:48} {1:56} {2:16}'.format( result['test_case'], result['description'], TestResult.get_result_str(result['result']))) if result['result'] == TestResult.ERROR: error_traces_str += \ '%s::%s\n%s\n' % (test_suite, result['test_case'], result['formated_traceback']) elif result['result'] == TestResult.FAILED: failure_traces_str += \ '%s::%s\n%s\n' % (test_suite, result['test_case'], result['formated_traceback']) Logger.msg('-' * 120 + '\n\n') if self._test_result.has_errors: Logger.msg('#' * 120) Logger.msg('Errors') Logger.msg('#' * 120 + '\n') Logger.msg(error_traces_str + '-' * 120 + '\n') if self._test_result.has_failures: Logger.msg('#' * 120) Logger.msg('Failures') Logger.msg('#' * 120 + '\n') Logger.msg(failure_traces_str + '-' * 120 + '\n') Logger.msg('Tests Run: ' + str(self._test_result.testsRun)) return def was_successful(self): return (not self._test_result.has_errors) and \ (not self._test_result.has_failures) and \ (not self._test_result.has_unexpected_success)
# Python import StringIO from collections import Counter, OrderedDict # Django from django.core import urlresolvers from django_countries import countries from django.db.models import Count, FieldDoesNotExist from django.contrib.sites.shortcuts import get_current_site # 3rd Party import xlsxwriter # Project from forms.models import ( NewspaperSheet, NewspaperPerson, TelevisionJournalist, person_models, sheet_models, journalist_models) from forms.modelutils import (TOPICS, GENDER, SPACE, OCCUPATION, FUNCTION, SCOPE, YESNO, AGES, SOURCE, VICTIM_OF, SURVIVOR_OF, IS_PHOTOGRAPH, AGREE_DISAGREE, RETWEET, TV_ROLE, MEDIA_TYPES, CountryRegion) from report_details import WS_INFO, REGION_COUNTRY_MAP, MAJOR_TOPICS, TOPIC_GROUPS, GROUP_TOPICS_MAP, FORMATS def has_field(model, fld): try: model._meta.get_field(fld) return True except FieldDoesNotExist: return False def p(n, d): """ Helper to calculate the percentage of n / d, returning 0 if d == 0. """ if d == 0: return 0.0 return float(n) / d def get_regions(): """ Return a (id, region_name) list for all regions """ country_regions = CountryRegion.objects\ .values('region')\ .exclude(region='Unmapped') regions = set(item['region'] for item in country_regions) return [(i, region) for i, region in enumerate(regions)] def get_countries(selected=None): """ Return a (code, country) list for countries captured. """ captured_country_codes = set() for model in sheet_models.itervalues(): rows = model.objects.values('country') captured_country_codes.update([r['country'] for r in rows]) return [(code, name) for code, name in list(countries) if code in captured_country_codes] def get_region_countries(region): """ Return a (code, country) list for a region. """ if region == 'ALL': return get_countries() else: country_codes = REGION_COUNTRY_MAP[region] return [(code, name) for code, name in list(countries) if code in country_codes] def get_country_region(country): """ Return a (id, region_name) list to which a country belongs. 
""" if country == 'ALL': return get_regions() else: return [(0, [k for k, v in REGION_COUNTRY_MAP.items() if country in v][0])] def clean_title(text): """ Return the string passed in stripped of its numbers and parentheses """ if text != "Congo (the Democratic Republic of the)": return text[text.find(')')+1:].lstrip() return text class XLSXDataExportBuilder(): def __init__(self, request): self.domain = "http://%s" % get_current_site(request).domain self.sheet_exclude_fields = ['monitor', 'url_and_multimedia', 'time_accessed', 'country_region'] self.person_exclude_fields = [] self.journalist_exclude_fields =[] self.sheet_fields_with_id = ['topic', 'scope', 'person_secondary', 'inequality_women', 'stereotypes'] self.person_fields_with_id = ['sex', 'age', 'occupation', 'function', 'survivor_of', 'victim_of'] self.journalist_fields_with_id = ['sex', 'age'] def build(self): """ Generate an Excel spreadsheet and return it as a string. """ output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) for model in sheet_models.itervalues(): self.create_sheet_export(model, workbook) for model in person_models.itervalues(): self.create_person_export(model, workbook) for model in journalist_models.itervalues(): self.create_journalist_export(model, workbook) workbook.close() output.seek(0) return output.read() def create_sheet_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all() row, col = 0, 0 fields = [field for field in model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.sheet_fields_with_id) row += 1 col = 0 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_sheet_row(obj, ws, row+y, col, fields, self.sheet_fields_with_id) def create_person_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all().prefetch_related(model.sheet_name()) row, col = 0, 0 fields = [field for field in 
                  model._meta.fields if not field.name in self.person_exclude_fields]
        ws, col = self.write_ws_titles(ws, row, col, fields, self.person_fields_with_id)

        # Fields of the related sheet model, appended after the person fields.
        sheet_model = model._meta.get_field(model.sheet_name()).rel.to
        sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields]

        ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True)

        row += 1

        for y, obj in enumerate(obj_list):
            col = 0
            ws, col = self.write_person_row(obj, ws, row+y, col, fields, self.person_fields_with_id)
            col += 1
            # Append the parent sheet's values to the same row.
            sheet_obj = getattr(obj, model.sheet_name())
            ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id)

    def create_journalist_export(self, model, wb):
        # Worksheet for one journalist model; mirrors create_person_export.
        ws = wb.add_worksheet(model._meta.object_name)
        obj_list = model.objects.all().prefetch_related(model.sheet_name())
        row, col = 0, 0

        fields = [field for field in model._meta.fields if not field.name in self.journalist_exclude_fields]
        ws, col = self.write_ws_titles(ws, row, col, fields, self.journalist_fields_with_id)

        # Fields of the related sheet model, appended after the journalist fields.
        sheet_model = model._meta.get_field(model.sheet_name()).rel.to
        sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields]

        ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True)

        row += 1
        col = 0

        for y, obj in enumerate(obj_list):
            col = 0
            ws, col = self.write_journalist_row(obj, ws, row+y, col, fields, self.journalist_fields_with_id)
            col += 1
            # Append the parent sheet's values to the same row.
            sheet_obj = getattr(obj, model.sheet_name())
            ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id)

    def write_ws_titles(self, ws, row, col, fields, fields_with_id, append_sheet=False):
        """
        Writes the column titles to the worksheet

        :param ws: Reference to the current worksheet
        :param row, col: y,x postion of the cursor
        :param fields: list of fields of the model which need to be written to the sheet
        :param fields_with_id: fields
                               which need to be written over two columns: id + name
        :param append_sheet: Boolean specifying whether the related sheet object
                             needs to be appended to the row.
        """
        if not append_sheet:
            # Titles for the model's own columns.
            for field in fields:
                ws.write(row, col, unicode(field.name))
                col += 1
                if field.name in fields_with_id:
                    ws.write(row, col, unicode(field.name+"_id"))
                    col += 1
            ws.write(row, col, unicode('edit_url'))
            col += 1
        else:
            # Titles for the appended sheet columns, prefixed with "sheet_".
            for field in fields:
                ws.write(row, col, unicode("sheet_" + field.name))
                col += 1
                if field.name in fields_with_id:
                    ws.write(row, col, unicode("sheet_" + field.name + "_id"))
                    col += 1
            ws.write(row, col, unicode('sheet_edit_url'))
            col += 1
        return ws, col

    def write_sheet_row(self, obj, ws, row, col, fields, fields_with_id):
        """
        Writes a row of data of Sheet models to the worksheet

        :param obj: Reference to the model instance which is being written to the sheet
        :param ws: Reference to the current worksheet
        :param row, col: y,x position of the cursor
        :param fields: list of fields of the model which need to be written to the sheet
        :param fields_with_id: fields which need to be written over two columns: id + name
        """
        for field in fields:
            # Certain fields are 1-indexed
            if field.name == 'country':
                ws.write(row, col, getattr(obj, field.name).code)
            elif field.name == 'topic':
                ws.write(row, col, unicode(TOPICS[getattr(obj, field.name)-1][1]))
                col += 1
                ws.write(row, col, TOPICS[getattr(obj, field.name)-1][0])
            elif field.name == 'scope':
                ws.write(row, col, unicode(SCOPE[getattr(obj, field.name)-1][1]))
                col += 1
                ws.write(row, col, SCOPE[getattr(obj, field.name)-1][0])
            elif field.name == 'person_secondary':
                # person_secondary (SOURCE) is 0-indexed, unlike topic/scope.
                ws.write(row, col, unicode(SOURCE[getattr(obj, field.name)][1]))
                col += 1
                ws.write(row, col, SOURCE[getattr(obj, field.name)][0])
            elif field.name == 'inequality_women':
                ws.write(row, col, unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1]))
                col += 1
                ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0])
            elif field.name == 'stereotypes':
                ws.write(row, col,
                         unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1]))
                col += 1
                ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0])
            elif field.name == 'space':
                ws.write(row, col, unicode(SPACE[getattr(obj, field.name)-1][1]))
            elif field.name == 'retweet':
                ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1]))
            else:
                try:
                    ws.write(row, col, unicode(getattr(obj, field.name)))
                    # NOTE(review): advances past the id column without writing
                    # it, leaving that cell blank for this row -- confirm intended.
                    if field.name in fields_with_id:
                        col += 1
                except UnicodeEncodeError:
                    # Fall back to ascii with replacement chars for unencodable text.
                    ws.write(row, col, unicode(getattr(obj, field.name).encode('ascii', 'replace')))
            col += 1
        # Append an absolute link to the admin change page for this object.
        change_url = urlresolvers.reverse(
            'admin:%s_%s_change' % (
                obj._meta.app_label,
                obj._meta.model_name),
            args=(obj.id,))
        ws.write_url(row, col, "%s%s" % (self.domain, change_url))
        return ws, col

    def write_person_row(self, obj, ws, row, col, fields, fields_with_id):
        """
        Writes a row of data of Person models to the worksheet

        :param obj: Reference to the model instance which is being written to the sheet
        :param ws: Reference to the current worksheet
        :param row, col: y,x position of the cursor
        :param fields: list of fields of the model which need to be written to the sheet
        :param fields_with_id: fields which need to be written over two columns: id + name
        """
        for field in fields:
            # Certain fields are 1-indexed
            if field.name == 'sex':
                ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1]))
                col += 1
                ws.write(row, col, GENDER[getattr(obj, field.name)-1][0])
            elif field.name == 'age':
                # age (AGES) is 0-indexed.
                ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1]))
                col += 1
                ws.write(row, col, AGES[getattr(obj, field.name)][0])
            elif field.name == 'occupation':
                ws.write(row, col, unicode(OCCUPATION[getattr(obj, field.name)][1]))
                col += 1
                ws.write(row, col, OCCUPATION[getattr(obj, field.name)][0])
            elif field.name == 'function':
                ws.write(row, col, unicode(FUNCTION[getattr(obj, field.name)][1]))
                col += 1
                ws.write(row, col, FUNCTION[getattr(obj, field.name)][0])
            elif field.name == 'victim_of' and not getattr(obj, field.name) == None:
                ws.write(row, col,
                         unicode(VICTIM_OF[getattr(obj, field.name)][1]))
                col += 1
                ws.write(row, col, VICTIM_OF[getattr(obj, field.name)][0])
            elif field.name == 'survivor_of' and not getattr(obj, field.name) == None:
                ws.write(row, col, unicode(SURVIVOR_OF[getattr(obj, field.name)][1]))
                col += 1
                ws.write(row, col, SURVIVOR_OF[getattr(obj, field.name)][0])
            elif field.name == 'is_photograph':
                ws.write(row, col, unicode(IS_PHOTOGRAPH[getattr(obj, field.name)-1][1]))
            elif field.name == 'space':
                ws.write(row, col, unicode(SPACE[getattr(obj, field.name)-1][1]))
            elif field.name == 'retweet':
                ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1]))
            elif field.name == obj.sheet_name():
                # Foreign key back to the parent sheet: write its id.
                ws.write(row, col, getattr(obj, field.name).id)
                # Get the parent model and id for building the edit link
                parent_model = field.related.parent_model
                parent_id = getattr(obj, field.name).id
            else:
                try:
                    ws.write(row,col, unicode(getattr(obj, field.name)))
                    if field.name in self.person_fields_with_id:
                        col += 1
                except UnicodeEncodeError:
                    ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace')))
            col += 1
        # Write link to end of row: admin change page of the parent sheet.
        change_url = urlresolvers.reverse(
            'admin:%s_%s_change' % (
                parent_model._meta.app_label,
                parent_model._meta.model_name),
            args=(parent_id,))
        ws.write_url(row, col, "%s%s" % (self.domain, change_url))
        return ws, col

    def write_journalist_row(self, obj, ws, row, col, fields, fields_with_id):
        """
        Writes a row of data of Journalist models to the worksheet

        :param obj: Reference to the model instance which is being written to the sheet
        :param ws: Reference to the current worksheet
        :param row, col: y,x position of the cursor
        :param fields: list of fields of the model which need to be written to the sheet
        :param fields_with_id: fields which need to be written over two columns: id + name
        """
        for field in fields:
            if field.name == 'sex':
                ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1]))
                col += 1
                ws.write(row, col, GENDER[getattr(obj,
                                          field.name)-1][0])
            elif field.name == 'age' and not getattr(obj, field.name) == None:
                ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1]))
                col += 1
                ws.write(row, col, AGES[getattr(obj, field.name)][0])
            elif field.name == obj.sheet_name():
                # Foreign key back to the parent sheet: write its id.
                ws.write(row, col, getattr(obj, field.name).id)
                # Get the parent model and id for building the edit link
                parent_model = field.related.parent_model
                parent_id = getattr(obj, field.name).id
            else:
                try:
                    ws.write(row,col, unicode(getattr(obj, field.name)))
                    if field.name in fields_with_id:
                        col += 1
                except UnicodeEncodeError:
                    ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace')))
            col += 1
        # Write link to end of row
        change_url = urlresolvers.reverse(
            'admin:%s_%s_change' % (
                parent_model._meta.app_label,
                parent_model._meta.model_name),
            args=(parent_id,))
        ws.write_url(row, col, "%s%s" % (self.domain, change_url))
        return ws, col


class XLSXReportBuilder:
    # Aggregated GMMP report: one worksheet per ws_NN method, scoped to a
    # country, a region, or the whole dataset depending on the submitted form.

    def __init__(self, form):
        # Local import to avoid a circular import with reports.views.
        from reports.views import CountryForm, RegionForm
        self.form = form
        if isinstance(form, CountryForm):
            self.countries = form.filter_countries()
            self.regions = get_country_region(form.cleaned_data['country'])
            self.report_type = 'country'
        elif isinstance(form, RegionForm):
            region = [name for i, name in form.REGIONS if str(i) == form.cleaned_data['region']][0]
            self.countries = get_region_countries(region)
            self.regions = [(0, region)]
            self.report_type = 'region'
        else:
            self.countries = get_countries()
            self.regions = get_regions()
            self.report_type = 'global'

        self.country_list = [code for code, name in self.countries]
        self.region_list = [name for id, name in self.regions]

        # Various gender utilities
        self.male_female = [(id, value) for id, value in GENDER if id in [1, 2]]
        self.male_female_ids = [id for id, value in self.male_female]
        self.female = [(id, value) for id, value in GENDER if id == 1]
        self.yes = [(id, value) for id, value in YESNO if id == 'Y']
        self.gmmp_year = '2015'

    def build(self):
        """
        Generate an Excel spreadsheet and return it
as a string. """ output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) # setup formats self.heading = workbook.add_format(FORMATS['heading']) self.col_heading = workbook.add_format(FORMATS['col_heading']) self.col_heading_def = workbook.add_format(FORMATS['col_heading_def']) self.sec_col_heading = workbook.add_format(FORMATS['sec_col_heading']) self.sec_col_heading_def = workbook.add_format(FORMATS['sec_col_heading_def']) self.label = workbook.add_format(FORMATS['label']) self.N = workbook.add_format(FORMATS['N']) self.P = workbook.add_format(FORMATS['P']) # Use the following for specifying which reports to create durin dev # test_functions = [ # 'ws_01', 'ws_02', 'ws_04', 'ws_05', 'ws_06', 'ws_07', 'ws_08', 'ws_09', 'ws_10', # 'ws_11', 'ws_12', 'ws_13', 'ws_14', 'ws_15', 'ws_16', 'ws_17', 'ws_18', 'ws_19', 'ws_20', # 'ws_21', 'ws_23', 'ws_24', 'ws_25', 'ws_26', 'ws_27', 'ws_28', 'ws_29', 'ws_30', # 'ws_31', 'ws_32', 'ws_34', 'ws_35', 'ws_36', 'ws_38', 'ws_39', 'ws_40', # 'ws_41', 'ws_42', 'ws_43', 'ws_44', 'ws_45', 'ws_46', 'ws_47', 'ws_48',] test_functions = ['ws_02'] sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) for function in test_functions: if self.report_type in sheet_info[function]['reports']: ws = workbook.add_worksheet(sheet_info[function]['name']) self.write_headers(ws, sheet_info[function]['title'], sheet_info[function]['desc']) getattr(self, function)(ws) # ------------------------------------------------------------------- # To ensure ordered worksheets # sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) # for ws_num, ws_info in sheet_info.iteritems(): # if self.report_type in ws_info['reports']: # ws = workbook.add_worksheet(ws_info['name']) # self.write_headers(ws, ws_info['title'], ws_info['desc']) # getattr(self, ws_num)(ws) workbook.close() output.seek(0) return output.read() def ws_01(self, ws): """ Cols: Media Type Rows: Region """ counts = Counter() for media_type, model in 
                                 sheet_models.iteritems():
            rows = model.objects\
                .values('country_region__region')\
                .filter(country_region__region__in=self.region_list)\
                .annotate(n=Count('id'))

            for row in rows:
                if row['country_region__region'] is not None:
                    # Get media and region id's to assign to counts
                    media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                    region_id = [region[0] for region in self.regions if region[1] == row['country_region__region']][0]
                    counts.update({(media_id, region_id): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True)

    def ws_02(self, ws):
        """
        Cols: Media Type
        Rows: Country
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            rows = model.objects\
                .values('country')\
                .filter(country__in=self.country_list)\
                .annotate(n=Count('country'))
            for row in rows:
                if row['country'] is not None:
                    # Get media id's to assign to counts
                    media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                    counts.update({(media_id, row['country']): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, self.countries, row_perc=True)

    def ws_04(self, ws):
        """
        Cols: Region, Media type
        Rows: Major Topic
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                rows = model.objects\
                    .values('topic')\
                    .filter(country_region__region=region)\
                    .annotate(n=Count('id'))
                for r in rows:
                    # Get media id's to assign to counts
                    media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                    # Roll minor topics up into their major topic group.
                    major_topic = TOPIC_GROUPS[r['topic']]
                    counts.update({(media_id, major_topic): r['n']})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, MEDIA_TYPES, MAJOR_TOPICS, row_perc=False, sec_cols=10)

    def ws_05(self, ws):
        """
        Cols: Subject sex
        Rows: Major Topic
        """
        counts = Counter()
        for model in person_models.itervalues():
            topic_field = '%s__topic' % model.sheet_name()
            rows = model.objects\
                .values('sex', topic_field)\
                .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            for r in rows:
                counts.update({(r['sex'], TOPIC_GROUPS[r[topic_field]]): r['n']})
        self.tabulate(ws, counts, self.male_female, MAJOR_TOPICS, row_perc=True, display_cols=self.female)

    def ws_06(self, ws):
        """
        Cols: Region, Subject sex: female only
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for model in person_models.itervalues():
                topic_field = '%s__topic' % model.sheet_name()
                rows = model.objects\
                    .values('sex', topic_field)\
                    .filter(**{model.sheet_name() + '__country_region__region':region})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                for r in rows:
                    counts.update({(r['sex'], TOPIC_GROUPS[r[topic_field]]): r['n']})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, MAJOR_TOPICS, row_perc=True, sec_cols=2, display_cols=self.female)

    def ws_07(self, ws):
        """
        Cols: Media Type
        Rows: Subject Sex
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            rows = model.objects\
                .values('sex')\
                .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            for r in rows:
                # Get media id's to assign to counts
                media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                counts.update({(media_id, r['sex']): r['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, self.male_female, row_perc=False)

    def ws_08(self, ws):
        """
        Cols: Subject Sex
        Rows: Scope
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            # Some sheet models have no scope field.
            if 'scope' in model.sheet_field().rel.to._meta.get_all_field_names():
                scope = '%s__scope' % model.sheet_name()
                rows = model.objects\
                    .values('sex', scope)\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r[scope]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, SCOPE, row_perc=True, display_cols=self.female)

    def ws_09(self, ws):
        """
        Cols: Subject Sex
        Rows: Topic
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            topic = '%s__topic' % model.sheet_name()
            rows = model.objects\
                .values('sex', topic)\
                .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            counts.update({(r['sex'], r[topic]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female)

    def ws_10(self, ws):
        """
        Cols: Space
        Rows: Minor Topics

        :: Newspaper Sheets only
        """
        # Calculate row values for column
        counts = Counter()
        rows = NewspaperSheet.objects\
            .values('space', 'topic')\
            .filter(country__in=self.country_list)\
            .annotate(n=Count('id'))
        for r in rows:
            counts.update({(r['space'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, SPACE, MAJOR_TOPICS, row_perc=False)

    def ws_11(self, ws):
        """
        Cols: Equality Rights
        Rows: Major Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            if 'equality_rights' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('equality_rights', 'topic')\
                    .filter(country__in=self.country_list)\
                    .annotate(n=Count('id'))
                for r in rows:
                    counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True)

    def ws_12(self, ws):
        """
        Cols: Region, Equality Rights
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region_name in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                # Some models has no equality rights field
                if 'equality_rights' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('equality_rights', 'topic')\
                        .filter(country_region__region=region_name)\
                        .annotate(n=Count('id'))
                    for r in rows:
                        counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']})
            secondary_counts[region_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, sec_cols=4)

    def ws_13(self, ws):
        """
        Cols: Journalist Sex, Equality Rights
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                # Some sheet models have no equality_rights field.
                if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names():
                    topic = '%s__topic' % model.sheet_name()
                    equality_rights = '%s__equality_rights' % model.sheet_name()
                    rows = model.objects\
                        .values(equality_rights, topic)\
                        .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                        .filter(sex=gender_id)\
                        .annotate(n=Count('id'))
                    for r in rows:
                        counts.update({(r[equality_rights], TOPIC_GROUPS[r[topic]]): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, sec_cols=4)

    def ws_14(self, ws):
        """
        Cols: Sex
        Rows: Occupation
        """
        counts = Counter()
        for model in person_models.itervalues():
            # some Person models don't have an occupation field
            if 'occupation' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'occupation')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, OCCUPATION, row_perc=True, display_cols=self.female)

    def ws_15(self, ws):
        """
        Cols: Sex
        Rows: Function
        """
        counts = Counter()
        for model in person_models.itervalues():
            # some Person models don't have a function field
            if 'function' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'function')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['function']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, FUNCTION, row_perc=True, display_cols=self.female)

    def ws_16(self, ws):
        """
        Cols: Function, Sex
        Rows: Occupation
        """
        secondary_counts = OrderedDict()
        for function_id, function in FUNCTION:
            counts = Counter()
            for model in person_models.itervalues():
                if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'occupation')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(function=function_id)\
                        .filter(sex__in=self.male_female_ids)\
                        .annotate(n=Count('id'))
                    counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
            secondary_counts[function] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4)

    def ws_17(self, ws):
        """
        Cols: Age, Sex of Subject
        Rows: Function
        """
        secondary_counts = OrderedDict()
        for age_id, age in AGES:
            counts = Counter()
            for model in person_models.itervalues():
                if 'function' in model._meta.get_all_field_names() and 'age' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'function')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(age=age_id)\
                        .filter(sex__in=self.male_female_ids)\
                        .annotate(n=Count('id'))
                    counts.update({(r['sex'], r['function']): r['n'] for r in rows})
            secondary_counts[age] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, FUNCTION, row_perc=False, sec_cols=4)

    def ws_18(self, ws):
        """
        Cols: Sex
        Rows: Age

        :: Only for print
        """
        counts = Counter()
        rows = NewspaperPerson.objects\
            .values('sex', 'age')\
            .filter(newspaper_sheet__country__in=self.country_list)\
            .filter(sex__in=self.male_female_ids)\
            .annotate(n=Count('id'))
        counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True)

    def ws_19(self, ws):
        """
        Cols: Sex
        Rows: Age

        :: Only for broadcast
        """
        counts = Counter()
        broadcast = ['Television']
        for media_type, model in person_models.iteritems():
            if media_type in broadcast:
                rows = model.objects\
                    .values('sex', 'age')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True)

    def ws_20(self, ws):
        """
        Cols: Function, Sex
        Rows: Occupation
        """
        secondary_counts = OrderedDict()
        functions_count = Counter()
        # Get top 5 functions
        for model in person_models.itervalues():
            if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('function')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .annotate(n=Count('id'))
                functions_count.update({(r['function']): r['n'] for r in rows})
        top_5_function_ids = [id for id, count in sorted(functions_count.items(), key=lambda x: -x[1])[:5]]
        top_5_functions = [(id, func) for id, func in FUNCTION if id in top_5_function_ids]

        # One secondary column group per top-5 function.
        for func_id, function in top_5_functions:
            counts = Counter()
            for model in person_models.itervalues():
                if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'occupation')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(function=func_id)\
                        .filter(sex__in=self.male_female_ids)\
                        .annotate(n=Count('id'))
                    counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
            secondary_counts[function] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4)

    def ws_21(self, ws):
        """
        Cols: Subject Sex
        Rows: Victim type
        """
        counts = Counter()
        for model in person_models.itervalues():
            if 'victim_of' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'victim_of')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .exclude(victim_of=None)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['victim_of']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, VICTIM_OF, row_perc=True)

    def ws_23(self, ws):
        """
        Cols: Subject Sex
        Rows: Survivor type
        """
        counts = Counter()
        for model in person_models.itervalues():
            if 'survivor_of' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'survivor_of')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .exclude(survivor_of=None)\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['survivor_of']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, SURVIVOR_OF, row_perc=True)

    def ws_24(self, ws):
        """
        Cols: Subject Sex
        Rows: Family Role
        """
        counts = Counter()
        for model in person_models.itervalues():
            if 'family_role' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'family_role')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['family_role']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False)

    def ws_25(self, ws):
        """
        Cols: Journalist Sex, Subject Sex
        Rows: Family Role
        """
        secondary_counts = OrderedDict()
        for sex_id, sex in self.male_female:
            counts = Counter()
            for model in person_models.itervalues():
                if 'family_role' in model._meta.get_all_field_names():
                    sheet_name = model.sheet_name()
                    # Name of the journalist relation on the parent sheet model.
                    journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
                    rows = model.objects\
                        .values('sex', 'family_role')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(**{sheet_name + '__' + journo_name + '__sex':sex_id})\
                        .filter(sex__in=self.male_female_ids)\
                        .annotate(n=Count('id'))
                    counts.update({(r['sex'], r['family_role']): r['n'] for r in rows})
            secondary_counts[sex] = counts
        # NOTE(review): 'Sexof reporter' is missing a space and appears as-is
        # in the generated report -- fix where col_title_def is rendered.
        secondary_counts['col_title_def'] = [
            'Sexof reporter',
            'Sex of news subject']
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, YESNO, row_perc=False, sec_cols=4)

    def ws_26(self, ws):
        """
        Cols: Subject Sex
        Rows: Whether Quoted
        """
        counts = Counter()
        for model in person_models.itervalues():
            if 'is_quoted' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'is_quoted')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['is_quoted']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False)

    def ws_27(self, ws):
        """
        Cols: Subject Sex
        Rows: Photographed
        """
        counts = Counter()
        for model in person_models.itervalues():
            if 'is_photograph' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'is_photograph')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['is_photograph']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, IS_PHOTOGRAPH, row_perc=False)

    def ws_28(self, ws):
        """
        Cols: Medium
        Rows: Region

        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            region = model.sheet_name() + '__country_region__region'
            rows = model.objects\
                .values(region)\
                .filter(sex=1)\
                .filter(**{region + '__in': self.region_list})\
                .annotate(n=Count('id'))
            for row in rows:
                # Get media and region id's to assign to counts
                media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                region_id = [r[0] for r in self.regions if r[1] == row[region]][0]
                counts.update({(media_id, region_id): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True)

    def ws_29(self, ws):
        """
        Cols: Regions
        Rows: Scope

        :: Female reporters only
        """
        counts = Counter()
        for model in person_models.itervalues():
            sheet_name = model.sheet_name()
            region = sheet_name + '__country_region__region'
            scope = sheet_name + '__scope'
            if 'scope' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(region, scope)\
                    .filter(**{region + '__in': self.region_list})\
                    .filter(sex=1)\
                    .annotate(n=Count('id'))
                for row in rows:
                    region_id = [r[0] for r in self.regions if r[1] == row[region]][0]
                    counts.update({(region_id, row[scope]): row['n']})
        self.tabulate(ws, counts, self.regions, SCOPE, row_perc=False)

    def ws_30(self, ws):
        """
        Cols: Region
        Rows: Major Topics

        :: Female reporters only
        """
        counts = Counter()
        for model in person_models.itervalues():
            sheet_name = model.sheet_name()
            region = sheet_name + '__country_region__region'
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(region, topic)\
                    .filter(**{region + '__in': self.region_list})\
                    .filter(sex=1)\
                    .annotate(n=Count('id'))
                for row in rows:
                    region_id = [r[0] for r in self.regions if r[1] == row[region]][0]
                    # Roll minor topics up into their major topic group.
                    major_topic = TOPIC_GROUPS[row[topic]]
                    counts.update({(region_id, major_topic): row['n']})
        self.tabulate(ws, counts, self.regions, MAJOR_TOPICS, row_perc=False)

    def ws_31(self, ws):
        """
        Cols: Sex of Reporter
        Rows: Minor Topics
        """
        counts = Counter()
        for model in journalist_models.itervalues():
            sheet_name = model.sheet_name()
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', topic)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r[topic]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female)

    def ws_32(self, ws):
        """
        Cols:
        Medium
        Rows: Topics

        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(topic)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex=1)\
                    .annotate(n=Count('id'))
                for row in rows:
                    media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                    counts.update({(media_id, row[topic]): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, TOPICS, row_perc=False)

    def ws_34(self, ws):
        """
        Cols: Sex of reporter
        Rows: Sex of subject
        """
        counts = Counter()
        for model in person_models.itervalues():
            sheet_name = model.sheet_name()
            # Path to the journalist's sex via the shared parent sheet.
            journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
            journo_sex = sheet_name + '__' + journo_name + '__sex'
            rows = model.objects\
                .values(journo_sex, 'sex')\
                .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows})
        counts['col_title_def'] = 'Sex of subject'
        self.tabulate(ws, counts, self.male_female, GENDER, row_perc=True, display_cols=self.female)

    def ws_35(self, ws):
        """
        Cols: Sex of reporter
        Rows: Age of reporter

        :: Only for television
        """
        counts = Counter()
        rows = TelevisionJournalist.objects\
            .values('sex', 'age')\
            .filter(television_sheet__country__in=self.country_list)\
            .filter(sex__in=self.male_female_ids)\
            .annotate(n=Count('id'))
        counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True, display_cols=self.female)

    def ws_36(self, ws):
        """
        Cols: Sex of Reporter
        Rows: Focus: about women
        """
        counts = Counter()
        for model in journalist_models.itervalues():
            sheet_name = model.sheet_name()
            about_women = sheet_name + '__about_women'
            if 'about_women' in
                    model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', about_women)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r[about_women]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=True)

    def ws_38(self, ws):
        """
        Cols: Focus: about women
        Rows: Major Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)\
                    .annotate(n=Count('id'))
                for r in rows:
                    # Roll minor topics up into their major topic group.
                    counts.update({(r['about_women'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True)

    def ws_39(self, ws):
        """
        Cols: Focus: about women
        Rows: Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)\
                    .annotate(n=Count('id'))
                counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, YESNO, TOPICS, row_perc=True)

    def ws_40(self, ws):
        """
        Cols: Region, Topics
        Rows: Focus: about women
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for model in sheet_models.itervalues():
                if 'about_women' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('topic', 'about_women')\
                        .filter(country_region__region=region)\
                        .annotate(n=Count('id'))
                    counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=2, display_cols=self.yes)

    def ws_41(self, ws):
        """
        Cols: Equality rights raised
        Rows: Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
if 'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('equality_rights', 'topic')\ .filter(country__in=self.country_list)\ .annotate(n=Count('id')) counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows}) self.tabulate(ws, counts, YESNO, TOPICS, row_perc=False) def ws_42(self, ws): """ Cols: Region, Equality rights raised Rows: Topics """ secondary_counts = OrderedDict() for region_id, region in self.regions: counts = Counter() for model in sheet_models.itervalues(): if 'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('topic', 'equality_rights')\ .filter(country_region__region=region)\ .annotate(n=Count('id')) counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows}) secondary_counts[region] = counts self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=4) def ws_43(self, ws): """ Cols: Sex of reporter, Equality rights raised Cols: Topics """ secondary_counts = OrderedDict() for gender_id, gender in self.male_female: counts = Counter() for model in journalist_models.itervalues(): sheet_name = model.sheet_name() topic = sheet_name + '__topic' equality_rights = sheet_name + '__equality_rights' if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names(): rows = model.objects\ .values(topic, equality_rights)\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(sex=gender_id)\ .annotate(n=Count('id')) counts.update({(r[equality_rights], r[topic]): r['n'] for r in rows}) secondary_counts[gender] = counts self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=4) def ws_44(self, ws): """ Cols: Sex of reporter, Equality rights raised Rows: Region """ secondary_counts = OrderedDict() for gender_id, gender in self.male_female: counts = Counter() for model in journalist_models.itervalues(): sheet_name = model.sheet_name() region = sheet_name + 
'__country_region__region' equality_rights = sheet_name + '__equality_rights' if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names(): rows = model.objects\ .values(equality_rights, region)\ .filter(sex=gender_id)\ .filter(**{region + '__in':self.region_list})\ .annotate(n=Count('id')) for r in rows: region_id = [id for id, name in self.regions if name == r[region]][0] counts.update({(r[equality_rights], region_id): r['n']}) secondary_counts[gender] = counts self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.regions, row_perc=False, sec_cols=4) def ws_45(self, ws): """ Cols: Sex of news subject Rows: Region :: Equality rights raised == Yes """ counts = Counter() for model in person_models.itervalues(): if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names(): region = model.sheet_name() + '__country_region__region' equality_rights = model.sheet_name() + '__equality_rights' rows = model.objects\ .values('sex', region)\ .filter(**{region + '__in':self.region_list})\ .filter(**{equality_rights:'Y'})\ .annotate(n=Count('id')) for r in rows: region_id = [id for id, name in self.regions if name == r[region]][0] counts.update({(r['sex'], region_id): r['n']}) self.tabulate(ws, counts, self.male_female, self.regions, row_perc=True) def ws_46(self, ws): """ Cols: Region, Stereotypes Rows: Major Topics """ secondary_counts = OrderedDict() for region_id, region in self.regions: counts = Counter() for model in sheet_models.itervalues(): if 'stereotypes' in model._meta.get_all_field_names(): rows = model.objects\ .values('stereotypes', 'topic')\ .filter(country_region__region=region)\ .annotate(n=Count('id')) for r in rows: counts.update({(TOPIC_GROUPS[r['topic']], r['stereotypes']): r['n']}) secondary_counts[region] = counts self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True, sec_cols=8) def ws_47(self, ws): """ Cols: Stereotypes Rows: Major Topics """ counts = 
Counter() for model in sheet_models.itervalues(): rows = model.objects\ .values('stereotypes', 'topic')\ .filter(country__in=self.country_list)\ .annotate(n=Count('id')) for r in rows: counts.update({(r['stereotypes'], TOPIC_GROUPS[r['topic']]): r['n']}) self.tabulate(ws, counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True) def ws_48(self, ws): """ Cols: Sex of reporter, Stereotypes Rows: Major Topics """ secondary_counts = OrderedDict() for gender_id, gender in self.male_female: counts = Counter() for model in journalist_models.itervalues(): sheet_name = model.sheet_name() topic = sheet_name + '__topic' stereotypes = sheet_name + '__stereotypes' if 'stereotypes' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names(): rows = model.objects\ .values(stereotypes, topic)\ .filter(sex=gender_id)\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .annotate(n=Count('id')) for r in rows: counts.update({(r[stereotypes], TOPIC_GROUPS[r[topic]]): r['n']}) secondary_counts[gender] = counts self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=False, sec_cols=8) def ws_53(self, ws): """ Cols: Topic Rows: Country :: Internet media type only :: Female reporters only """ display_cols = [(id, value) for id, value in GENDER if id==1] secondary_counts = OrderedDict() model = sheet_models.get('Internet News') for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems(): counts = Counter() journo_sex_field = '%s__sex' % model.journalist_field_name() rows = model.objects\ .values(journo_sex_field, 'country')\ .filter(topic__in=topic_ids)\ .annotate(n=Count('id')) counts.update({(r[journo_sex_field], r['country']): r['n'] for r in rows}) secondary_counts[major_topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, display_cols=display_cols, sec_cols=2) def ws_54(self, ws): """ Cols: Major Topic, sex of subject Rows: Country :: Internet media type only """ 
secondary_counts = OrderedDict() model = person_models.get('Internet News') for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems(): counts = Counter() country_field = '%s__country' % model.sheet_name() rows = model.objects\ .values('sex', country_field)\ .filter(**{model.sheet_name() + '__topic__in':topic_ids})\ .annotate(n=Count('id')) counts.update({(r['sex'], r[country_field]): r['n'] for r in rows}) secondary_counts[major_topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, sec_cols=8) def ws_55(self, ws): """ Cols: Occupation Rows: Country :: Show all countries :: Only female subjects :: Internet media type only """ counts = Counter() model = person_models.get('Internet News') country_field = '%s__country' % model.sheet_name() rows = model.objects\ .values(country_field, 'occupation')\ .filter(sex=1)\ .annotate(n=Count('id')) counts.update({(r['occupation'], r[country_field]): r['n'] for r in rows}) self.tabulate(ws, counts, OCCUPATION, self.countries, row_perc=True) def ws_56(self, ws): """ Cols: Function Rows: Country :: Show all countries :: Internet media type only """ counts = Counter() model = person_models.get('Internet News') country_field = '%s__country' % model.sheet_name() rows = model.objects\ .values(country_field, 'function')\ .annotate(n=Count('id')) counts.update({(r['function'], r[country_field]): r['n'] for r in rows}) self.tabulate(ws, counts, FUNCTION, self.countries, row_perc=True) def ws_57(self, ws): """ Cols: Sex of subject Rows: Country, Family role :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('sex', 'family_role')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['family_role']): row['n'] for row in rows} # If only captured countries should be 
displayed use # if counts.keys(): self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_58(self, ws): """ Cols: Sex of subject Rows: Country, is photographed :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('sex', 'is_photograph')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['is_photograph']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, IS_PHOTOGRAPH, row_perc=True, sec_row=True, r=r) r += len(IS_PHOTOGRAPH) def ws_59(self, ws): """ Cols: Sex of reporter Rows: Sex of subject :: Internet media only """ counts = Counter() model = person_models.get('Internet News') sheet_name = model.sheet_name() journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name() journo_sex = sheet_name + '__' + journo_name + '__sex' rows = model.objects\ .values(journo_sex, 'sex')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .annotate(n=Count('id')) counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows}) self.tabulate(ws, counts, GENDER, GENDER, row_perc=False) def ws_60(self, ws): """ Cols: Sex of subject Rows: Country, age :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('sex', 'age')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['age']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, AGES, row_perc=True, sec_row=True, r=r) r += len(AGES) def 
ws_61(self, ws): """ Cols: Sex of subject Rows: Country, is_quoted :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('sex', 'is_quoted')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['is_quoted']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_62(self, ws): """ Cols: Topic Rows: Country, equality raised :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('topic', 'equality_rights')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['equality_rights']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_63(self, ws): """ Cols: Topic Rows: Country, stereotypes challenged :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('topic', 'stereotypes')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, AGREE_DISAGREE, row_perc=True, sec_row=True, r=r) r += len(AGREE_DISAGREE) def ws_64(self, ws): """ Cols: Topic Rows: Country, about women :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Internet 
News') for code, country in self.countries: rows = model.objects\ .values('topic', 'about_women')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['about_women']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_65(self, ws): """ Cols: Topic Rows: Country, tweet or retweet :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'retweet')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['retweet']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, RETWEET, row_perc=False, sec_row=True, r=r) r += len(RETWEET) def ws_66(self, ws): """ Cols: Topic Rows: Country, sex of news subject :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Twitter') topic_field = '%s__topic' % model.sheet_name() for code, country in self.countries: rows = model.objects\ .values(topic_field, 'sex')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts.update({(row[topic_field], row['sex']): row['n'] for row in rows}) self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, GENDER, row_perc=True, sec_row=True, r=r) r += len(GENDER) def ws_67(self, ws): """ Cols: Topic Rows: Country :: Only female journalists :: Show all countries :: Twitter media type only """ counts = Counter() model = sheet_models.get('Twitter') rows = model.objects\ .values('topic', 'country')\ .filter(**{model.journalist_field_name() + '__sex':1})\ .annotate(n=Count('id')) counts.update({(row['topic'], row['country']): row['n'] for row in rows}) self.tabulate(ws, counts, 
TOPICS, self.countries, row_perc=True, sec_row=False) def ws_68(self, ws): """ Cols: Topic Rows: Country, about women :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'about_women')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['about_women']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=False, sec_row=True, r=r) r += len(YESNO) def ws_69(self, ws): """ Cols: Topic Rows: Country, stereotypes :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'stereotypes')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, AGREE_DISAGREE, row_perc=True, sec_row=True, r=r) r += len(AGREE_DISAGREE) def ws_76(self, ws): """ Cols: Topic, Stereotypes Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in sheet_models.itervalues(): if 'stereotypes' in model._meta.get_all_field_names(): rows = model.objects\ .values('stereotypes', 'country')\ .filter(topic=topic_id)\ .annotate(n=Count('id')) counts.update({(r['stereotypes'], r['country']): r['n'] for r in rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, self.countries, row_perc=True, sec_cols=8) def ws_77(self, ws): """ Cols: Topic, Reference to gender equality Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in sheet_models.itervalues(): if 
'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('equality_rights', 'country')\ .filter(topic=topic_id)\ .annotate(n=Count('id')) counts.update({(r['equality_rights'], r['country']): r['n'] for r in rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.countries, row_perc=True, sec_cols=4) def ws_78(self, ws): """ Cols: Topic, victim_of Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in person_models.itervalues(): if 'victim_of' in model._meta.get_all_field_names(): country_field = '%s__country' % model.sheet_name() rows = model.objects\ .values('victim_of', country_field)\ .filter(**{model.sheet_name() + '__topic':topic_id})\ .annotate(n=Count('id')) counts.update({(r['victim_of'], r[country_field]): r['n'] for r in rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, VICTIM_OF, self.countries, row_perc=True, sec_cols=18) def ws_79(self, ws): """ Cols: Topic, survivor_of Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in person_models.itervalues(): if 'survivor_of' in model._meta.get_all_field_names(): country_field = '%s__country' % model.sheet_name() rows = model.objects\ .values('survivor_of', country_field)\ .filter(**{model.sheet_name() + '__topic':topic_id})\ .annotate(n=Count('id')) counts.update({(r['survivor_of'], r[country_field]): r['n'] for r in rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, SURVIVOR_OF, self.countries, row_perc=True, sec_cols=18) # ------------------------------------------------------------------------------- # Helper functions # def write_headers(self, ws, title, description): """ Write the headers to the worksheet """ ws.write(0, 0, title, self.heading) ws.write(1, 0, description, self.heading) ws.write(3, 2, self.gmmp_year, self.heading) def 
write_col_headings(self, ws, cols, c=2, r=4): """ :param ws: worksheet to write to :param cols: list of `(col_id, col_title)` tuples of column ids and titles :param r, c: initial position where cursor should start writing to """ for col_id, col_title in cols: ws.write(r, c, clean_title(col_title), self.col_heading) ws.write(r + 1, c, "N") ws.write(r + 1, c + 1, "%") c += 2 def write_primary_row_heading(self, ws, heading, c=0, r=6): """ :param ws: worksheet to write to :param heading: row heading to write :param r, c: position where heading should be written to """ ws.write(r, c, clean_title(heading), self.heading) def tabulate_secondary_cols(self, ws, secondary_counts, cols, rows, row_perc=False, display_cols=None, sec_cols=4): """ :param ws: worksheet to write to :param secondary_counts: dict in following format: {'Primary column heading': Count object, ...} :param list cols: list of `(col_id, col_title)` tuples of column ids and titles :param list rows: list of `(row_id, row_title)` tuples of row ids and titles :param bool row_perc: should percentages by calculated by row instead of column (default: False) :param sec_cols: amount of cols needed for secondary cols """ r, c = 7, 1 # row titles for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 if 'col_title_def' in secondary_counts: # Write definitions of column heading titles ws.write(r-3, c-1, secondary_counts['col_title_def'][0], self.sec_col_heading_def) ws.write(r-2, c-1, secondary_counts['col_title_def'][1], self.col_heading_def) secondary_counts.pop('col_title_def') for field, counts in secondary_counts.iteritems(): ws.merge_range(r-3, c, r-3, c+sec_cols-1, clean_title(field), self.sec_col_heading) self.tabulate(ws, counts, cols, rows, row_perc=row_perc, sec_col=True, display_cols=display_cols, r=7, c=c) c += sec_cols def tabulate(self, ws, counts, cols, rows, row_perc=False, sec_col=False, sec_row=False, display_cols=None, c=1, r=6): """ Emit 
a table. :param ws: worksheet to write to :param dict counts: dict from `(col_id, row_id)` tuples to count for that combination. :param list cols: list of `(col_id, col_title)` tuples of column ids and titles :param list rows: list of `(row_id, row_title)` tuples of row ids and titles :param bool row_perc: should percentages by calculated by row instead of column (default: False) :param sec_col: Are wecreating a secondary column title(default: False) :param sec_row: Are we creating a secondary row title(default: False) :param display_cols: Optional if only a subset of columns should be displayed e.g. only female :param r, c: initial position where cursor should start writing to """ if row_perc: # we'll need percentage by rows row_totals = {} for row_id, row_title in rows: row_totals[row_id] = sum(counts.get((col_id, row_id), 0) for col_id, _ in cols) # noqa # row titles if not sec_col: # Else already written for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 # if only filtered results should be shown # e.g. only print female columns if display_cols: cols = display_cols if 'col_title_def' in counts and not sec_row: ws.write(r - 2, c-1, counts['col_title_def'], self.col_heading_def) counts.pop('col_title_def') # values, written by column for col_id, col_heading in cols: # column title if not sec_row: ws.merge_range(r-2, c, r-2, c+1, clean_title(col_heading), self.col_heading) ws.write(r - 1, c, "N", self.label) ws.write(r - 1, c + 1, "%", self.label) if not row_perc: # column totals # Confirm: Perc of col total or matrix total? 
# total = sum(counts.itervalues()) total = sum(counts.get((col_id, row_id), 0) for row_id, _ in rows) # row values for this column for i, row in enumerate(rows): row_id, row_title = row if row_perc: # row totals total = row_totals[row_id] n = counts.get((col_id, row_id), 0) ws.write(r + i, c, n, self.N) ws.write(r + i, c + 1, p(n, total), self.P) c += 2 Group ws-02 countries by region # Python import StringIO from collections import Counter, OrderedDict # Django from django.core import urlresolvers from django_countries import countries from django.db.models import Count, FieldDoesNotExist from django.contrib.sites.shortcuts import get_current_site # 3rd Party import xlsxwriter # Project from forms.models import ( NewspaperSheet, NewspaperPerson, TelevisionJournalist, person_models, sheet_models, journalist_models) from forms.modelutils import (TOPICS, GENDER, SPACE, OCCUPATION, FUNCTION, SCOPE, YESNO, AGES, SOURCE, VICTIM_OF, SURVIVOR_OF, IS_PHOTOGRAPH, AGREE_DISAGREE, RETWEET, TV_ROLE, MEDIA_TYPES, CountryRegion) from report_details import WS_INFO, REGION_COUNTRY_MAP, MAJOR_TOPICS, TOPIC_GROUPS, GROUP_TOPICS_MAP, FORMATS def has_field(model, fld): try: model._meta.get_field(fld) return True except FieldDoesNotExist: return False def p(n, d): """ Helper to calculate the percentage of n / d, returning 0 if d == 0. """ if d == 0: return 0.0 return float(n) / d def get_regions(): """ Return a (id, region_name) list for all regions """ country_regions = CountryRegion.objects\ .values('region')\ .exclude(region='Unmapped') regions = set(item['region'] for item in country_regions) return [(i, region) for i, region in enumerate(regions)] def get_countries(selected=None): """ Return a (code, country) list for countries captured. 
""" captured_country_codes = set() for model in sheet_models.itervalues(): rows = model.objects.values('country') captured_country_codes.update([r['country'] for r in rows]) return [(code, name) for code, name in list(countries) if code in captured_country_codes] def get_region_countries(region): """ Return a (code, country) list for a region. """ if region == 'ALL': return get_countries() else: country_codes = REGION_COUNTRY_MAP[region] return [(code, name) for code, name in list(countries) if code in country_codes] def get_country_region(country): """ Return a (id, region_name) list to which a country belongs. """ if country == 'ALL': return get_regions() else: return [(0, [k for k, v in REGION_COUNTRY_MAP.items() if country in v][0])] def clean_title(text): """ Return the string passed in stripped of its numbers and parentheses """ if text != "Congo (the Democratic Republic of the)": return text[text.find(')')+1:].lstrip() return text class XLSXDataExportBuilder(): def __init__(self, request): self.domain = "http://%s" % get_current_site(request).domain self.sheet_exclude_fields = ['monitor', 'url_and_multimedia', 'time_accessed', 'country_region'] self.person_exclude_fields = [] self.journalist_exclude_fields =[] self.sheet_fields_with_id = ['topic', 'scope', 'person_secondary', 'inequality_women', 'stereotypes'] self.person_fields_with_id = ['sex', 'age', 'occupation', 'function', 'survivor_of', 'victim_of'] self.journalist_fields_with_id = ['sex', 'age'] def build(self): """ Generate an Excel spreadsheet and return it as a string. 
""" output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) for model in sheet_models.itervalues(): self.create_sheet_export(model, workbook) for model in person_models.itervalues(): self.create_person_export(model, workbook) for model in journalist_models.itervalues(): self.create_journalist_export(model, workbook) workbook.close() output.seek(0) return output.read() def create_sheet_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all() row, col = 0, 0 fields = [field for field in model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.sheet_fields_with_id) row += 1 col = 0 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_sheet_row(obj, ws, row+y, col, fields, self.sheet_fields_with_id) def create_person_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all().prefetch_related(model.sheet_name()) row, col = 0, 0 fields = [field for field in model._meta.fields if not field.name in self.person_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.person_fields_with_id) sheet_model = model._meta.get_field(model.sheet_name()).rel.to sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True) row += 1 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_person_row(obj, ws, row+y, col, fields, self.person_fields_with_id) col += 1 sheet_obj = getattr(obj, model.sheet_name()) ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id) def create_journalist_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all().prefetch_related(model.sheet_name()) row, col = 0, 0 fields = [field for field in model._meta.fields if not 
field.name in self.journalist_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.journalist_fields_with_id) sheet_model = model._meta.get_field(model.sheet_name()).rel.to sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True) row += 1 col = 0 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_journalist_row(obj, ws, row+y, col, fields, self.journalist_fields_with_id) col += 1 sheet_obj = getattr(obj, model.sheet_name()) ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id) def write_ws_titles(self, ws, row, col, fields, fields_with_id, append_sheet=False): """ Writes the column titles to the worksheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name :param append_sheet: Boolean specifying whether the related sheet object needs to be appended to the row. 
""" if not append_sheet: for field in fields: ws.write(row, col, unicode(field.name)) col += 1 if field.name in fields_with_id: ws.write(row, col, unicode(field.name+"_id")) col += 1 ws.write(row, col, unicode('edit_url')) col += 1 else: for field in fields: ws.write(row, col, unicode("sheet_" + field.name)) col += 1 if field.name in fields_with_id: ws.write(row, col, unicode("sheet_" + field.name + "_id")) col += 1 ws.write(row, col, unicode('sheet_edit_url')) col += 1 return ws, col def write_sheet_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Sheet models to the worksheet :param obj: Reference to the model instance which is being written to the sheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: # Certain fields are 1-indexed if field.name == 'country': ws.write(row, col, getattr(obj, field.name).code) elif field.name == 'topic': ws.write(row, col, unicode(TOPICS[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, TOPICS[getattr(obj, field.name)-1][0]) elif field.name == 'scope': ws.write(row, col, unicode(SCOPE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, SCOPE[getattr(obj, field.name)-1][0]) elif field.name == 'person_secondary': ws.write(row, col, unicode(SOURCE[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, SOURCE[getattr(obj, field.name)][0]) elif field.name == 'inequality_women': ws.write(row, col, unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0]) elif field.name == 'stereotypes': ws.write(row, col, unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0]) elif field.name == 'space': ws.write(row, col, 
unicode(SPACE[getattr(obj, field.name)-1][1])) elif field.name == 'retweet': ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1])) else: try: ws.write(row, col, unicode(getattr(obj, field.name))) if field.name in fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row, col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( obj._meta.app_label, obj._meta.model_name), args=(obj.id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col def write_person_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Person models to the worksheet :param obj: Reference to the model instance which is being written to the sheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: # Certain fields are 1-indexed if field.name == 'sex': ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, GENDER[getattr(obj, field.name)-1][0]) elif field.name == 'age': ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, AGES[getattr(obj, field.name)][0]) elif field.name == 'occupation': ws.write(row, col, unicode(OCCUPATION[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, OCCUPATION[getattr(obj, field.name)][0]) elif field.name == 'function': ws.write(row, col, unicode(FUNCTION[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, FUNCTION[getattr(obj, field.name)][0]) elif field.name == 'victim_of' and not getattr(obj, field.name) == None: ws.write(row, col, unicode(VICTIM_OF[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, VICTIM_OF[getattr(obj, field.name)][0]) elif field.name == 'survivor_of' and not getattr(obj, 
field.name) == None: ws.write(row, col, unicode(SURVIVOR_OF[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, SURVIVOR_OF[getattr(obj, field.name)][0]) elif field.name == 'is_photograph': ws.write(row, col, unicode(IS_PHOTOGRAPH[getattr(obj, field.name)-1][1])) elif field.name == 'space': ws.write(row, col, unicode(SPACE[getattr(obj, field.name)-1][1])) elif field.name == 'retweet': ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1])) elif field.name == obj.sheet_name(): ws.write(row, col, getattr(obj, field.name).id) # Get the parent model and id for building the edit link parent_model = field.related.parent_model parent_id = getattr(obj, field.name).id else: try: ws.write(row,col, unicode(getattr(obj, field.name))) if field.name in self.person_fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 # Write link to end of row change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( parent_model._meta.app_label, parent_model._meta.model_name), args=(parent_id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col def write_journalist_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Journalist models to the worksheet :param obj: Reference to the model instance which is being written to the sheet_fields_with_id :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet_fields_with_id :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: if field.name == 'sex': ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, GENDER[getattr(obj, field.name)-1][0]) elif field.name == 'age' and not getattr(obj, field.name) == None: ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1])) col += 1 ws.write(row, 
col, AGES[getattr(obj, field.name)][0]) elif field.name == obj.sheet_name(): ws.write(row, col, getattr(obj, field.name).id) # Get the parent model and id for building the edit link parent_model = field.related.parent_model parent_id = getattr(obj, field.name).id else: try: ws.write(row,col, unicode(getattr(obj, field.name))) if field.name in fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 # Write link to end of row change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( parent_model._meta.app_label, parent_model._meta.model_name), args=(parent_id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col class XLSXReportBuilder: def __init__(self, form): from reports.views import CountryForm, RegionForm self.form = form if isinstance(form, CountryForm): self.countries = form.filter_countries() self.regions = get_country_region(form.cleaned_data['country']) self.report_type = 'country' elif isinstance(form, RegionForm): region = [name for i, name in form.REGIONS if str(i) == form.cleaned_data['region']][0] self.countries = get_region_countries(region) self.regions = [(0, region)] self.report_type = 'region' else: self.countries = get_countries() self.regions = get_regions() self.report_type = 'global' self.country_list = [code for code, name in self.countries] self.region_list = [name for id, name in self.regions] # Various gender utilities self.male_female = [(id, value) for id, value in GENDER if id in [1, 2]] self.male_female_ids = [id for id, value in self.male_female] self.female = [(id, value) for id, value in GENDER if id == 1] self.yes = [(id, value) for id, value in YESNO if id == 'Y'] self.gmmp_year = '2015' def build(self): """ Generate an Excel spreadsheet and return it as a string. 
""" output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) # setup formats self.heading = workbook.add_format(FORMATS['heading']) self.col_heading = workbook.add_format(FORMATS['col_heading']) self.col_heading_def = workbook.add_format(FORMATS['col_heading_def']) self.sec_col_heading = workbook.add_format(FORMATS['sec_col_heading']) self.sec_col_heading_def = workbook.add_format(FORMATS['sec_col_heading_def']) self.label = workbook.add_format(FORMATS['label']) self.N = workbook.add_format(FORMATS['N']) self.P = workbook.add_format(FORMATS['P']) # Use the following for specifying which reports to create durin dev # test_functions = [ # 'ws_01', 'ws_02', 'ws_04', 'ws_05', 'ws_06', 'ws_07', 'ws_08', 'ws_09', 'ws_10', # 'ws_11', 'ws_12', 'ws_13', 'ws_14', 'ws_15', 'ws_16', 'ws_17', 'ws_18', 'ws_19', 'ws_20', # 'ws_21', 'ws_23', 'ws_24', 'ws_25', 'ws_26', 'ws_27', 'ws_28', 'ws_29', 'ws_30', # 'ws_31', 'ws_32', 'ws_34', 'ws_35', 'ws_36', 'ws_38', 'ws_39', 'ws_40', # 'ws_41', 'ws_42', 'ws_43', 'ws_44', 'ws_45', 'ws_46', 'ws_47', 'ws_48',] test_functions = ['ws_02'] sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) for function in test_functions: if self.report_type in sheet_info[function]['reports']: ws = workbook.add_worksheet(sheet_info[function]['name']) self.write_headers(ws, sheet_info[function]['title'], sheet_info[function]['desc']) getattr(self, function)(ws) # ------------------------------------------------------------------- # To ensure ordered worksheets # sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) # for ws_num, ws_info in sheet_info.iteritems(): # if self.report_type in ws_info['reports']: # ws = workbook.add_worksheet(ws_info['name']) # self.write_headers(ws, ws_info['title'], ws_info['desc']) # getattr(self, ws_num)(ws) workbook.close() output.seek(0) return output.read() def ws_01(self, ws): """ Cols: Media Type Rows: Region """ counts = Counter() for media_type, model in sheet_models.iteritems(): 
rows = model.objects\ .values('country_region__region')\ .filter(country_region__region__in=self.region_list)\ .annotate(n=Count('id')) for row in rows: if row['country_region__region'] is not None: # Get media and region id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] region_id = [region[0] for region in self.regions if region[1] == row['country_region__region']][0] counts.update({(media_id, region_id): row['n']}) self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True) def ws_02(self, ws): """ Cols: Media Type Rows: Region, Country """ r = 6 self.write_col_headings(ws, MEDIA_TYPES) counts = Counter() for region_id, region in self.regions: for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('country')\ .filter(country__in=self.country_list)\ .annotate(n=Count('country')) for row in rows: if row['country'] is not None: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] counts.update({(media_id, row['country']): row['n']}) self.write_primary_row_heading(ws, region, r=r) region_countries = [(code, country) for code, country in self.countries if code in REGION_COUNTRY_MAP[region]] self.tabulate(ws, counts, MEDIA_TYPES, region_countries, row_perc=True, sec_row=True, r=r) r += len(region_countries) def ws_04(self, ws): """ Cols: Region, Media type Rows: Major Topic """ secondary_counts = OrderedDict() for region_id, region in self.regions: counts = Counter() for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('topic')\ .filter(country_region__region=region)\ .annotate(n=Count('id')) for r in rows: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] major_topic = TOPIC_GROUPS[r['topic']] counts.update({(media_id, major_topic): r['n']}) secondary_counts[region] = counts self.tabulate_secondary_cols(ws, secondary_counts, 
            MEDIA_TYPES, MAJOR_TOPICS, row_perc=False, sec_cols=10)

    def ws_05(self, ws):
        """
        Cols: Subject sex
        Rows: Major Topic
        """
        counts = Counter()
        for model in person_models.itervalues():
            topic_field = '%s__topic' % model.sheet_name()
            rows = model.objects\
                .values('sex', topic_field)\
                .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            for r in rows:
                # Collapse minor topics into their major topic group.
                counts.update({(r['sex'], TOPIC_GROUPS[r[topic_field]]): r['n']})
        self.tabulate(ws, counts, self.male_female, MAJOR_TOPICS, row_perc=True, display_cols=self.female)

    def ws_06(self, ws):
        """
        Cols: Region, Subject sex: female only
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for model in person_models.itervalues():
                topic_field = '%s__topic' % model.sheet_name()
                rows = model.objects\
                    .values('sex', topic_field)\
                    .filter(**{model.sheet_name() + '__country_region__region':region})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                for r in rows:
                    counts.update({(r['sex'], TOPIC_GROUPS[r[topic_field]]): r['n']})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, MAJOR_TOPICS, row_perc=True, sec_cols=2, display_cols=self.female)

    def ws_07(self, ws):
        """
        Cols: Media Type
        Rows: Subject Sex
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            rows = model.objects\
                .values('sex')\
                .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            for r in rows:
                # Get media id's to assign to counts
                media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                counts.update({(media_id, r['sex']): r['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, self.male_female, row_perc=False)

    def ws_08(self, ws):
        """
        Cols: Subject Sex
        Rows: Scope
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            # Skip models whose sheet has no scope field.
            if 'scope' in model.sheet_field().rel.to._meta.get_all_field_names():
                scope = '%s__scope' % model.sheet_name()
                rows = model.objects\
                    .values('sex', scope)\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r[scope]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, SCOPE, row_perc=True, display_cols=self.female)

    def ws_09(self, ws):
        """
        Cols: Subject Sex
        Rows: Topic
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            topic = '%s__topic' % model.sheet_name()
            rows = model.objects\
                .values('sex', topic)\
                .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            counts.update({(r['sex'], r[topic]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female)

    def ws_10(self, ws):
        """
        Cols: Space
        Rows: Minor Topics

        :: Newspaper Sheets only
        """
        # Calculate row values for column
        counts = Counter()
        rows = NewspaperSheet.objects\
            .values('space', 'topic')\
            .filter(country__in=self.country_list)\
            .annotate(n=Count('id'))
        for r in rows:
            counts.update({(r['space'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, SPACE, MAJOR_TOPICS, row_perc=False)

    def ws_11(self, ws):
        """
        Cols: Equality Rights
        Rows: Major Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            if 'equality_rights' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('equality_rights', 'topic')\
                    .filter(country__in=self.country_list)\
                    .annotate(n=Count('id'))
                for r in rows:
                    counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True)

    def ws_12(self, ws):
        """
        Cols: Region, Equality Rights
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region_name in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                # Some models has no equality rights field
                if 'equality_rights' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('equality_rights', 'topic')\
                        .filter(country_region__region=region_name)\
                        .annotate(n=Count('id'))
                    for r in rows:
                        counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']})
            secondary_counts[region_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, sec_cols=4)

    def ws_13(self, ws):
        """
        Cols: Journalist Sex, Equality Rights
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names():
                    topic = '%s__topic' % model.sheet_name()
                    equality_rights = '%s__equality_rights' % model.sheet_name()
                    rows = model.objects\
                        .values(equality_rights, topic)\
                        .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                        .filter(sex=gender_id)\
                        .annotate(n=Count('id'))
                    for r in rows:
                        counts.update({(r[equality_rights], TOPIC_GROUPS[r[topic]]): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, sec_cols=4)

    def ws_14(self, ws):
        """
        Cols: Sex
        Rows: Occupation
        """
        counts = Counter()
        for model in person_models.itervalues():
            # some Person models don't have an occupation field
            if 'occupation' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'occupation')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, OCCUPATION, row_perc=True, display_cols=self.female)

    def ws_15(self, ws):
        """
        Cols: Sex
        Rows: Function
        """
        counts = Counter()
        for model in person_models.itervalues():
            # some Person models don't have a function field
            if 'function' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'function')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r['function']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, FUNCTION, row_perc=True, display_cols=self.female)

    def ws_16(self, ws):
        """
        Cols: Function, Sex
        Rows: Occupation
        """
        secondary_counts = OrderedDict()
        for function_id, function in FUNCTION:
            counts = Counter()
            for model in person_models.itervalues():
                if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'occupation')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(function=function_id)\
                        .filter(sex__in=self.male_female_ids)\
                        .annotate(n=Count('id'))
                    counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
            secondary_counts[function] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4)

    def ws_17(self, ws):
        """
        Cols: Age, Sex of Subject
        Rows: Function
        """
        secondary_counts = OrderedDict()
        for age_id, age in AGES:
            counts = Counter()
            for model in person_models.itervalues():
                if 'function' in model._meta.get_all_field_names() and 'age' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'function')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(age=age_id)\
                        .filter(sex__in=self.male_female_ids)\
                        .annotate(n=Count('id'))
                    counts.update({(r['sex'], r['function']): r['n'] for r in rows})
            secondary_counts[age] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, FUNCTION, row_perc=False, sec_cols=4)

    def ws_18(self, ws):
        """
        Cols: Sex
        Rows: Age

        :: Only for print
        """
        counts = Counter()
        rows = NewspaperPerson.objects\
            .values('sex', 'age')\
.filter(newspaper_sheet__country__in=self.country_list)\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['age']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, AGES, row_perc=True) def ws_19(self, ws): """ Cols: Sex Rows: Age :: Only for broadcast """ counts = Counter() broadcast = ['Television'] for media_type, model in person_models.iteritems(): if media_type in broadcast: rows = model.objects\ .values('sex', 'age')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['age']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, AGES, row_perc=True) def ws_20(self, ws): """ Cols: Function, Sex Rows: Occupation """ secondary_counts = OrderedDict() functions_count = Counter() # Get top 5 functions for model in person_models.itervalues(): if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names(): rows = model.objects\ .values('function')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .annotate(n=Count('id')) functions_count.update({(r['function']): r['n'] for r in rows}) top_5_function_ids = [id for id, count in sorted(functions_count.items(), key=lambda x: -x[1])[:5]] top_5_functions = [(id, func) for id, func in FUNCTION if id in top_5_function_ids] for func_id, function in top_5_functions: counts = Counter() for model in person_models.itervalues(): if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names(): rows = model.objects\ .values('sex', 'occupation')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(function=func_id)\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['occupation']): r['n'] for r in rows}) secondary_counts[function] = counts self.tabulate_secondary_cols(ws, 
secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4) def ws_21(self, ws): """ Cols: Subject Sex Rows: Victim type """ counts = Counter() for model in person_models.itervalues(): if 'victim_of' in model._meta.get_all_field_names(): rows = model.objects\ .values('sex', 'victim_of')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(sex__in=self.male_female_ids)\ .exclude(victim_of=None)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['victim_of']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, VICTIM_OF, row_perc=True) def ws_23(self, ws): """ Cols: Subject Sex Rows: Survivor type """ counts = Counter() for model in person_models.itervalues(): if 'survivor_of' in model._meta.get_all_field_names(): rows = model.objects\ .values('sex', 'survivor_of')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .exclude(survivor_of=None)\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['survivor_of']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, SURVIVOR_OF, row_perc=True) def ws_24(self, ws): """ Cols: Subject Sex Rows: Family Role """ counts = Counter() for model in person_models.itervalues(): if 'family_role' in model._meta.get_all_field_names(): rows = model.objects\ .values('sex', 'family_role')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['family_role']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False) def ws_25(self, ws): """ Cols: Journalist Sex, Subject Sex Rows: Family Role """ secondary_counts = OrderedDict() for sex_id, sex in self.male_female: counts = Counter() for model in person_models.itervalues(): if 'family_role' in model._meta.get_all_field_names(): sheet_name = model.sheet_name() journo_name = 
model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name() rows = model.objects\ .values('sex', 'family_role')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(**{sheet_name + '__' + journo_name + '__sex':sex_id})\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['family_role']): r['n'] for r in rows}) secondary_counts[sex] = counts secondary_counts['col_title_def'] = [ 'Sexof reporter', 'Sex of news subject'] self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, YESNO, row_perc=False, sec_cols=4) def ws_26(self, ws): """ Cols: Subject Sex Rows: Whether Quoted """ counts = Counter() for model in person_models.itervalues(): if 'is_quoted' in model._meta.get_all_field_names(): rows = model.objects\ .values('sex', 'is_quoted')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['is_quoted']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False) def ws_27(self, ws): """ Cols: Subject Sex Rows: Photographed """ counts = Counter() for model in person_models.itervalues(): if 'is_photograph' in model._meta.get_all_field_names(): rows = model.objects\ .values('sex', 'is_photograph')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) counts.update({(r['sex'], r['is_photograph']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, IS_PHOTOGRAPH, row_perc=False) def ws_28(self, ws): """ Cols: Medium Rows: Region :: Female reporters only """ counts = Counter() for media_type, model in person_models.iteritems(): region = model.sheet_name() + '__country_region__region' rows = model.objects\ .values(region)\ .filter(sex=1)\ .filter(**{region + '__in': self.region_list})\ .annotate(n=Count('id')) for row in rows: # Get media and region id's 
to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] region_id = [r[0] for r in self.regions if r[1] == row[region]][0] counts.update({(media_id, region_id): row['n']}) self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True) def ws_29(self, ws): """ Cols: Regions Rows: Scope :: Female reporters only """ counts = Counter() for model in person_models.itervalues(): sheet_name = model.sheet_name() region = sheet_name + '__country_region__region' scope = sheet_name + '__scope' if 'scope' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names(): rows = model.objects\ .values(region, scope)\ .filter(**{region + '__in': self.region_list})\ .filter(sex=1)\ .annotate(n=Count('id')) for row in rows: region_id = [r[0] for r in self.regions if r[1] == row[region]][0] counts.update({(region_id, row[scope]): row['n']}) self.tabulate(ws, counts, self.regions, SCOPE, row_perc=False) def ws_30(self, ws): """ Cols: Region Rows: Major Topics :: Female reporters only """ counts = Counter() for model in person_models.itervalues(): sheet_name = model.sheet_name() region = sheet_name + '__country_region__region' topic = sheet_name + '__topic' if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names(): rows = model.objects\ .values(region, topic)\ .filter(**{region + '__in': self.region_list})\ .filter(sex=1)\ .annotate(n=Count('id')) for row in rows: region_id = [r[0] for r in self.regions if r[1] == row[region]][0] major_topic = TOPIC_GROUPS[row[topic]] counts.update({(region_id, major_topic): row['n']}) self.tabulate(ws, counts, self.regions, MAJOR_TOPICS, row_perc=False) def ws_31(self, ws): """ Cols: Sex of Reporter Rows: Minor Topics """ counts = Counter() for model in journalist_models.itervalues(): sheet_name = model.sheet_name() topic = sheet_name + '__topic' if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names(): rows = model.objects\ .values('sex', topic)\ 
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r[topic]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female)

    def ws_32(self, ws):
        """
        Cols: Medium
        Rows: Topics

        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(topic)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex=1)\
                    .annotate(n=Count('id'))
                for row in rows:
                    # Map the media type label back to its id for the counts key.
                    media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                    counts.update({(media_id, row[topic]): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, TOPICS, row_perc=False)

    def ws_34(self, ws):
        """
        Cols: Sex of reporter
        Rows: Sex of subject
        """
        counts = Counter()
        for model in person_models.itervalues():
            sheet_name = model.sheet_name()
            journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
            journo_sex = sheet_name + '__' + journo_name + '__sex'
            rows = model.objects\
                .values(journo_sex, 'sex')\
                .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows})
        counts['col_title_def'] = 'Sex of subject'
        self.tabulate(ws, counts, self.male_female, GENDER, row_perc=True, display_cols=self.female)

    def ws_35(self, ws):
        """
        Cols: Sex of reporter
        Rows: Age of reporter

        :: Only for television
        """
        counts = Counter()
        rows = TelevisionJournalist.objects\
            .values('sex', 'age')\
            .filter(television_sheet__country__in=self.country_list)\
            .filter(sex__in=self.male_female_ids)\
            .annotate(n=Count('id'))
        counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True, display_cols=self.female)

    def ws_36(self, ws):
        """
        Cols: Sex of Reporter
        Rows: Focus: about women
        """
        counts = Counter()
        for model in journalist_models.itervalues():
            sheet_name = model.sheet_name()
            about_women = sheet_name + '__about_women'
            if 'about_women' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', about_women)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                counts.update({(r['sex'], r[about_women]): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=True)

    def ws_38(self, ws):
        """
        Cols: Focus: about women
        Rows: Major Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)\
                    .annotate(n=Count('id'))
                for r in rows:
                    counts.update({(r['about_women'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True)

    def ws_39(self, ws):
        """
        Cols: Focus: about women
        Rows: Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)\
                    .annotate(n=Count('id'))
                counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, YESNO, TOPICS, row_perc=True)

    def ws_40(self, ws):
        """
        Cols: Region, Topics
        Rows: Focus: about women
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for model in sheet_models.itervalues():
                if 'about_women' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('topic', 'about_women')\
                        .filter(country_region__region=region)\
                        .annotate(n=Count('id'))
                    counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=2, display_cols=self.yes)

    def ws_41(self, ws):
        """
        Cols: Equality rights raised
        Rows: Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            if 'equality_rights' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('equality_rights', 'topic')\
                    .filter(country__in=self.country_list)\
                    .annotate(n=Count('id'))
                counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, YESNO, TOPICS, row_perc=False)

    def ws_42(self, ws):
        """
        Cols: Region, Equality rights raised
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for model in sheet_models.itervalues():
                if 'equality_rights' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('topic', 'equality_rights')\
                        .filter(country_region__region=region)\
                        .annotate(n=Count('id'))
                    counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=4)

    def ws_43(self, ws):
        """
        Cols: Sex of reporter, Equality rights raised
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for model in journalist_models.itervalues():
                sheet_name = model.sheet_name()
                topic = sheet_name + '__topic'
                equality_rights = sheet_name + '__equality_rights'
                if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(topic, equality_rights)\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(sex=gender_id)\
                        .annotate(n=Count('id'))
                    counts.update({(r[equality_rights], r[topic]): r['n'] for r in rows})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=4)

    def ws_44(self, ws):
        """
        Cols: Sex of reporter, Equality rights raised
        Rows: Region
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for model in journalist_models.itervalues():
                sheet_name = model.sheet_name()
                region = sheet_name + '__country_region__region'
                equality_rights = sheet_name + '__equality_rights'
                if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(equality_rights, region)\
                        .filter(sex=gender_id)\
                        .filter(**{region + '__in':self.region_list})\
                        .annotate(n=Count('id'))
                    for r in rows:
                        # Translate region name back to its id for the counts key.
                        region_id = [id for id, name in self.regions if name == r[region]][0]
                        counts.update({(r[equality_rights], region_id): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.regions, row_perc=False, sec_cols=4)

    def ws_45(self, ws):
        """
        Cols: Sex of news subject
        Rows: Region

        :: Equality rights raised == Yes
        """
        counts = Counter()
        for model in person_models.itervalues():
            if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names():
                region = model.sheet_name() + '__country_region__region'
                equality_rights = model.sheet_name() + '__equality_rights'
                rows = model.objects\
                    .values('sex', region)\
                    .filter(**{region + '__in':self.region_list})\
                    .filter(**{equality_rights:'Y'})\
                    .annotate(n=Count('id'))
                for r in rows:
                    region_id = [id for id, name in self.regions if name == r[region]][0]
                    counts.update({(r['sex'], region_id): r['n']})
        self.tabulate(ws, counts, self.male_female, self.regions, row_perc=True)

    def ws_46(self, ws):
        """
        Cols: Region, Stereotypes
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for model in sheet_models.itervalues():
                if 'stereotypes' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('stereotypes',
                                'topic')\
                        .filter(country_region__region=region)\
                        .annotate(n=Count('id'))
                    for r in rows:
                        counts.update({(TOPIC_GROUPS[r['topic']], r['stereotypes']): r['n']})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True, sec_cols=8)

    def ws_47(self, ws):
        """
        Cols: Stereotypes
        Rows: Major Topics
        """
        counts = Counter()
        for model in sheet_models.itervalues():
            rows = model.objects\
                .values('stereotypes', 'topic')\
                .filter(country__in=self.country_list)\
                .annotate(n=Count('id'))
            for r in rows:
                counts.update({(r['stereotypes'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True)

    def ws_48(self, ws):
        """
        Cols: Sex of reporter, Stereotypes
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for model in journalist_models.itervalues():
                sheet_name = model.sheet_name()
                topic = sheet_name + '__topic'
                stereotypes = sheet_name + '__stereotypes'
                if 'stereotypes' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(stereotypes, topic)\
                        .filter(sex=gender_id)\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .annotate(n=Count('id'))
                    for r in rows:
                        counts.update({(r[stereotypes], TOPIC_GROUPS[r[topic]]): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=False, sec_cols=8)

    def ws_53(self, ws):
        """
        Cols: Topic
        Rows: Country

        :: Internet media type only
        :: Female reporters only
        """
        display_cols = [(id, value) for id, value in GENDER if id==1]
        secondary_counts = OrderedDict()
        model = sheet_models.get('Internet News')
        for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems():
            counts = Counter()
            journo_sex_field = '%s__sex' % model.journalist_field_name()
            rows = model.objects\
                .values(journo_sex_field, 'country')\
                .filter(topic__in=topic_ids)\
                .annotate(n=Count('id'))
            counts.update({(r[journo_sex_field], r['country']): r['n'] for r in rows})
            secondary_counts[major_topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, display_cols=display_cols, sec_cols=2)

    def ws_54(self, ws):
        """
        Cols: Major Topic, sex of subject
        Rows: Country

        :: Internet media type only
        """
        secondary_counts = OrderedDict()
        model = person_models.get('Internet News')
        for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems():
            counts = Counter()
            country_field = '%s__country' % model.sheet_name()
            rows = model.objects\
                .values('sex', country_field)\
                .filter(**{model.sheet_name() + '__topic__in':topic_ids})\
                .annotate(n=Count('id'))
            counts.update({(r['sex'], r[country_field]): r['n'] for r in rows})
            secondary_counts[major_topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, sec_cols=8)

    def ws_55(self, ws):
        """
        Cols: Occupation
        Rows: Country

        :: Show all countries
        :: Only female subjects
        :: Internet media type only
        """
        counts = Counter()
        model = person_models.get('Internet News')
        country_field = '%s__country' % model.sheet_name()
        rows = model.objects\
            .values(country_field, 'occupation')\
            .filter(sex=1)\
            .annotate(n=Count('id'))
        counts.update({(r['occupation'], r[country_field]): r['n'] for r in rows})
        self.tabulate(ws, counts, OCCUPATION, self.countries, row_perc=True)

    def ws_56(self, ws):
        """
        Cols: Function
        Rows: Country

        :: Show all countries
        :: Internet media type only
        """
        counts = Counter()
        model = person_models.get('Internet News')
        country_field = '%s__country' % model.sheet_name()
        rows = model.objects\
            .values(country_field, 'function')\
            .annotate(n=Count('id'))
        counts.update({(r['function'], r[country_field]): r['n'] for r in rows})
        self.tabulate(ws, counts, FUNCTION, self.countries, row_perc=True)

    def ws_57(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, Family role

        :: Show all countries
        :: Internet media type only
        """
        r = 6
self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('sex', 'family_role')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['family_role']): row['n'] for row in rows} # If only captured countries should be displayed use # if counts.keys(): self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_58(self, ws): """ Cols: Sex of subject Rows: Country, is photographed :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('sex', 'is_photograph')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['is_photograph']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, IS_PHOTOGRAPH, row_perc=True, sec_row=True, r=r) r += len(IS_PHOTOGRAPH) def ws_59(self, ws): """ Cols: Sex of reporter Rows: Sex of subject :: Internet media only """ counts = Counter() model = person_models.get('Internet News') sheet_name = model.sheet_name() journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name() journo_sex = sheet_name + '__' + journo_name + '__sex' rows = model.objects\ .values(journo_sex, 'sex')\ .filter(**{model.sheet_name() + '__country__in':self.country_list})\ .annotate(n=Count('id')) counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows}) self.tabulate(ws, counts, GENDER, GENDER, row_perc=False) def ws_60(self, ws): """ Cols: Sex of subject Rows: Country, age :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet 
News') for code, country in self.countries: rows = model.objects\ .values('sex', 'age')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['age']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, AGES, row_perc=True, sec_row=True, r=r) r += len(AGES) def ws_61(self, ws): """ Cols: Sex of subject Rows: Country, is_quoted :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('sex', 'is_quoted')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts = {(row['sex'], row['is_quoted']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_62(self, ws): """ Cols: Topic Rows: Country, equality raised :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('topic', 'equality_rights')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['equality_rights']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_63(self, ws): """ Cols: Topic Rows: Country, stereotypes challenged :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('topic', 'stereotypes')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows} 
self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, AGREE_DISAGREE, row_perc=True, sec_row=True, r=r) r += len(AGREE_DISAGREE) def ws_64(self, ws): """ Cols: Topic Rows: Country, about women :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Internet News') for code, country in self.countries: rows = model.objects\ .values('topic', 'about_women')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['about_women']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_65(self, ws): """ Cols: Topic Rows: Country, tweet or retweet :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'retweet')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['retweet']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, RETWEET, row_perc=False, sec_row=True, r=r) r += len(RETWEET) def ws_66(self, ws): """ Cols: Topic Rows: Country, sex of news subject :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, GENDER) counts = Counter() model = person_models.get('Twitter') topic_field = '%s__topic' % model.sheet_name() for code, country in self.countries: rows = model.objects\ .values(topic_field, 'sex')\ .filter(**{model.sheet_name() + '__country':code})\ .annotate(n=Count('id')) counts.update({(row[topic_field], row['sex']): row['n'] for row in rows}) self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, GENDER, row_perc=True, sec_row=True, r=r) r += len(GENDER) def ws_67(self, ws): """ Cols: Topic Rows: 
Country :: Only female journalists :: Show all countries :: Twitter media type only """ counts = Counter() model = sheet_models.get('Twitter') rows = model.objects\ .values('topic', 'country')\ .filter(**{model.journalist_field_name() + '__sex':1})\ .annotate(n=Count('id')) counts.update({(row['topic'], row['country']): row['n'] for row in rows}) self.tabulate(ws, counts, TOPICS, self.countries, row_perc=True, sec_row=False) def ws_68(self, ws): """ Cols: Topic Rows: Country, about women :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'about_women')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['about_women']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=False, sec_row=True, r=r) r += len(YESNO) def ws_69(self, ws): """ Cols: Topic Rows: Country, stereotypes :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'stereotypes')\ .filter(country=code)\ .annotate(n=Count('id')) counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, AGREE_DISAGREE, row_perc=True, sec_row=True, r=r) r += len(AGREE_DISAGREE) def ws_76(self, ws): """ Cols: Topic, Stereotypes Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in sheet_models.itervalues(): if 'stereotypes' in model._meta.get_all_field_names(): rows = model.objects\ .values('stereotypes', 'country')\ .filter(topic=topic_id)\ .annotate(n=Count('id')) counts.update({(r['stereotypes'], r['country']): r['n'] for r in 
rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, self.countries, row_perc=True, sec_cols=8) def ws_77(self, ws): """ Cols: Topic, Reference to gender equality Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in sheet_models.itervalues(): if 'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('equality_rights', 'country')\ .filter(topic=topic_id)\ .annotate(n=Count('id')) counts.update({(r['equality_rights'], r['country']): r['n'] for r in rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.countries, row_perc=True, sec_cols=4) def ws_78(self, ws): """ Cols: Topic, victim_of Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in person_models.itervalues(): if 'victim_of' in model._meta.get_all_field_names(): country_field = '%s__country' % model.sheet_name() rows = model.objects\ .values('victim_of', country_field)\ .filter(**{model.sheet_name() + '__topic':topic_id})\ .annotate(n=Count('id')) counts.update({(r['victim_of'], r[country_field]): r['n'] for r in rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, VICTIM_OF, self.countries, row_perc=True, sec_cols=18) def ws_79(self, ws): """ Cols: Topic, survivor_of Rows: Country """ secondary_counts = OrderedDict() for topic_id, topic in TOPICS: counts = Counter() for model in person_models.itervalues(): if 'survivor_of' in model._meta.get_all_field_names(): country_field = '%s__country' % model.sheet_name() rows = model.objects\ .values('survivor_of', country_field)\ .filter(**{model.sheet_name() + '__topic':topic_id})\ .annotate(n=Count('id')) counts.update({(r['survivor_of'], r[country_field]): r['n'] for r in rows}) secondary_counts[topic] = counts self.tabulate_secondary_cols(ws, secondary_counts, SURVIVOR_OF, 
self.countries, row_perc=True, sec_cols=18) # ------------------------------------------------------------------------------- # Helper functions # def write_headers(self, ws, title, description): """ Write the headers to the worksheet """ ws.write(0, 0, title, self.heading) ws.write(1, 0, description, self.heading) ws.write(3, 2, self.gmmp_year, self.heading) def write_col_headings(self, ws, cols, c=2, r=4): """ :param ws: worksheet to write to :param cols: list of `(col_id, col_title)` tuples of column ids and titles :param r, c: initial position where cursor should start writing to """ for col_id, col_title in cols: ws.write(r, c, clean_title(col_title), self.col_heading) ws.write(r + 1, c, "N") ws.write(r + 1, c + 1, "%") c += 2 def write_primary_row_heading(self, ws, heading, c=0, r=6): """ :param ws: worksheet to write to :param heading: row heading to write :param r, c: position where heading should be written to """ ws.write(r, c, clean_title(heading), self.heading) def tabulate_secondary_cols(self, ws, secondary_counts, cols, rows, row_perc=False, display_cols=None, sec_cols=4): """ :param ws: worksheet to write to :param secondary_counts: dict in following format: {'Primary column heading': Count object, ...} :param list cols: list of `(col_id, col_title)` tuples of column ids and titles :param list rows: list of `(row_id, row_title)` tuples of row ids and titles :param bool row_perc: should percentages by calculated by row instead of column (default: False) :param sec_cols: amount of cols needed for secondary cols """ r, c = 7, 1 # row titles for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 if 'col_title_def' in secondary_counts: # Write definitions of column heading titles ws.write(r-3, c-1, secondary_counts['col_title_def'][0], self.sec_col_heading_def) ws.write(r-2, c-1, secondary_counts['col_title_def'][1], self.col_heading_def) secondary_counts.pop('col_title_def') for field, counts 
in secondary_counts.iteritems(): ws.merge_range(r-3, c, r-3, c+sec_cols-1, clean_title(field), self.sec_col_heading) self.tabulate(ws, counts, cols, rows, row_perc=row_perc, sec_col=True, display_cols=display_cols, r=7, c=c) c += sec_cols def tabulate(self, ws, counts, cols, rows, row_perc=False, sec_col=False, sec_row=False, display_cols=None, c=1, r=6): """ Emit a table. :param ws: worksheet to write to :param dict counts: dict from `(col_id, row_id)` tuples to count for that combination. :param list cols: list of `(col_id, col_title)` tuples of column ids and titles :param list rows: list of `(row_id, row_title)` tuples of row ids and titles :param bool row_perc: should percentages by calculated by row instead of column (default: False) :param sec_col: Are wecreating a secondary column title(default: False) :param sec_row: Are we creating a secondary row title(default: False) :param display_cols: Optional if only a subset of columns should be displayed e.g. only female :param r, c: initial position where cursor should start writing to """ if row_perc: # we'll need percentage by rows row_totals = {} for row_id, row_title in rows: row_totals[row_id] = sum(counts.get((col_id, row_id), 0) for col_id, _ in cols) # noqa # row titles if not sec_col: # Else already written for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 # if only filtered results should be shown # e.g. only print female columns if display_cols: cols = display_cols if 'col_title_def' in counts and not sec_row: ws.write(r - 2, c-1, counts['col_title_def'], self.col_heading_def) counts.pop('col_title_def') # values, written by column for col_id, col_heading in cols: # column title if not sec_row: ws.merge_range(r-2, c, r-2, c+1, clean_title(col_heading), self.col_heading) ws.write(r - 1, c, "N", self.label) ws.write(r - 1, c + 1, "%", self.label) if not row_perc: # column totals # Confirm: Perc of col total or matrix total? 
# total = sum(counts.itervalues()) total = sum(counts.get((col_id, row_id), 0) for row_id, _ in rows) # row values for this column for i, row in enumerate(rows): row_id, row_title = row if row_perc: # row totals total = row_totals[row_id] n = counts.get((col_id, row_id), 0) ws.write(r + i, c, n, self.N) ws.write(r + i, c + 1, p(n, total), self.P) c += 2
__script__.title = 'KKB Measurement Script'
__script__.version = '2.1'
# NOTE(review): this is a Jython/Gumtree instrument script; Par/Act/Group,
# DatasetFactory, Runnable and __UI__ are injected by the Gumtree scripting
# environment rather than imported here.
from gumpy.commons import sics
from org.gumtree.gumnix.sics.control import ServerStatus
from pickle import Pickler, Unpickler
import time
from math import log as ln
from math import exp, isnan, isinf, sin
from __builtin__ import max as builtin_max
from __builtin__ import min as builtin_min
from org.eclipse.swt.widgets import FileDialog
from org.eclipse.swt import SWT
from org.eclipse.swt.widgets import Display
from java.io import File
import time
from Internal import sample_stage

''' Disable dataset caching '''
DatasetFactory.__cache_enabled__ = False

# SWT file-dialog modes used by open_file_dialog() below
SINGLE_TYPE = SWT.SINGLE
SAVE_TYPE = SWT.SAVE
MULTI_TYPE = SWT.MULTI

class __Display_Runnable__(Runnable):
    # Runnable that opens an SWT FileDialog on the UI thread and records the
    # user's selection; polled by open_file_dialog() from the calling thread.
    def __init__(self, type=SINGLE_TYPE, ext=['*.*']):
        self.filename = None   # full path of the single selection (also poll flag)
        self.filenames = None  # file names only, for MULTI dialogs
        self.path = None       # directory of the selection
        self.type = type       # SWT dialog style (SINGLE/SAVE/MULTI)
        self.ext = ext         # filter extensions, e.g. ['*.kkb']

    def run(self):
        # Executed on the SWT display thread via Display.asyncExec().
        global __UI__
        dialog = FileDialog(__UI__.getShell(), self.type);
        dialog.setFilterExtensions(self.ext)
        dialog.open()
        # filename becomes non-None even on cancel; callers must tolerate that
        self.filename = dialog.getFilterPath() + File.separator + dialog.getFileName()
        self.filenames = dialog.getFileNames()
        self.path = dialog.getFilterPath()

def open_file_dialog(type=SWT.SINGLE, ext=['*.*']):
    """Block until the user picks file(s) in an SWT dialog.

    Returns a list of full paths for SWT.MULTI dialogs, otherwise a single
    path string. Busy-waits (0.5 s poll) on the flag set by the UI thread.
    """
    __display_run__ = __Display_Runnable__(type, ext)
    Display.getDefault().asyncExec(__display_run__)
    while __display_run__.filename is None:
        time.sleep(0.5)
    if type == SWT.MULTI:
        fns = []
        for fn in __display_run__.filenames:
            fns.append(__display_run__.path + '/' + fn)
        return fns
    return __display_run__.filename

#
# templates
# zero-angle reference per monochromator crystal (degrees)
reference_templates_dict = {}
reference_templates_dict['Si111'] = 180.3565
reference_templates_dict['Si311'] = -0.4100

# each template: [title, acquisition mode, step style,
#                 then one or more [points, step size, preset, max time] rows]
steps_templates_list = []
# steps_templates['Background'] = [
#     'time', 'logscale',
#     [20, 6.0e-5, 1200, 1200]]
# steps_templates['----------'] = [
#     'time', 'logscale',
#     [0, 0, 0, 0]]
steps_templates_list.append([
    'Si111: Logarithmic Overview Scan',
    'time', 'logscale',
    [17, 1.20e-4, 1, 1200],
    [30, 22.0, 20, 1200]])
steps_templates_list.append([
    'Si111: Logarithmic Scan (few features)',
    'ba', 'logscale',
    [33, 6.0e-5, 1000, 1200],
    [34, 20.0, 1000, 1200]])
steps_templates_list.append([
    'Si111: Logarithmic Scan (fine features)',
    'ba', 'logscale',
    [33, 6.0e-5, 1000, 1200],
    [65, 10.0, 1000, 1200]])
steps_templates_list.append([
    'Si111: Logarithmic Taiki Scan (15 points)',
    'ba', 'logscale',
    [2, 6.0e-5, 1000, 60],
    [1, 10000, 1000, 60],
    [10, 25, 1000, 60]])
'''
steps_templates_list.append([
    'Si111: Kinetic Scan 4 points',
    'time', 'logscale',
    [ 0, 6.0e-5, 1, 1200],
    [1, 5.0e-3, 180, 1200],
    [3, 1.5e-2, 180, 1200]])
'''
steps_templates_list.append([
    '----------',
    'time', 'logscale',
    [0, 0, 0, 0]])
steps_templates_list.append([
    'Si311: Logarithmic Overview Scan',
    'time', 'logscale',
    [17, 2.0e-5, 1, 1200],
    [30, 23.0, 20, 1200]])
steps_templates_list.append([
    'Si311: Logarithmic Scan (few features, broadened peak, 80+29)',
    'ba', 'logscale',
    [80, 2e-5, 1000, 1200],
    [29, 15.0, 1000, 1200]])
steps_templates_list.append([
    'Si311: Logarithmic Scan (few features, broadened peak, 40+33)',
    'ba', 'logscale',
    [40, 2e-5, 1000, 1200],
    [33, 10.0, 1000, 1200]])
steps_templates_list.append([
    'Si311: Logarithmic Scan (few features, Taiki)',
    'ba', 'logscale',
    [33, 2e-5, 1000, 1200],
    [25, 20.0, 1000, 1200]])

# verify the sample-stage declarations before building the stage pool
ret = sample_stage.check_declarations()
if not ret[0] :
    open_warning(ret[1])
reload(sample_stage)
SAMPLE_STAGES = sample_stage.StagePool()

#
# export path
__EXPORT_PATH__ = 'V:/shared/KKB Logbook/Temp Plot Data Repository/'
if not os.path.exists(__EXPORT_PATH__):
    os.makedirs(__EXPORT_PATH__)

#
# User Details
user_name = Par('string', 'Christine', options=['Christine', 'Lela', 'Jitendra'])
user_name.title = 'Name'
user_email = Par('string', 'cre@ansto.gov.au', options=['cre@ansto.gov.au', 'liliana.decampo@ansto.gov.au', 'jtm@ansto.gov.au'])
user_email.title = 'EMail'
g0 = Group('User Details')
g0.numColumns = 2
g0.add(user_name, user_email)

#
# Sample Details
# thickness is disabled for the empty-cell/empty-beam pseudo samples
sample_name = Par('string', 'UNKNOWN',
                  options=['Empty Cell', 'Empty Beam'],
                  command="sample_thickness.enabled = sample_name.value not in ['Empty Cell', 'Empty Beam']")
sample_name.title = 'Name'
sample_description = Par('string', 'UNKNOWN')
sample_description.title = 'Description'
sample_thickness = Par('string', '1', options=['0.01', '0.1', '1.0', '10.0'])
sample_thickness.title = 'Thickness (mm)'
g0 = Group('Sample Details')
g0.numColumns = 2
g0.add(sample_name, sample_thickness, sample_description)
# Group('Sample Details').add(sample_name, sample_description, sample_thickness)

#
# Crystal
# infer the crystal from the m2om motor position; leave 'UNKNOWN' if SICS
# cannot be reached (best effort, hence the silent except)
crystal_name = Par('string', 'UNKNOWN')
crystal_name.title = 'Name'
crystal_name.enabled = False
try:
    m2om = sics.getValue('/instrument/crystal/m2om').getFloatData()
    if m2om > 90:
        crystal_name.value = 'Si111 (4.74 Angstroms)'
    else:
        crystal_name.value = 'Si311 (2.37 Angstroms)'
except:
    pass
g0 = Group('Crystal Info')
g0.numColumns = 2
g0.add(crystal_name)
# CRYSTAL END #############################################

# SLIT 1 #######################################################################
def updateOffset(gapBox, offsetBox):
    """Disable the offset widget while the gap is 'fully closed/opened'."""
    offsetBox.enabled = 'fully' not in gapBox.value

def getSlitGapAndOffset(aPath, a0, bPath, b0):
    """Read two opposing slit-blade positions from SICS and return
    (gap, offset) relative to the given zero positions; (nan, nan) when
    SICS is unavailable (best effort, hence the bare except)."""
    try:
        a = sics.getValue(aPath).getFloatData()
        b = sics.getValue(bPath).getFloatData()
        gap = (a - a0 - (b - b0)) / 1.0
        offset = (a - a0 + (b - b0)) / 2.0
        return (gap, offset)
    except:
        return (float('nan'), float('nan'))

# slit-1 blade zero positions depend on the crystal in the beam
crystal = str(crystal_name.value)
if 'Si111' in crystal:
    ss1r0 = 28.35
    ss1l0 = 27.75
elif 'Si311' in crystal:
    ss1r0 = -9.16
    ss1l0 = -9.76
else:
    ss1r0 = float('nan')
    ss1l0 = float('nan')
ss1u0 = -8.04
ss1d0 = -7.30
(ss1vg, ss1vo) = getSlitGapAndOffset('/instrument/slits/ss1u', ss1u0, '/instrument/slits/ss1d', ss1d0)
(ss1hg, ss1ho) = getSlitGapAndOffset('/instrument/slits/ss1r', ss1r0, '/instrument/slits/ss1l', ss1l0)
pss_ss1vg = Par('string', '%.1f' % ss1vg, options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'], command='updateOffset(pss_ss1vg, pss_ss1vo)')
pss_ss1vg.title = 'Vertical Gap (mm)'
# pss_ss1vg.colspan = 50
pss_ss1vo = Par('float', ss1vo)
pss_ss1vo.title = 'Vertical Offset (mm)'
# pss_ss1vo.colspan = 50
pss_ss1hg = Par('string', '%.1f' % ss1hg, options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'], command='updateOffset(pss_ss1hg, pss_ss1ho)')
pss_ss1hg.title = 'Horizontal Gap (mm)'
# pss_ss1hg.colspan = 50
pss_ss1ho = Par('float', ss1ho)
pss_ss1ho.title = 'Horizontal Offset (mm)'
# pss_ss1ho.colspan = 50
g0 = Group('Sample Slit Settings')
g0.numColumns = 2
g0.add(pss_ss1vg, pss_ss1vo, pss_ss1hg, pss_ss1ho)
# SLIT 1 END #######################################################################

## Scan parameters ##########################################################################################################
scan_variable = Par('string', 'm2om [deg]', options=[
    #'pmom [deg]', 'pmchi [deg]',
    'm1om [deg]', 'm1chi [deg]', 'm1x [mm]',
    'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]', 'mdet [mm]',
    'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]',
    'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]',
    'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]',
    'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]'],
    command="scan_variable_plot.value = scan_variable.value")
scan_variable.title = 'Scan Variable'
scan_variable.colspan = 25
scan_reference = Par('float', '0.0')
scan_reference.title = 'Zero Angle'
scan_reference.colspan = 25
# preset the zero angle from the crystal-specific reference table
for key in reference_templates_dict.keys():
    if key in crystal_name.value:
        scan_reference.value = reference_templates_dict[key]
scan_mode = Par('string', 'ba', options=['ba', 'time'], command='setScanMode()')
scan_mode.title = 'Acquisition Mode'
scan_mode.colspan = 25
scan_min_time = Par('int', '5')
scan_min_time.title = 'Min Time (sec)'
scan_min_time.colspan = 25
empty_label = Par('label', '')
empty_label.colspan = 25
scan_sample_stage = Par('string', '', command = 'sample_stage_changed()')
scan_sample_stage.colspan = 25
scan_sample_stage.title = 'Sample Stage'
scan_sample_stage.options = SAMPLE_STAGES.get_stage_names()
current_stage = SAMPLE_STAGES.get_stage_in_service()
if not current_stage is None:
    scan_sample_stage.value = current_stage.get_name()
scan_sample_position = Par('string', 'fixed')
scan_sample_position.title = 'Sample Position'
scan_sample_position.colspan = 25
scan_sample_position.options = ['fixed', '----------']
if not current_stage is None:
    scan_sample_position.options += current_stage.get_sample_indexes()
logscale_position = Par('bool', False, command='setStepTitles()')
logscale_position.title = 'Logarithmic Steps'
logscale_position.colspan = 25
negative_steps = Par('bool', False)
negative_steps.title = 'Negative Steps'
negative_steps.colspan = 25
steps_label = Par('label', 'Please choose scan template or adjust steps manually: ')
steps_label.colspan = 200
steps_templates = Par('string', '', options=[item[0] for item in steps_templates_list], command='setTemplate()')
steps_templates.title = 'Scan Template'
steps_templates.colspan = 100
early_exit_enabled = Par('bool', True, command = "set_early_exit_enabled()")
early_exit_enabled.title = "Enable Early Exit"
early_exit_enabled.colspan = 25
background_frames = Par('int', 3)
background_frames.title = 'Background Frames'
background_frames.colspan = 25
background_threshold = Par('float', 0.26)
background_threshold.title = 'Background Threshold'
background_threshold.colspan = 25
# steps_space = Par('space', '')
# steps_space.colspan = 10
g0 = Group('Scan Parameters')
g0.numColumns = 100 # 9
g0.add(scan_variable, scan_mode, scan_reference, early_exit_enabled, \
    logscale_position, scan_min_time, scan_sample_stage, background_frames, \
    negative_steps, empty_label, scan_sample_position, background_threshold, \
    steps_label, steps_templates)

def sample_stage_changed():
    """Refresh the sample-position options when another stage is selected."""
    stage = SAMPLE_STAGES.get_stage_by_name(str(scan_sample_stage.value))
    # scan_sample_position.value = 'fixed'
    if not stage is None:
        scan_sample_position.options = ['fixed', '----------'] + stage.get_sample_indexes()
    else:
        scan_sample_position.options = ['fixed', '----------']

def set_early_exit_enabled():
    """Enable/disable the early-exit widgets together with the checkbox."""
    if early_exit_enabled.value:
        background_frames.enabled = True
        background_threshold.enabled = True
    else:
        background_frames.enabled = False
        background_threshold.enabled = False

# four editable step rows; each row is a dict of its five widgets
stepInfo = []
for i in xrange(4):
    steps_e = Par('bool', True, command='setEnabled(%i)' % i)
    steps_e.title = '(%i)' % (i + 1)
    steps_e.colspan = 10
    steps_m = Par('int', 0, command='clearScanTemplateSelection()')
    steps_m.title = 'Number of points'
    steps_m.colspan = 20
    steps_s = Par('float', 0, command='clearScanTemplateSelection()')
    steps_s.title = 'Step Size [deg]'
    steps_s.colspan = 20
    steps_p = Par('int', 0, command='clearScanTemplateSelection()')
    steps_p.title = 'Mode Preset'
    steps_p.colspan = 25
    steps_t = Par('int', 1200, command='clearScanTemplateSelection()')
    steps_t.title = 'Max Time'
    steps_t.colspan = 25
    stepInfo.append({'enabled': steps_e, 'dataPoints':steps_m, 'stepSize':steps_s, 'preset':steps_p, 'maxTime':steps_t})
    g0.add(steps_e, steps_m, steps_s, steps_p, steps_t)

def clearScanTemplateSelection():
    """Deselect the template once any step value is edited by hand."""
    steps_templates.value = None

btnPlotSteps = Act('btnPlotSteps_clicked()', 'Plot Measurement Steps') # 'compare measurement steps with previous scan')
btnPlotSteps.colspan = 50
cnfg_save_btn = Act('saveConfiguration()', 'Save Single Scan Parameters')
cnfg_save_btn.colspan = 50
btnTimeEstimation = Act('runTimeEstimation()', 'Time Estimation with selected Data Set')
btnTimeEstimation.colspan = 50
txtTimeEstimation = Par('int', '0')
txtTimeEstimation.title = 'Time Estimation (min)'
txtTimeEstimation.enabled = False
txtTimeEstimation.colspan = 50
g0.add(btnPlotSteps, cnfg_save_btn, btnTimeEstimation, txtTimeEstimation)

def runTimeEstimation():
    """Estimate total scan time (minutes) into txtTimeEstimation.

    In 'time' mode the presets are summed directly; in 'ba' mode the count
    rate of the selected reference dataset is interpolated at each scan angle
    to convert count presets into seconds, clamped to [min time, max time].
    """
    if str(scan_mode.value) == 'time':
        scan = getScan()
        times = scan['presets']
        txtTimeEstimation.value = int((sum(times) + len(times) * 25) / 60.0) # 25 seconds for each move
        return
    fns = []
    for sds in __DATASOURCE__.getSelectedDatasets():
        fns.append(sds.getLocation())
    if len(fns) != 1:
        print 'select one dataset'
        return
    ds = openDataset(fns[0])
    # scan variable name without its unit suffix, e.g. 'm2om [deg]' -> 'm2om'
    scanVariable = str(scan_variable.value)
    scanVariable = scanVariable[:scanVariable.find(' ')]
    scanVariable = ds[scanVariable]
    # sort angles ascending, remembering the permutation for the count data
    info = sorted(enumerate(scanVariable), key=lambda item:item[1])
    scanVariable = [item[1] for item in info]
    shape = ds.shape
    if shape[0] <= 1:
        print 'Must have at least 2 scan positions'
        return
    n = shape[0]
    # tubes
    data = zeros(n)
    tids = []
    if combine_tube0.value: tids.append(0)
    if combine_tube1.value: tids.append(1)
    if combine_tube2.value: tids.append(2)
    if combine_tube3.value: tids.append(3)
    if combine_tube4.value: tids.append(4)
    if combine_tube6.value: tids.append(6)
    for tid in tids:
        if ds.hmm.ndim == 4:
            data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm
        else:
            data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy
    # counts -> count rate (per second)
    if data.size == 1:
        data[0] = data[0] * 1.0 / ds.time
    else:
        data[:] = data[:] * 1.0 / ds.time
    data[:] = [data[item[0]] for item in info] # sorting
    # angle and count rate
    a0 = [float(angle) for angle in scanVariable]
    r0 = [float(rate) for rate in data[:]]
    # angle, counts, max time and min time
    model = ConfigurationModel()
    scan = model.scan
    tMin = model.min_time
    a1 = scan['angles']
    c1 = scan['presets']
    t1 = scan['maxTimes']
    total = 0.0
    for i in xrange(len(a1)):
        try:
            rate = sample(a0, r0, a1[i])
            # NOTE(review): local 'time' shadows the time module inside this
            # function; also raises ZeroDivisionError if rate is 0 -- confirm
            # reference datasets always have nonzero rates.
            time = c1[i] / rate
            if time < tMin:
                total += tMin
            elif time > t1[i]:
                total += t1[i]
            else:
                total += time
        except ValueError as e:
            # angles outside the reference scan get charged their max time
            if e.message == "OutOfRange":
                total += t1[i] # add max time
            else:
                raise
    txtTimeEstimation.value = int(total / 60.0)

def sample(x0, y0, x1):
    """Linearly interpolate y at x1 from the samples (x0, y0).

    Assumes x0 is sorted ascending (callers pass sorted data). Raises
    ValueError("OutOfRange") when x1 lies outside [min(x0), max(x0)].
    """
    from __builtin__ import max, min
    if len(x0) != len(y0):
        raise Exception("len(x0) != len(y0)")
    x0_min = min(x0)
    x0_max = max(x0)
    if len(x0) < 2:
        raise Exception("len(x0) < 2")
    if x0_min >= x0_max:
        raise Exception("x0_min >= x0_max")
    if x1 < x0_min:
        raise ValueError("OutOfRange")
    if x0_max < x1:
        raise ValueError("OutOfRange")
    i0 = 0
    i1 = 1
    x0i0 = x0[i0]
    y0i0 = y0[i0]
    x0i1 = x0[i1]
    y0i1 = y0[i1]
    # in case first x values are equal
    while x0i0 == x0i1:
        i1 += 1
        x0i1 = x0[i1]
        y0i1 = y0[i1]
    # not iterable
    # advance the bracketing segment until it contains x1
    while x0i1 < x1:
        x0i0 = x0i1
        y0i0 = y0i1
        i1 += 1
        x0i1 = x0[i1]
        y0i1 = y0[i1]
    return y0i0 + (x1 - x0i0) * (y0i1 - y0i0) / (x0i1 - x0i0)
## Scan parameters END #########################################################################

## RUN ##############################################
cnfg_load_btn = Act('loadConfigurations()', 'Load Multiple Scan Parameters')
cnfg_lookup = dict()  # configuration display name -> file path
cnfg_options = Par('string', '', options=[''], command="applyConfiguration()")
cnfg_options.title = 'Read'
start_scan = Act('startScan(ConfigurationModel())', '############# Run Single Scan #############')
cnfg_run_btn = Act('runConfigurations()', '############# Run Multiple Scans #############')
g0 = Group('Execute Scans')
g0.numColumns = 1
g0.add(start_scan, cnfg_load_btn, cnfg_options, cnfg_run_btn)
## Save/Load Configuration END############################################################################

def saveConfiguration():
    """Pickle the current ConfigurationModel's public attributes to a .kkb file."""
    file = open_file_dialog(type=SAVE_TYPE, ext=['*.kkb'])
    try:
        fh = open(file, 'w')
    except:
        print 'not saved'
        return
    try:
        p = Pickler(fh)
        # header
        p.dump('KKB')
        # content: public, non-method attributes as name/value pairs
        model = ConfigurationModel()
        for att in dir(model):
            att_value = getattr(model, att)
            if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                p.dump(att)
                p.dump(att_value)
        print 'saved'
    finally:
        fh.close()

def loadConfigurations():
    """Validate user-selected .kkb files and register them in cnfg_lookup/cnfg_options."""
    fileList = open_file_dialog(type=MULTI_TYPE, ext=['*.kkb'])
    if not fileList:
        return
    finalDict = dict()
    finalNames = []
    for path in fileList:
        fh = open(path, 'r')
        try:
            p = Unpickler(fh)
            if p.load() != 'KKB':
                print 'ERROR:', os.path.basename(path)
            else:
                model = ConfigurationModel()
                # set defaults
                model.negative = False # old models may not have this attribute
                for att in dir(model):
                    att_value = getattr(model, att)
                    if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                        if p.load() != att:
                            print 'FORMAT ERROR:', os.path.basename(path)
                            break
                        setattr(model, att, p.load())
                else:
                    # for/else: only register files that parsed completely
                    name = os.path.basename(path)
                    finalDict[name] = path
                    finalNames.append(name)
        finally:
            fh.close()
    cnfg_lookup.clear()
    cnfg_lookup.update(finalDict)
    cnfg_options.value = finalNames[0] if finalNames else ''
    cnfg_options.options = finalNames
    # time.sleep(0.5)

def applyConfiguration():
    """Load the selected configuration file and apply it to the GUI widgets."""
    file = str(cnfg_options.value)
    if not file:
        return
    fh = open(cnfg_lookup[file], 'r')
    try:
        p = Unpickler(fh)
        if p.load() != 'KKB':
            print 'ERROR:', file
        else:
            model = ConfigurationModel()
            for att in dir(model):
                att_value = getattr(model, att)
                if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                    if p.load() != att:
                        print 'FORMAT ERROR:', file
                        break
                    setattr(model, att, p.load())
            else:
                # print 'read:', file
                model.apply()
    finally:
        fh.close()

def runConfigurations():
    """Run every loaded configuration in sequence via startScan()."""
    for file in cnfg_options.options:
        fh = open(cnfg_lookup[file], 'r')
        try:
            # temporarily clear the widget command so setting .value does not
            # re-trigger applyConfiguration() recursively
            cnfg_options.command = ''
            cnfg_options.value = file
            applyConfiguration()
            p = Unpickler(fh)
            if p.load() != 'KKB':
                print 'ERROR:', file
            else:
                model = ConfigurationModel()
                for att in dir(model):
                    att_value = getattr(model, att)
                    if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                        if p.load() != att:
                            print 'FORMAT ERROR:', file
                            break
                        setattr(model, att, p.load())
                else:
                    print 'run:', file
                    startScan(model)
        finally:
            cnfg_options.command = 'applyConfiguration()'
            fh.close()

#
# Plot
tubes_label = Par('label', 'Main Detector:')
tubes_label.colspan = 1
combine_tube0 = Par('bool', True)
combine_tube0.title = ' Tube 0'
combine_tube0.colspan = 1
combine_tube1 = Par('bool', True)
combine_tube1.title = ' Tube 1'
combine_tube1.colspan = 1
combine_tube2 = Par('bool', True)
combine_tube2.title = ' Tube 2'
combine_tube2.colspan = 1
combine_tube3 = Par('bool', True)
combine_tube3.title = ' Tube 3'
combine_tube3.colspan = 1
combine_tube4 = Par('bool', True)
combine_tube4.title = ' Tube 4'
combine_tube4.colspan = 1
combine_tube6 = Par('bool', False)
combine_tube6.title = ' Tube 6'
combine_tube6.colspan = 1
combine_mode = Par('string', 'combined', options=['individual', 'combined'])
combine_mode.title = ' Mode'
combine_mode.colspan = 1
trans_tube_label = Par('label', 'Trans Detector: ')
trans_tube_label.colspan = 2
check_tube9 = Par('bool', True)
check_tube9.title = ' Tube 9: Si (311)'
check_tube9.colspan = 2
check_tube10 = Par('bool', False)
check_tube10.title = ' Tube 10: Si (111)'
check_tube10.colspan = 2
# steps_space = Par('space', '')
# steps_space.colspan = 12
scan_variable_plot = Par('string', 'm2om [deg]', options=[
    'pmom [deg]', 'pmchi [deg]',
    'm1om [deg]', 'm1chi [deg]', 'm1x [mm]',
    'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]', 'mdet [mm]',
    'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]',
    'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]',
    'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]',
    'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]'])
scan_variable_plot.title = 'Scan Variable'
scan_variable_plot.colspan = 1
scan_variable_sorting = Par('bool', True)
scan_variable_sorting.title = 'Sorting'
scan_variable_sorting.colspan = 1
btnPlot = Act('btnPlot_clicked()', 'Plot Selected Data Set')
btnPlot.colspan = 8
g0 = Group('Plotting')
g0.numColumns = 7
g0.add(tubes_label, combine_tube0, combine_tube1, combine_tube2, combine_tube3, combine_tube4, combine_tube6, combine_mode, trans_tube_label, check_tube9, check_tube10, scan_variable_plot, scan_variable_sorting, btnPlot)

# export to csv
# btnExport = Act('export_clicked()', 'Export to CSV')

################################# SLIT 2 ##########################################################
# slit-2 blade zero positions (historical values kept in the comments)
ss2u0 = 0 # 2.00
ss2d0 = 0 # 1.40
ss2l0 = 0 # 5 0.50
ss2r0 = 0 # -2 -1.00
(ss2vg, ss2vo) = getSlitGapAndOffset('/instrument/slits/ss2u', ss2u0, '/instrument/slits/ss2d', ss2d0)
(ss2hg, ss2ho) = getSlitGapAndOffset('/instrument/slits/ss2r', ss2r0, '/instrument/slits/ss2l', ss2l0)
pss_ss2vg = Par('string', '%.1f' % ss2vg,
options=pss_ss1vg.options, command='updateOffset(pss_ss2vg, pss_ss2vo)') pss_ss2vg.title = 'Vertical Opening (mm)' pss_ss2vo = Par('float', ss2vo) pss_ss2vo.title = 'Vertical Offset (mm)' pss_ss2hg = Par('string', '%.1f' % ss2hg, options=pss_ss1hg.options, command='updateOffset(pss_ss2hg, pss_ss2ho)') pss_ss2hg.title = 'Horizontal Opening (mm)' pss_ss2ho = Par('float', ss2ho) pss_ss2ho.title = 'Horizontal Offset (mm)' g0 = Group('Post-Sample Slit') g0.numColumns = 2 g0.add(pss_ss2vg, pss_ss2vo, pss_ss2hg, pss_ss2ho) ################################# SLIT 2 END ########################################################## def setStepTitles(): if logscale_position.value: for stepInfoItem in stepInfo[1:]: stepInfoItem['stepSize'].title = "Step Factor [%]" else: for stepInfoItem in stepInfo[1:]: stepInfoItem['stepSize'].title = "Step Size [deg]" __UI__.updateUI() def setTemplate(): try: matches = [item for item in steps_templates_list if item[0] == steps_templates.value] if len(matches) != 1: steps_templates.value = None return template = matches[0] # ignore '----' if template[0][0] == '-': steps_templates.value = None return scan_mode.value = template[1] if template[2] == 'logscale': logscale_position.value = True elif template[2] == 'linear': logscale_position.value = False setStepTitles() # by default templates measure in positive direction negative_steps.value = False setScanMode() headers = 3 for i in xrange(len(template) - headers): templateItem = template[i + headers] stepInfoItem = stepInfo[i] stepInfoItem['enabled' ].value = True stepInfoItem['dataPoints'].enabled = True stepInfoItem['dataPoints'].value = templateItem[0] stepInfoItem['stepSize' ].enabled = True stepInfoItem['stepSize' ].value = templateItem[1] stepInfoItem['preset' ].enabled = True stepInfoItem['preset' ].value = templateItem[2] stepInfoItem['maxTime' ].enabled = scan_min_time.enabled stepInfoItem['maxTime' ].value = templateItem[3] for i in xrange(len(template) - headers, len(stepInfo)): 
            # Tail of setTemplate(): the selected template defined fewer step
            # rows than the UI provides, so switch off and disable every
            # remaining row.
            stepInfoItem = stepInfo[i]
            stepInfoItem['enabled'   ].value   = False
            stepInfoItem['dataPoints'].enabled = False
            stepInfoItem['stepSize'  ].enabled = False
            stepInfoItem['preset'    ].enabled = False
            stepInfoItem['maxTime'   ].enabled = False
    except:
        # NOTE(review): broad except silently swallows any template error --
        # presumably so a malformed template never breaks the UI; confirm.
        pass

def setScanMode():
    """Enable/disable the min-time and per-step max-time widgets to match scan_mode.

    In 'time' mode the acquisition runs for a fixed preset time, so the
    minimum-time and max-time fields are irrelevant and get disabled; in any
    other mode they follow each row's 'enabled' checkbox.
    """
    if scan_mode.value == 'time':
        scan_min_time.enabled = False
        for stepInfoItem in stepInfo:
            stepInfoItem['maxTime'].enabled = False
    else:
        scan_min_time.enabled = True
        for stepInfoItem in stepInfo:
            stepInfoItem['maxTime'].enabled = stepInfoItem['enabled'].value

def setEnabled(index):
    """Toggle the editability of one step row when its 'enabled' box changes.

    index -- position of the row in the module-level stepInfo list.
    Clears the template selection first (manual edits invalidate it), then
    mirrors the row's 'enabled' value onto its input widgets.  maxTime is
    additionally gated on scan_min_time.enabled (i.e. on the scan mode).
    """
    steps_templates.value = None
    stepItem = stepInfo[index]
    value = stepItem['enabled'].value
    stepItem['dataPoints'].enabled = value
    stepItem['stepSize'  ].enabled = value
    stepItem['preset'    ].enabled = value
    stepItem['maxTime'   ].enabled = value and scan_min_time.enabled
    setTemplate()

def getScan():
    """Build the scan table from the UI step rows.

    Returns a dict with parallel lists:
      'angles'   -- one scan-variable position per data point
      'presets'  -- per-point count/time preset
      'maxTimes' -- per-point maximum counting time
      'groups'   -- the first angle of each step region (used for plot labels)

    The first enabled region is laid out linearly, centred on
    scan_reference.value; for subsequent regions, if the logscale toggle is
    set, 'stepSize' is interpreted as a step FACTOR in percent and angles
    grow geometrically away from the reference.  If negative_steps is set,
    all angles are mirrored about the reference at the end.

    Raises Exception when an enabled region has a non-positive step size.
    """
    scan = { 'angles': [], 'presets': [], 'maxTimes': [], 'groups': [] }
    first = True
    angle_ref = scan_reference.value
    angle = angle_ref
    logscale = False # first data points are always on a linear scale
    negative = bool(negative_steps.value)
    for stepInfoItem in stepInfo:
        if stepInfoItem['enabled'].value:
            dataPoints = stepInfoItem['dataPoints'].value
            stepSize = stepInfoItem['stepSize' ].value
            preset = stepInfoItem['preset' ].value
            maxTime = stepInfoItem['maxTime' ].value
            if (dataPoints > 0) and (stepSize <= 0.0):
                raise Exception('step sizes have to be positive')
            for i in xrange(dataPoints):
                if first and (i == 0):
                    # centre the first (linear) region on the reference angle
                    angle -= ((dataPoints - 1) / 2.0) * stepSize;
                elif logscale:
                    # for logscale stepSize is a stepFactor
                    angle = angle_ref + (angle - angle_ref) * (1.0 + 0.01 * stepSize)
                else:
                    angle += stepSize
                #print angle
                scan['angles' ].append(angle)
                scan['presets' ].append(preset)
                scan['maxTimes'].append(maxTime)
                if i == 0:
                    scan['groups'].append(angle)
                first = False
            # regions after the first follow the logscale toggle
            logscale = bool(logscale_position.value)
    if negative:
        # negate angles with reference to zero angle
        scan['angles'] = [angle_ref - (angle - angle_ref) for angle in scan['angles']]
    return scan
def wait_for_idle():
    """Block until the SICS server reports EAGER_TO_EXECUTE (i.e. idle).

    Polls every 0.1 s.  Every 5 s it additionally calls sics.get_status();
    the return value is unused -- presumably this forces a status refresh on
    a stale connection (TODO confirm against the gumpy sics API).
    """
    c_time = time.time()
    while not sics.getSicsController().getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE):
        time.sleep(0.1)
        if time.time() - c_time > 5:
            serverStatus = sics.get_status()
            c_time = time.time()

def startScan(configModel):
    """Execute one full scan described by a ConfigurationModel.

    configModel -- snapshot of all UI settings (scan table, slits, sample,
    crystal, mode, ...) as captured by ConfigurationModel.__init__.

    This opening section: (1) confirms instrument readiness with the user,
    (2) selects crystal-dependent constants, (3) sanity-checks the angle
    range, then (4) begins pushing metadata to SICS.  The function continues
    beyond this chunk boundary.
    """
    ''' check instrument ready '''
    is_ready = False
    try:
        # /instrument/status/ready may be unreadable; treat failure as not ready
        is_ready = sics.getValue('/instrument/status/ready').getStringData() == 'TRUE'
    except:
        pass
    if not is_ready:
        # ask the operator whether to proceed anyway
        is_confirmed = open_question('The instrument is not ready '\
            + 'according to the SIS status. Please get the '\
            + 'instrument ready. Then click on "Yes" to continue. \n'\
            + 'Do you want to continue?')
        if not is_confirmed:
            slog('Instrument is not ready. Quit the scan.')
            return
        else:
            # re-check once after the operator intervened
            try:
                is_ready = sics.getValue('/instrument/status/ready').getStringData() == 'TRUE'
            except:
                pass
            if not is_ready:
                slog('scan continued without instrument ready')
    ''' setup '''
    scanVariable = configModel.scanVariable
    crystal = configModel.crystal
    mode = configModel.mode
    # dead-time corrections for main and transmission detectors (seconds)
    MainDeadTime = 1.08E-6
    TransDeadTime = 1.08E-6
    # crystal-dependent calibration constants; wavelength in Angstroms,
    # TransmissionTube selects which tube records the transmitted beam
    if 'Si111' in crystal:
        empLevel = 0.3
        bkgLevel = 0.21
        dOmega = 2.3E-6
        gDQv = 0.0586
        gDQh = 0
        wavelength = 4.74
        TransmissionTube = 10
        TransBackground = 0 # counts per second
    elif 'Si311' in crystal:
        empLevel = 0.34
        bkgLevel = 0.21
        dOmega = 4.6E-7
        gDQv = 0.117
        gDQh = 0
        wavelength = 2.37
        TransmissionTube = 9
        TransBackground = 0 # counts per second
    else:
        print 'selected crystal is invalid'
        return
    ''' angles '''
    scan = configModel.scan
    scan_angleMin = builtin_min(scan['angles'])
    scan_angleMax = builtin_max(scan['angles'])
    # monochromator rotations are only allowed within +/- 6 deg of the
    # crystal's nominal position (180 deg for Si111, 0 deg for Si311)
    if ('m1om' in scanVariable) or ('m2om' in scanVariable):
        tolerance = 6
        approved = False
        if 'Si111' in crystal:
            if (180 - tolerance <= scan_angleMin) and (scan_angleMax <= 180 + tolerance):
                approved = True
        elif 'Si311' in crystal:
            if (0 - tolerance <= scan_angleMin) and (scan_angleMax <= 0 + tolerance):
                approved = True
        if not approved:
            print 'angle out of range'
            return
    ''' execution '''
    # push user/sample/calibration metadata into the SICS hierarchy
    sics.execute('hset user/name ' + configModel.user_name)
    # Continuation of startScan(): metadata upload and slit positioning.
    sics.execute('hset user/email ' + configModel.user_email)
    sics.execute('hset sample/name ' + configModel.sample_name)
    sics.execute('hset sample/description ' + configModel.sample_description)
    sics.execute('hset sample/thickness %g' % configModel.sample_thickness)
    sics.execute('hset experiment/bkgLevel %g' % bkgLevel)
    sics.execute('hset experiment/empLevel %g' % empLevel)
    sics.execute('hset instrument/detector/MainDeadTime %g' % MainDeadTime)
    sics.execute('hset instrument/detector/TransDeadTime %g' % TransDeadTime)
    sics.execute('hset instrument/detector/TransBackground %g' % TransBackground)
    sics.execute('hset instrument/detector/TransmissionTube %i' % TransmissionTube)
    sics.execute('hset instrument/crystal/dOmega %g' % dOmega)
    sics.execute('hset instrument/crystal/gDQv %g' % gDQv)
    sics.execute('hset instrument/crystal/gDQh %g' % gDQh)
    sics.execute('hset instrument/crystal/wavelength %g' % wavelength)
    sics.execute('hset instrument/crystal/scan_variable ' + scanVariable);
    sicsController = sics.getSicsController()
    # slits
    def getSlitValues(gap, offset, a0, b0, aOpen, bOpen):
        # Convert a (gap, offset) pair into the two physical blade positions.
        # gap may be the literal strings 'fully opened' / 'fully closed';
        # 'fully closed' is modelled as a -5 mm gap at zero offset.
        # a0/b0 are the blade zero positions, aOpen/bOpen the hard limits.
        if gap == 'fully opened':
            return (aOpen, bOpen)
        if gap == 'fully closed':
            gap = -5.0
            offset = 0.0
        a = a0 + 0.5 * float(gap) + float(offset)
        b = b0 - 0.5 * float(gap) + float(offset)
        return (a, b)
    # slit settings captured from the UI by ConfigurationModel
    ss1vg = configModel.ss1vg
    ss1vo = configModel.ss1vo
    ss1hg = configModel.ss1hg
    ss1ho = configModel.ss1ho
    ss2vg = configModel.ss2vg
    ss2vo = configModel.ss2vo
    ss2hg = configModel.ss2hg
    ss2ho = configModel.ss2ho
    # blade positions: (up, down) and (right, left) pairs for both slits;
    # numeric literals are the fully-open hard limits in mm
    (ss1u, ss1d) = getSlitValues(ss1vg, ss1vo, ss1u0, ss1d0, 35.8, -38.8)
    (ss1r, ss1l) = getSlitValues(ss1hg, ss1ho, ss1r0, ss1l0, 57.0, -58.0)
    (ss2u, ss2d) = getSlitValues(ss2vg, ss2vo, ss2u0, ss2d0, 37.0, -39.5)
    (ss2r, ss2l) = getSlitValues(ss2hg, ss2ho, ss2r0, ss2l0, 35.0, -35.0)
    # apply slits
    run = {}
    run['ss1u'] = ss1u
    run['ss1d'] = ss1d
    run['ss1r'] = ss1r
    run['ss1l'] = ss1l
    run['ss2u'] = ss2u
    run['ss2d'] = ss2d
    run['ss2r'] = ss2r
    run['ss2l'] = ss2l
    # sics.multiDrive(run)
    # build a single 'drive' command moving all eight blades at once
    dc = 'drive'
    for key in run:
        dc += ' ' + key + ' ' + str(run[key])
    sics.execute(dc)
    time.sleep(5)
    wait_for_idle()
    # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE):
    #     time.sleep(0.5)
    '''
    sics.execute('run ss1u %.2f' % ss1u)
    sics.execute('run ss1d %.2f' % ss1d)
    sics.execute('run ss1r %.2f' % ss1r)
    sics.execute('run ss1l %.2f' % ss1l)
    sics.execute('run ss2u %.2f' % ss2u)
    sics.execute('run ss2d %.2f' % ss2d)
    sics.execute('run ss2r %.2f' % ss2r)
    sics.execute('run ss2l %.2f' % ss2l)
    '''
    # load sample positions
    # 'fixed' (or empty) means: measure at the current sample height only,
    # signalled by a single None entry in samz_list
    sample_stage_name = configModel.sample_stage
    sample_positions = str(configModel.sample_position)
    if (len(sample_positions) == 0) or (sample_positions == 'fixed'):
        samz_list = [None]
    else:
        samz_list = []
        stage = SAMPLE_STAGES.get_stage_by_name(sample_stage_name)
        if stage is None:
            # NOTE(review): raising a string is invalid on Python >= 2.6
            # (TypeError at runtime); should be e.g. raise Exception(...).
            raise 'Invalid stage name ' + str(sample_stage_name)
        samz_value = stage.get_samz(sample_positions)
        samz_list.append(samz_value)
    print samz_list
    # one acquisition pass per sample height
    for samz in samz_list:
        sics.execute('histmem stop')
        time.sleep(3)
        # 'ba' (beam-attenuation / batch) mode counts unlimited with ba
        # termination conditions; otherwise count for a fixed time
        if mode == 'ba':
            sics.execute('histmem mode unlimited')
            sics.execute('histmem ba enable')
        else:
            sics.execute('histmem mode time')
            sics.execute('histmem ba disable')
        if samz is not None:
            print 'run samz %.2f' % samz
            sics.execute('run samz %.2f' % samz)
            # sics.execute('prun samz 2' % samz) !!!
        time.sleep(1)
        wait_for_idle()
        # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE):
        #     time.sleep(0.5)
        sics.execute('newfile HISTOGRAM_XYT')
        # sics.execute('autosave 60') # 60 seconds
        time.sleep(1)
        # start/stop hmm
        # count_roi mode needs one short throw-away acquisition to arm the
        # histogram memory before the real frames -- TODO confirm intent
        if mode == 'count_roi':
            sics.execute('histmem preset 1')
            time.sleep(1)
            sics.execute('histmem start')
            time.sleep(5)
            wait_for_idle()
            # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE):
            #     time.sleep(0.5)
            sics.execute('histmem stop')
        print 'frames:', len(scan['angles'])
        count_rate_history = []
        # main frame loop: drive the scan variable, then count one frame
        for frame_index in xrange(len(scan['angles'])):
            angle = scan['angles' ][frame_index]
            preset = scan['presets' ][frame_index]
            maxTime = scan['maxTimes'][frame_index]
            print 'drive %s %.6f' % (scanVariable, angle)
            # sics.drive(scanVariable, float(angle))
            sics.execute('drive %s %.6f' % (scanVariable, angle))
            time.sleep(10)
            wait_for_idle()
            # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE):
            #     time.sleep(0.5)
            print 'drive done'
            time.sleep(1)
            if mode == 'ba':
                # configure ba termination: stop on preset detector counts,
                # bounded by [min_time, maxTime] seconds
                sics.execute('histmem ba roi roi')
                sics.execute('histmem ba monitor %i' % 1)
                sics.execute('histmem ba mintime %i' % configModel.min_time)
                sics.execute('histmem ba maxtime %i' % maxTime)
                sics.execute('histmem ba maxdetcount %i' % preset)
                sics.execute('histmem ba maxbmcount -1')
                sics.execute('histmem ba undermintime ba_maxdetcount')
                print 'histmem start'
                sics.execute('histmem start')
                time0 = time.time()
                # wait for the server to leave the idle state, i.e. for
                # counting to actually begin; give up after 15 s
                while sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE):
                    if time.time() - time0 > 15.0:
                        print 'WARNING: HM may not have started counting. Gumtree will save anyway.'
break else: time.sleep(0.5) time0 = time.time() wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) print 'time counted (estimate):', float(time.time() - time0) else: print 'histmem start' while True: if mode == 'count_roi': sics.execute('histmem preset %i' % maxTime) else: sics.execute('histmem preset %i' % preset) time.sleep(5) sics.execute('histmem start') time.sleep(5) if mode == 'count_roi': print 'count_roi' time.sleep(configModel.min_time) count_roi = 0 t0 = time.time() while not sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): try: count_roi = int(sicsext.runCommand('hmm configure num_events_filled_to_count_roi')) # print count_roi if count_roi > preset: print count_roi print 'reached desired count_roi' sics.execute('histmem pause') time.sleep(1) break except: pass time.sleep(0.5) if time.time() - t0 > 5: serverStatus = sics.get_status() t0 = time.time() break else: wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) valid = False for i in xrange(10): time.sleep(1) detector_time = sics.getValue('/instrument/detector/time').getFloatData() valid = (detector_time >= preset - 1) or (detector_time >= preset * 0.90) if valid: break print 'detector_time:', detector_time if valid: break else: print 'scan was invalid and needs to be repeated' # sics.execute('histmem stop') sics.execute('save %i' % frame_index) frame_index += 1 print 'histmem done' #check if in background if early_exit_enabled.value : try: roi_counts = float(sics.get_raw_value('hmm configure num_events_filled_to_count_roi')) roi_time = sics.getValue('/instrument/detector/time').getFloatData() roi_rate = roi_counts / roi_time print 'measured count rate:', roi_rate count_rate_history.append(roi_rate) bkg_frames = background_frames.value bkg_range = background_threshold.value if (len(count_rate_history) >= bkg_frames) and (builtin_max(count_rate_history[-bkg_frames:]) < 
bkg_range): print 'background reached' print 'scan completed (early exit)' break except: pass sics.execute('newfile clear') # sics.execute('autosave 0') # disable autosave # Get output filename filenameController = sicsController.findDeviceController('datafilename') savedFilename = filenameController.getValue().getStringData() print 'saved:', savedFilename sics.execute('histmem ba disable') print 'done' print def btnPlotSteps_clicked(): scan = getScan() # print 'zero angle:' # print scan_reference.value print '' print 'scan variable range [%f, %f]' % (scan['angles'][0], scan['angles'][-1]) print '' #Plot1.clear() #Plot2.clear() scan_angleMin = builtin_min(scan['angles']) scan_angleMax = builtin_max(scan['angles']) if scan_angleMin == 0 and scan_angleMax == 0: print 'please select a scan template' return if scan_angleMin == scan_angleMax: print 'the min angle and max angle can not be the same' return dummy = zeros(2) dummy.axes[0] = [scan_angleMin, scan_angleMax] #print [scan_angleMin, scan_angleMax] if Plot1.ds != None: Plot1.clear_masks() Plot1.add_dataset(dummy) Plot1.title = 'Preview' Plot1.x_label = 'm2om' Plot1.y_label = 'counts per sec' # Plot1.x_range = [scan_angleMin,scan_angleMax] inclusive = True angles = scan['angles'] for i in xrange(1, len(angles)): xL = angles[i - 1] xH = angles[i ] Plot1.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in xrange(len(groups)): Plot1.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # convert to q PLOT 2 crystal = str(crystal_name.value) if 'Si111' in crystal: wavelength = 4.74 elif 'Si311' in crystal: wavelength = 2.37 else: wavelength = float('nan') q = convert2q(angles, scan_reference.value, wavelength) scan_angleMin = builtin_min(q) scan_angleMax = builtin_max(q) if isnan(scan_angleMin) or isnan(scan_angleMax): print 'please check the wavelength' return if scan_angleMin == scan_angleMax: print 'the min q and max q can not be the same' return dummy = zeros(2) 
dummy.axes[0] = [scan_angleMin, scan_angleMax] if Plot2.ds != None: Plot2.clear_masks() Plot2.add_dataset(dummy) Plot2.title = 'Preview' Plot2.x_label = 'q [1/A]' Plot2.y_label = 'counts per sec' Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.x_range = [1e-6, q[-1]] for i in xrange(1, len(q)): xL = q[i - 1] xH = q[i ] Plot2.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in xrange(len(groups)): Plot2.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # print "angles" # print angles # print q print '' print 'scan q-range [%f, %f]' % (q[0], q[-1]) print '' def openDataset(path): ds = df[str(path)] ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm') # ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm_xy') ds.__iDictionary__.addEntry('time', 'entry1/instrument/detector/time') ds.__iDictionary__.addEntry('m1om', 'entry1/instrument/crystal/m1om') ds.__iDictionary__.addEntry('m1chi', 'entry1/instrument/crystal/m1chi') ds.__iDictionary__.addEntry('m1x', 'entry1/instrument/crystal/m1x') ds.__iDictionary__.addEntry('m2om', 'entry1/instrument/crystal/m2om') ds.__iDictionary__.addEntry('m2chi', 'entry1/instrument/crystal/m2chi') ds.__iDictionary__.addEntry('m2x', 'entry1/instrument/crystal/m2x') ds.__iDictionary__.addEntry('m2y', 'entry1/instrument/crystal/m2y') ds.__iDictionary__.addEntry('mdet', 'entry1/instrument/crystal/mdet') ds.__iDictionary__.addEntry('pmom', 'entry1/instrument/crystal/pmom') ds.__iDictionary__.addEntry('pmchi', 'entry1/instrument/crystal/pmchi') ds.__iDictionary__.addEntry('ss1u', 'entry1/instrument/slits/ss1u') ds.__iDictionary__.addEntry('ss1d', 'entry1/instrument/slits/ss1d') ds.__iDictionary__.addEntry('ss1r', 'entry1/instrument/slits/ss1r') ds.__iDictionary__.addEntry('ss1l', 'entry1/instrument/slits/ss1l') ds.__iDictionary__.addEntry('ss2u', 'entry1/instrument/slits/ss2u') ds.__iDictionary__.addEntry('ss2d', 'entry1/instrument/slits/ss2d') ds.__iDictionary__.addEntry('ss2r', 
'entry1/instrument/slits/ss2r') ds.__iDictionary__.addEntry('ss2l', 'entry1/instrument/slits/ss2l') ds.__iDictionary__.addEntry('ss1vo', 'entry1/instrument/slits/ss1vo') ds.__iDictionary__.addEntry('ss1vg', 'entry1/instrument/slits/ss1vg') ds.__iDictionary__.addEntry('ss1ho', 'entry1/instrument/slits/ss1ho') ds.__iDictionary__.addEntry('ss1hg', 'entry1/instrument/slits/ss1hg') ds.__iDictionary__.addEntry('ss2vo', 'entry1/instrument/slits/ss2vo') ds.__iDictionary__.addEntry('ss2vg', 'entry1/instrument/slits/ss2vg') ds.__iDictionary__.addEntry('ss2ho', 'entry1/instrument/slits/ss2ho') ds.__iDictionary__.addEntry('ss2hg', 'entry1/instrument/slits/ss2hg') ds.__iDictionary__.addEntry('samplename', 'entry1/sample/name') ds.__iDictionary__.addEntry('wavelength', 'entry1/instrument/crystal/wavelength') ds.__iDictionary__.addEntry('TimeStamp', 'entry1/time_stamp') return ds def btnPlot_clicked(): #Plot1.clear() #Plot2.clear() fns = [] for sds in __DATASOURCE__.getSelectedDatasets(): fns.append(sds.getLocation()) if len(fns) != 1: print 'select one dataset' return path = fns[0] basename = os.path.basename(str(path)) basename = basename[:basename.find('.nx.hdf')] ds = openDataset(path) scanVariable = str(scan_variable.value) scanVariable = scanVariable[:scanVariable.find(' ')] scanVariable = ds[scanVariable] samplename = str(ds.samplename) sorting = scan_variable_sorting.value if sorting: info = sorted(enumerate(scanVariable), key=lambda item:item[1]) scanVariable = [item[1] for item in info] shape = ds.shape if shape[0] <= 1: print 'Must have at least 2 scan positions' return n = shape[0] # tubes data = zeros(n) tids = [] if combine_tube0.value: tids.append(0) if combine_tube1.value: tids.append(1) if combine_tube2.value: tids.append(2) if combine_tube3.value: tids.append(3) if combine_tube4.value: tids.append(4) if combine_tube6.value: tids.append(6) Plot1.clear() if str(combine_mode.value) == 'individual': for tid in tids: if ds.hmm.ndim == 4: data[:] = ds.hmm[:, 0, :, 
tid].sum(0) # hmm else: data[:] = ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] # dataF = data.float_copy() # dataF.title = 'Tube %i' % tid # Plot1.add_dataset(dataF) Plot1.title = 'Count Rate (individual)' else: for tid in tids: if ds.hmm.ndim == 4: data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] data.title = 'Tubes ' + str(tids) Plot1.set_dataset(data) Plot1.set_mouse_follower_precision(6, 2, 2) Plot1.title = basename + ' (combined): ' + samplename # Plot1.title = Plot1.title + ' ' + basename if Plot1.ds is not None: Plot1.x_label = str(scan_variable_plot.value) Plot1.y_label = 'counts per sec' Plot2.clear() time.sleep(0.3) ds0 = Plot1.ds[0] # # don't understand how this works xMax = 0 yMax = 0 for i in xrange(len(ds0)): if yMax < ds0[i]: xMax = ds0.axes[0][i] yMax = ds0[i] peakangle = xMax q = convert2q(scanVariable, peakangle, ds.wavelength) data.axes[0] = q[:] Plot2.set_dataset(data) Plot2.set_mouse_follower_precision(6, 2, 2) Plot2.x_label = 'q [1/A]' Plot2.y_label = 'counts per sec' # Plot1.title = 'Main Detector ' + basename + ': ' + samplename # Plot2.title = 'Sample: ' + samplename + '; ' + sampledescription Plot2.title = basename + ' (combined): ' + samplename Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.set_marker_on(True) # plotXMax = Par('float', q[-1]) # Plot2.x_range = [1e-6, plotXMax.value] if q[-1] > 1e-6 : Plot2.x_range = [1e-6, q[-1]] def convert2q(angles, reference, wavelength): if 
wavelength is list: wavelength = wavelength[0] wavelength = float(wavelength) deg2rad = 3.14159265359 / 180 f = 4 * 3.14159265359 / wavelength if bool(negative_steps.value): f *= -1.0 q = [(f * sin(deg2rad * (angle - reference) / 2)) for angle in angles] return q def __run_script__(fns): # Use the provided resources, please don't remove. global Plot1 global Plot2 global Plot3 print 'please press "Run Single Scan" or "Run Multiple Scans"' btnPlot_clicked() def __dispose__(): global Plot1 global Plot2 global Plot3 Plot1.clear() Plot2.clear() Plot3.clear() # # model class ConfigurationModel: def __init__(self): self.scanVariable = str(scan_variable.value) self.scanVariable = self.scanVariable[:self.scanVariable.find(' ')] self.crystal = str(crystal_name.value) self.mode = str(scan_mode.value) self.scan = getScan() self.scan_reference = scan_reference.value self.logscale = bool(logscale_position.value) self.negative = bool(negative_steps.value) self.stepInfo = [] for step in stepInfo: d = dict() for key in step.keys(): d[key] = step[key].value self.stepInfo.append(d); self.user_name = str(user_name.value) self.user_email = str(user_email.value) self.sample_name = str(sample_name.value) self.sample_description = str(sample_description.value) self.sample_thickness = float(sample_thickness.value) # vertical/horizontal pre-slit self.ss1vg = float(pss_ss1vg.value) self.ss1vo = float(pss_ss1vo.value) self.ss1hg = float(pss_ss1hg.value) self.ss1ho = float(pss_ss1ho.value) # vertical/horizontal post-slit self.ss2vg = float(pss_ss2vg.value) self.ss2vo = float(pss_ss2vo.value) self.ss2hg = float(pss_ss2hg.value) self.ss2ho = float(pss_ss2ho.value) # load sample positions self.sample_stage = str(scan_sample_stage.value) self.sample_position = str(scan_sample_position.value) self.min_time = int(scan_min_time.value) # load early exit self.early_exit_enabled = bool(early_exit_enabled.value) self.bkg_frames = int(background_frames.value) self.bkg_threshold = 
float(background_threshold.value) def apply(self): for option in scan_variable.options: if self.scanVariable == option[:option.find(' ')]: scan_variable.value = option crystal_name.value = self.crystal scan_mode.value = self.mode logscale_position.value = self.logscale negative_steps.value = self.negative scan_reference.value = self.scan_reference i = 0 for step in self.stepInfo: for key in step.keys(): stepInfo[i][key].value = step[key] setEnabled(i) i += 1 setScanMode() user_name.value = self.user_name user_email.value = self.user_email sample_name.value = self.sample_name sample_description.value = self.sample_description sample_thickness.value = self.sample_thickness # vertical/horizontal pre-slit pss_ss1vg.value = self.ss1vg pss_ss1vo.value = self.ss1vo pss_ss1hg.value = self.ss1hg pss_ss1ho.value = self.ss1ho # vertical/horizontal post-slit pss_ss2vg.value = self.ss2vg pss_ss2vo.value = self.ss2vo pss_ss2hg.value = self.ss2hg pss_ss2ho.value = self.ss2ho # load sample positions scan_sample_position.value = self.sample_position scan_sample_stage.value = self.sample_stage scan_min_time.value = self.min_time # load early exit early_exit_enabled.value = self.early_exit_enabled background_frames.value = self.bkg_frames background_threshold.value = self.bkg_threshold if early_exit_enabled.value : background_frames.enabled = True background_threshold.enabled = True else: background_frames.enabled = False background_threshold.enabled = False improve time estimation __script__.title = 'KKB Measurement Script' __script__.version = '2.1 2017-07-11 saved as KKB_scan' from gumpy.commons import sics from org.gumtree.gumnix.sics.control import ServerStatus from pickle import Pickler, Unpickler import time from math import log as ln from math import exp, isnan, isinf, sin from __builtin__ import max as builtin_max from __builtin__ import min as builtin_min from org.eclipse.swt.widgets import FileDialog from org.eclipse.swt import SWT from org.eclipse.swt.widgets import 
Display from java.io import File import time from Internal import sample_stage ''' Disable dataset caching ''' DatasetFactory.__cache_enabled__ = False SINGLE_TYPE = SWT.SINGLE SAVE_TYPE = SWT.SAVE MULTI_TYPE = SWT.MULTI class __Display_Runnable__(Runnable): def __init__(self, type=SINGLE_TYPE, ext=['*.*']): self.filename = None self.filenames = None self.path = None self.type = type self.ext = ext def run(self): global __UI__ dialog = FileDialog(__UI__.getShell(), self.type); dialog.setFilterExtensions(self.ext) dialog.open() self.filename = dialog.getFilterPath() + File.separator + dialog.getFileName() self.filenames = dialog.getFileNames() self.path = dialog.getFilterPath() def open_file_dialog(type=SWT.SINGLE, ext=['*.*']): __display_run__ = __Display_Runnable__(type, ext) Display.getDefault().asyncExec(__display_run__) while __display_run__.filename is None: time.sleep(0.5) if type == SWT.MULTI: fns = [] for fn in __display_run__.filenames: fns.append(__display_run__.path + '/' + fn) return fns return __display_run__.filename # # templates reference_templates_dict = {} reference_templates_dict['Si111'] = 180.3565 reference_templates_dict['Si311'] = -0.4100 steps_templates_list = [] # steps_templates['Background'] = [ # 'time', 'logscale', # [20, 6.0e-5, 1200, 1200]] # steps_templates['----------'] = [ # 'time', 'logscale', # [0, 0, 0, 0]] steps_templates_list.append([ 'Si111: Logarithmic Overview Scan', 'time', 'logscale', [17, 1.20e-4, 1, 1200], [30, 22.0, 20, 1200]]) steps_templates_list.append([ 'Si111: Logarithmic Scan (few features)', 'ba', 'logscale', [33, 6.0e-5, 1000, 1200], [34, 20.0, 1000, 1200]]) steps_templates_list.append([ 'Si111: Logarithmic Scan (fine features)', 'ba', 'logscale', [33, 6.0e-5, 1000, 1200], [65, 10.0, 1000, 1200]]) steps_templates_list.append([ 'Si111: Logarithmic Taiki Scan (15 points)', 'ba', 'logscale', [2, 6.0e-5, 1000, 60], [1, 10000, 1000, 60], [10, 25, 1000, 60]]) ''' steps_templates_list.append([ 'Si111: Kinetic Scan 4 
points', 'time', 'logscale', [ 0, 6.0e-5, 1, 1200], [1, 5.0e-3, 180, 1200], [3, 1.5e-2, 180, 1200]]) ''' steps_templates_list.append([ '----------', 'time', 'logscale', [0, 0, 0, 0]]) steps_templates_list.append([ 'Si311: Logarithmic Overview Scan', 'time', 'logscale', [17, 2.0e-5, 1, 1200], [30, 23.0, 20, 1200]]) steps_templates_list.append([ 'Si311: Logarithmic Scan (few features, broadened peak, 80+29)', 'ba', 'logscale', [80, 2e-5, 1000, 1200], [29, 15.0, 1000, 1200]]) steps_templates_list.append([ 'Si311: Logarithmic Scan (few features, broadened peak, 40+33)', 'ba', 'logscale', [40, 2e-5, 1000, 1200], [33, 10.0, 1000, 1200]]) steps_templates_list.append([ 'Si311: Logarithmic Scan (few features, Taiki)', 'ba', 'logscale', [33, 2e-5, 1000, 1200], [25, 20.0, 1000, 1200]]) ret = sample_stage.check_declarations() if not ret[0] : open_warning(ret[1]) reload(sample_stage) SAMPLE_STAGES = sample_stage.StagePool() # # export path __EXPORT_PATH__ = 'V:/shared/KKB Logbook/Temp Plot Data Repository/' if not os.path.exists(__EXPORT_PATH__): os.makedirs(__EXPORT_PATH__) # # User Details user_name = Par('string', 'Christine', options=['Christine', 'Lela', 'Jitendra']) user_name.title = 'Name' user_email = Par('string', 'cre@ansto.gov.au', options=['cre@ansto.gov.au', 'liliana.decampo@ansto.gov.au', 'jtm@ansto.gov.au']) user_email.title = 'EMail' g0 = Group('User Details') g0.numColumns = 2 g0.add(user_name, user_email) # # Sample Details sample_name = Par('string', 'UNKNOWN', options=['Empty Cell', 'Empty Beam'], command="sample_thickness.enabled = sample_name.value not in ['Empty Cell', 'Empty Beam']") sample_name.title = 'Name' sample_description = Par('string', 'UNKNOWN') sample_description.title = 'Description' sample_thickness = Par('string', '1', options=['0.01', '0.1', '1.0', '10.0']) sample_thickness.title = 'Thickness (mm)' g0 = Group('Sample Details') g0.numColumns = 2 g0.add(sample_name, sample_thickness, sample_description) # Group('Sample 
# Details').add(sample_name, sample_description, sample_thickness)
# (fragment of a commented-out line begun in the previous extraction chunk)

#
# Crystal
crystal_name = Par('string', 'UNKNOWN')
crystal_name.title = 'Name'
crystal_name.enabled = False
# Detect the monochromator crystal from the m2om angle: near 180 deg means
# Si111 (4.74 A), otherwise Si311 (2.37 A).  Best-effort: SICS may be offline.
try:
    m2om = sics.getValue('/instrument/crystal/m2om').getFloatData()
    if m2om > 90:
        crystal_name.value = 'Si111 (4.74 Angstroms)'
    else:
        crystal_name.value = 'Si311 (2.37 Angstroms)'
except:
    pass
g0 = Group('Crystal Info')
g0.numColumns = 2
g0.add(crystal_name)
# CRYSTAL END #############################################

# SLIT 1 #######################################################################

def updateOffset(gapBox, offsetBox):
    # The offset entry only makes sense for a numeric gap: disable it when the
    # gap combo reads 'fully opened'/'fully closed'.
    offsetBox.enabled = 'fully' not in gapBox.value

def getSlitGapAndOffset(aPath, a0, bPath, b0):
    # Read two opposing blade positions from SICS and convert them (relative to
    # their calibration zeros a0/b0) into a (gap, offset) pair.  Returns
    # (nan, nan) when SICS is unavailable.
    try:
        a = sics.getValue(aPath).getFloatData()
        b = sics.getValue(bPath).getFloatData()
        gap = (a - a0 - (b - b0)) / 1.0
        offset = (a - a0 + (b - b0)) / 2.0
        return (gap, offset)
    except:
        return (float('nan'), float('nan'))

# Blade calibration zeros depend on the crystal in use.
crystal = str(crystal_name.value)
if 'Si111' in crystal:
    ss1r0 = 28.35
    ss1l0 = 27.75
elif 'Si311' in crystal:
    ss1r0 = -9.16
    ss1l0 = -9.76
else:
    ss1r0 = float('nan')
    ss1l0 = float('nan')
ss1u0 = -8.04
ss1d0 = -7.30
(ss1vg, ss1vo) = getSlitGapAndOffset('/instrument/slits/ss1u', ss1u0, '/instrument/slits/ss1d', ss1d0)
(ss1hg, ss1ho) = getSlitGapAndOffset('/instrument/slits/ss1r', ss1r0, '/instrument/slits/ss1l', ss1l0)

pss_ss1vg = Par('string', '%.1f' % ss1vg,
    options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'],
    command='updateOffset(pss_ss1vg, pss_ss1vo)')
pss_ss1vg.title = 'Vertical Gap (mm)'
# pss_ss1vg.colspan = 50
pss_ss1vo = Par('float', ss1vo)
pss_ss1vo.title = 'Vertical Offset (mm)'
# pss_ss1vo.colspan = 50
pss_ss1hg = Par('string', '%.1f' % ss1hg,
    options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'],
    command='updateOffset(pss_ss1hg, pss_ss1ho)')
pss_ss1hg.title = 'Horizontal Gap (mm)'
# pss_ss1hg.colspan = 50
pss_ss1ho = Par('float', ss1ho)
pss_ss1ho.title = 'Horizontal Offset (mm)'
# pss_ss1ho.colspan = 50 g0 = Group('Sample Slit Settings') g0.numColumns = 2 g0.add(pss_ss1vg, pss_ss1vo, pss_ss1hg, pss_ss1ho) # SLIT 1 END ####################################################################### ## Scan parameters ########################################################################################################## scan_variable = Par('string', 'm2om [deg]', options=[ #'pmom [deg]', 'pmchi [deg]', 'm1om [deg]', 'm1chi [deg]', 'm1x [mm]', 'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]', 'mdet [mm]', 'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]', 'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]', 'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]', 'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]'], command="scan_variable_plot.value = scan_variable.value") scan_variable.title = 'Scan Variable' scan_variable.colspan = 25 scan_reference = Par('float', '0.0') scan_reference.title = 'Zero Angle' scan_reference.colspan = 25 for key in reference_templates_dict.keys(): if key in crystal_name.value: scan_reference.value = reference_templates_dict[key] scan_mode = Par('string', 'ba', options=['ba', 'time'], command='setScanMode()') scan_mode.title = 'Acquisition Mode' scan_mode.colspan = 25 scan_min_time = Par('int', '5') scan_min_time.title = 'Min Time (sec)' scan_min_time.colspan = 25 empty_label = Par('label', '') empty_label.colspan = 25 scan_sample_stage = Par('string', '', command = 'sample_stage_changed()') scan_sample_stage.colspan = 25 scan_sample_stage.title = 'Sample Stage' scan_sample_stage.options = SAMPLE_STAGES.get_stage_names() current_stage = SAMPLE_STAGES.get_stage_in_service() if not current_stage is None: scan_sample_stage.value = current_stage.get_name() scan_sample_position = Par('string', 'fixed') scan_sample_position.title = 'Sample Position' scan_sample_position.colspan = 25 scan_sample_position.options = ['fixed', '----------'] if not current_stage is None: scan_sample_position.options += 
current_stage.get_sample_indexes() logscale_position = Par('bool', False, command='setStepTitles()') logscale_position.title = 'Logarithmic Steps' logscale_position.colspan = 25 negative_steps = Par('bool', False) negative_steps.title = 'Negative Steps' negative_steps.colspan = 25 steps_label = Par('label', 'Please choose scan template or adjust steps manually: ') steps_label.colspan = 200 steps_templates = Par('string', '', options=[item[0] for item in steps_templates_list], command='setTemplate()') steps_templates.title = 'Scan Template' steps_templates.colspan = 100 early_exit_enabled = Par('bool', True, command = "set_early_exit_enabled()") early_exit_enabled.title = "Enable Early Exit" early_exit_enabled.colspan = 25 background_frames = Par('int', 3) background_frames.title = 'Background Frames' background_frames.colspan = 25 background_threshold = Par('float', 0.26) background_threshold.title = 'Background Threshold' background_threshold.colspan = 25 # steps_space = Par('space', '') # steps_space.colspan = 10 g0 = Group('Scan Parameters') g0.numColumns = 100 # 9 g0.add(scan_variable, scan_mode, scan_reference, early_exit_enabled, \ logscale_position, scan_min_time, scan_sample_stage, background_frames, \ negative_steps, empty_label, scan_sample_position, background_threshold, \ steps_label, steps_templates) def sample_stage_changed(): stage = SAMPLE_STAGES.get_stage_by_name(str(scan_sample_stage.value)) # scan_sample_position.value = 'fixed' if not stage is None: scan_sample_position.options = ['fixed', '----------'] + stage.get_sample_indexes() else: scan_sample_position.options = ['fixed', '----------'] def set_early_exit_enabled(): if early_exit_enabled.value: background_frames.enabled = True background_threshold.enabled = True else: background_frames.enabled = False background_threshold.enabled = False stepInfo = [] for i in xrange(4): steps_e = Par('bool', True, command='setEnabled(%i)' % i) steps_e.title = '(%i)' % (i + 1) steps_e.colspan = 10 steps_m = 
Par('int', 0, command='clearScanTemplateSelection()') steps_m.title = 'Number of points' steps_m.colspan = 20 steps_s = Par('float', 0, command='clearScanTemplateSelection()') steps_s.title = 'Step Size [deg]' steps_s.colspan = 20 steps_p = Par('int', 0, command='clearScanTemplateSelection()') steps_p.title = 'Mode Preset' steps_p.colspan = 25 steps_t = Par('int', 1200, command='clearScanTemplateSelection()') steps_t.title = 'Max Time' steps_t.colspan = 25 stepInfo.append({'enabled': steps_e, 'dataPoints':steps_m, 'stepSize':steps_s, 'preset':steps_p, 'maxTime':steps_t}) g0.add(steps_e, steps_m, steps_s, steps_p, steps_t) def clearScanTemplateSelection(): steps_templates.value = None btnPlotSteps = Act('btnPlotSteps_clicked()', 'Plot Measurement Steps') # 'compare measurement steps with previous scan') btnPlotSteps.colspan = 50 cnfg_save_btn = Act('saveConfiguration()', 'Save Single Scan Parameters') cnfg_save_btn.colspan = 50 btnTimeEstimation = Act('runTimeEstimation()', 'Time Estimation with selected Data Set') btnTimeEstimation.colspan = 50 txtTimeEstimation = Par('int', '0') txtTimeEstimation.title = 'Time Estimation (min)' txtTimeEstimation.enabled = False txtTimeEstimation.colspan = 50 g0.add(btnPlotSteps, cnfg_save_btn, btnTimeEstimation, txtTimeEstimation) def runTimeEstimation(): if str(scan_mode.value) == 'time': scan = getScan() times = scan['presets'] txtTimeEstimation.value = int((sum(times) + len(times) * 25) / 60.0) # 25 seconds for each move return fns = [] for sds in __DATASOURCE__.getSelectedDatasets(): fns.append(sds.getLocation()) if len(fns) != 1: print 'select one dataset' return ds = openDataset(fns[0]) scanVariable = str(scan_variable.value) scanVariable = scanVariable[:scanVariable.find(' ')] scanVariable = ds[scanVariable] info = sorted(enumerate(scanVariable), key=lambda item:item[1]) scanVariable = [item[1] for item in info] shape = ds.shape if shape[0] <= 1: print 'Must have at least 2 scan positions' return n = shape[0] # tubes data = 
zeros(n) tids = [] if combine_tube0.value: tids.append(0) if combine_tube1.value: tids.append(1) if combine_tube2.value: tids.append(2) if combine_tube3.value: tids.append(3) if combine_tube4.value: tids.append(4) if combine_tube6.value: tids.append(6) for tid in tids: if ds.hmm.ndim == 4: data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time data[:] = [data[item[0]] for item in info] # sorting # angle and count rate a0 = [float(angle) for angle in scanVariable] r0 = [float(rate) for rate in data[:]] # angle, counts, max time and min time model = ConfigurationModel() scan = model.scan tMin = model.min_time a1 = scan['angles'] c1 = scan['presets'] t1 = scan['maxTimes'] total = 0.0 for i in xrange(len(a1)): try: rate = sample(a0, r0, a1[i]) time = c1[i] / rate if time < tMin: total += tMin elif time > t1[i]: total += t1[i] else: total += time #print ("angle: " + str(a1[i]) # + " expected counts: " + str(c1[i]) # + " rate:" + str(rate) # + " time:" + str(time) # + " total:" + str(total)) except ValueError as e: if e.message == "OutOfRange": total += t1[i] # add max time else: raise total += int(len(a1) * 25) # 25 seconds for each move txtTimeEstimation.value = int(total / 60.0) def sample(x0, y0, x1): from __builtin__ import max, min if len(x0) != len(y0): raise Exception("len(x0) != len(y0)") x0_min = min(x0) x0_max = max(x0) if len(x0) < 2: raise Exception("len(x0) < 2") if x0_min >= x0_max: raise Exception("x0_min >= x0_max") if x1 < x0_min: raise ValueError("OutOfRange") if x0_max < x1: raise ValueError("OutOfRange") i0 = 0 i1 = 1 x0i0 = x0[i0] y0i0 = y0[i0] x0i1 = x0[i1] y0i1 = y0[i1] # in case first x values are equal while x0i0 == x0i1: i1 += 1 x0i1 = x0[i1] y0i1 = y0[i1] # not iterable while x0i1 < x1: x0i0 = x0i1 y0i0 = y0i1 i1 += 1 x0i1 = x0[i1] y0i1 = y0[i1] return y0i0 + (x1 - x0i0) * (y0i1 - y0i0) / (x0i1 - x0i0) ## Scan 
# parameters END #########################################################################
# (fragment of the "## Scan parameters END" banner begun in the previous chunk)

## RUN ##############################################
cnfg_load_btn = Act('loadConfigurations()', 'Load Multiple Scan Parameters')
# Maps a displayed file basename -> full path of the loaded .kkb configuration.
cnfg_lookup = dict()
cnfg_options = Par('string', '', options=[''], command="applyConfiguration()")
cnfg_options.title = 'Read'
start_scan = Act('startScan(ConfigurationModel())', '############# Run Single Scan #############')
cnfg_run_btn = Act('runConfigurations()', '############# Run Multiple Scans #############')
g0 = Group('Execute Scans')
g0.numColumns = 1
g0.add(start_scan, cnfg_load_btn, cnfg_options, cnfg_run_btn)
## Save/Load Configuration END############################################################################

def saveConfiguration():
    # Pickle the current ConfigurationModel to a user-chosen .kkb file.
    # Stream format: 'KKB' header, then alternating attribute-name/value pairs
    # for every public, non-method attribute (dir() order).
    file = open_file_dialog(type=SAVE_TYPE, ext=['*.kkb'])
    try:
        fh = open(file, 'w')
    except:
        print 'not saved'
        return
    try:
        p = Pickler(fh)
        # header
        p.dump('KKB')
        # content
        model = ConfigurationModel()
        for att in dir(model):
            att_value = getattr(model, att)
            if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                p.dump(att)
                p.dump(att_value)
        print 'saved'
    finally:
        fh.close()

def loadConfigurations():
    # Let the user pick multiple .kkb files, validate each by replaying the
    # pickle stream against a fresh ConfigurationModel, and register the valid
    # ones in cnfg_lookup / the cnfg_options combo.
    fileList = open_file_dialog(type=MULTI_TYPE, ext=['*.kkb'])
    if not fileList:
        return
    finalDict = dict()
    finalNames = []
    for path in fileList:
        fh = open(path, 'r')
        try:
            p = Unpickler(fh)
            if p.load() != 'KKB':
                print 'ERROR:', os.path.basename(path)
            else:
                model = ConfigurationModel()
                # set defaults
                model.negative = False # old models may not have this attribute
                for att in dir(model):
                    att_value = getattr(model, att)
                    if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                        # stream must contain name/value pairs in dir() order
                        if p.load() != att:
                            print 'FORMAT ERROR:', os.path.basename(path)
                            break
                        setattr(model, att, p.load())
                else:
                    # for-else: only register the file when no break occurred
                    name = os.path.basename(path)
                    finalDict[name] = path
                    finalNames.append(name)
        finally:
            fh.close()
    cnfg_lookup.clear()
    cnfg_lookup.update(finalDict)
    cnfg_options.value = finalNames[0] if finalNames else ''
cnfg_options.options = finalNames # time.sleep(0.5) def applyConfiguration(): file = str(cnfg_options.value) if not file: return fh = open(cnfg_lookup[file], 'r') try: p = Unpickler(fh) if p.load() != 'KKB': print 'ERROR:', file else: model = ConfigurationModel() for att in dir(model): att_value = getattr(model, att) if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))): if p.load() != att: print 'FORMAT ERROR:', file break setattr(model, att, p.load()) else: # print 'read:', file model.apply() finally: fh.close() def runConfigurations(): for file in cnfg_options.options: fh = open(cnfg_lookup[file], 'r') try: cnfg_options.command = '' cnfg_options.value = file applyConfiguration() p = Unpickler(fh) if p.load() != 'KKB': print 'ERROR:', file else: model = ConfigurationModel() for att in dir(model): att_value = getattr(model, att) if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))): if p.load() != att: print 'FORMAT ERROR:', file break setattr(model, att, p.load()) else: print 'run:', file startScan(model) finally: cnfg_options.command = 'applyConfiguration()' fh.close() # # Plot tubes_label = Par('label', 'Main Detector:') tubes_label.colspan = 1 combine_tube0 = Par('bool', True) combine_tube0.title = ' Tube 0' combine_tube0.colspan = 1 combine_tube1 = Par('bool', True) combine_tube1.title = ' Tube 1' combine_tube1.colspan = 1 combine_tube2 = Par('bool', True) combine_tube2.title = ' Tube 2' combine_tube2.colspan = 1 combine_tube3 = Par('bool', True) combine_tube3.title = ' Tube 3' combine_tube3.colspan = 1 combine_tube4 = Par('bool', True) combine_tube4.title = ' Tube 4' combine_tube4.colspan = 1 combine_tube6 = Par('bool', False) combine_tube6.title = ' Tube 6' combine_tube6.colspan = 1 combine_mode = Par('string', 'combined', options=['individual', 'combined']) combine_mode.title = ' Mode' combine_mode.colspan = 1 trans_tube_label = Par('label', 'Trans Detector: ') trans_tube_label.colspan = 2 check_tube9 = Par('bool', 
True) check_tube9.title = ' Tube 9: Si (311)' check_tube9.colspan = 2 check_tube10 = Par('bool', False) check_tube10.title = ' Tube 10: Si (111)' check_tube10.colspan = 2 # steps_space = Par('space', '') # steps_space.colspan = 12 scan_variable_plot = Par('string', 'm2om [deg]', options=[ 'pmom [deg]', 'pmchi [deg]', 'm1om [deg]', 'm1chi [deg]', 'm1x [mm]', 'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]', 'mdet [mm]', 'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]', 'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]', 'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]', 'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]']) scan_variable_plot.title = 'Scan Variable' scan_variable_plot.colspan = 1 scan_variable_sorting = Par('bool', True) scan_variable_sorting.title = 'Sorting' scan_variable_sorting.colspan = 1 btnPlot = Act('btnPlot_clicked()', 'Plot Selected Data Set') btnPlot.colspan = 8 g0 = Group('Plotting') g0.numColumns = 7 g0.add(tubes_label, combine_tube0, combine_tube1, combine_tube2, combine_tube3, combine_tube4, combine_tube6, combine_mode, trans_tube_label, check_tube9, check_tube10, scan_variable_plot, scan_variable_sorting, btnPlot) # export to csv # btnExport = Act('export_clicked()', 'Export to CSV') ################################# SLIT 2 ########################################################## ss2u0 = 0 # 2.00 ss2d0 = 0 # 1.40 ss2l0 = 0 # 5 0.50 ss2r0 = 0 # -2 -1.00 (ss2vg, ss2vo) = getSlitGapAndOffset('/instrument/slits/ss2u', ss2u0, '/instrument/slits/ss2d', ss2d0) (ss2hg, ss2ho) = getSlitGapAndOffset('/instrument/slits/ss2r', ss2r0, '/instrument/slits/ss2l', ss2l0) pss_ss2vg = Par('string', '%.1f' % ss2vg, options=pss_ss1vg.options, command='updateOffset(pss_ss2vg, pss_ss2vo)') pss_ss2vg.title = 'Vertical Opening (mm)' pss_ss2vo = Par('float', ss2vo) pss_ss2vo.title = 'Vertical Offset (mm)' pss_ss2hg = Par('string', '%.1f' % ss2hg, options=pss_ss1hg.options, command='updateOffset(pss_ss2hg, pss_ss2ho)') pss_ss2hg.title = 
# (fragment: right-hand side of the cut assignment `pss_ss2hg.title = `)
'Horizontal Opening (mm)'
pss_ss2ho = Par('float', ss2ho)
pss_ss2ho.title = 'Horizontal Offset (mm)'
g0 = Group('Post-Sample Slit')
g0.numColumns = 2
g0.add(pss_ss2vg, pss_ss2vo, pss_ss2hg, pss_ss2ho)
################################# SLIT 2 END ##########################################################

def setStepTitles():
    # Relabel the per-stage step-size fields: after the first stage, a
    # logarithmic scan interprets "step" as a percentage factor, not degrees.
    if logscale_position.value:
        for stepInfoItem in stepInfo[1:]:
            stepInfoItem['stepSize'].title = "Step Factor [%]"
    else:
        for stepInfoItem in stepInfo[1:]:
            stepInfoItem['stepSize'].title = "Step Size [deg]"
    __UI__.updateUI()

def setTemplate():
    # Copy the selected scan template into the GUI step table.  Templates are
    # [name, mode, scale, stage...] entries from steps_templates_list; unused
    # stage rows are disabled.  Blanket try/except keeps GUI callbacks from
    # propagating errors (deliberate best-effort — any failure is silent).
    try:
        matches = [item for item in steps_templates_list if item[0] == steps_templates.value]
        if len(matches) != 1:
            steps_templates.value = None
            return
        template = matches[0]
        # ignore '----'
        if template[0][0] == '-':
            steps_templates.value = None
            return
        scan_mode.value = template[1]
        if template[2] == 'logscale':
            logscale_position.value = True
        elif template[2] == 'linear':
            logscale_position.value = False
        setStepTitles()
        # by default templates measure in positive direction
        negative_steps.value = False
        setScanMode()
        headers = 3
        # fill one GUI stage row per template stage entry
        for i in xrange(len(template) - headers):
            templateItem = template[i + headers]
            stepInfoItem = stepInfo[i]
            stepInfoItem['enabled'   ].value = True
            stepInfoItem['dataPoints'].enabled = True
            stepInfoItem['dataPoints'].value = templateItem[0]
            stepInfoItem['stepSize'  ].enabled = True
            stepInfoItem['stepSize'  ].value = templateItem[1]
            stepInfoItem['preset'    ].enabled = True
            stepInfoItem['preset'    ].value = templateItem[2]
            stepInfoItem['maxTime'   ].enabled = scan_min_time.enabled
            stepInfoItem['maxTime'   ].value = templateItem[3]
        # disable any remaining stage rows
        for i in xrange(len(template) - headers, len(stepInfo)):
            stepInfoItem = stepInfo[i]
            stepInfoItem['enabled'   ].value = False
            stepInfoItem['dataPoints'].enabled = False
            stepInfoItem['stepSize'  ].enabled = False
            stepInfoItem['preset'    ].enabled = False
            stepInfoItem['maxTime'   ].enabled = False
    except:
        pass

def setScanMode():
    # (function cut by extraction — body continues in the next chunk)
    if scan_mode.value == 'time':
        scan_min_time.enabled = False
        for
# NOTE(review): the first lines below are the tail of setScanMode(), cut by
# extraction — `stepInfoItem in stepInfo:` completes a `for` begun in the
# previous chunk.
stepInfoItem in stepInfo:
        stepInfoItem['maxTime'].enabled = False
    else:
        scan_min_time.enabled = True
        for stepInfoItem in stepInfo:
            stepInfoItem['maxTime'].enabled = stepInfoItem['enabled'].value

def setEnabled(index):
    # GUI callback for a stage's enable checkbox: toggle that row's fields and
    # clear the template selection (manual edits invalidate the template).
    steps_templates.value = None
    stepItem = stepInfo[index]
    value = stepItem['enabled'].value
    stepItem['dataPoints'].enabled = value
    stepItem['stepSize'  ].enabled = value
    stepItem['preset'    ].enabled = value
    stepItem['maxTime'   ].enabled = value and scan_min_time.enabled
    setTemplate()

def getScan():
    # Build the scan plan from the GUI stage table.  Returns a dict of parallel
    # lists: 'angles' (scan positions), 'presets', 'maxTimes', and 'groups'
    # (the first angle of each enabled stage).  The first stage is centred on
    # the zero-angle reference; later stages step linearly or, when logarithmic
    # steps are on, multiply the distance from the reference by (1 + step%).
    scan = {
        'angles': [],
        'presets': [],
        'maxTimes': [],
        'groups': []
    }
    first = True
    angle_ref = scan_reference.value
    angle = angle_ref
    logscale = False # first data points are always on a linear scale
    negative = bool(negative_steps.value)
    for stepInfoItem in stepInfo:
        if stepInfoItem['enabled'].value:
            dataPoints = stepInfoItem['dataPoints'].value
            stepSize   = stepInfoItem['stepSize'  ].value
            preset     = stepInfoItem['preset'    ].value
            maxTime    = stepInfoItem['maxTime'   ].value
            if (dataPoints > 0) and (stepSize <= 0.0):
                raise Exception('step sizes have to be positive')
            for i in xrange(dataPoints):
                if first and (i == 0):
                    # centre the first stage symmetrically around the reference
                    angle -= ((dataPoints - 1) / 2.0) * stepSize;
                elif logscale:
                    # for logscale stepSize is a stepFactor
                    angle = angle_ref + (angle - angle_ref) * (1.0 + 0.01 * stepSize)
                else:
                    angle += stepSize
                #print angle
                scan['angles'  ].append(angle)
                scan['presets' ].append(preset)
                scan['maxTimes'].append(maxTime)
                if i == 0:
                    scan['groups'].append(angle)
                first = False
                logscale = bool(logscale_position.value)
    if negative:
        # negate angles with reference to zero angle
        scan['angles'] = [angle_ref - (angle - angle_ref) for angle in scan['angles']]
    return scan

def wait_for_idle():
    # Poll SICS until the server reports EAGER_TO_EXECUTE; every 5 s re-query
    # the status to nudge/refresh the connection (return value unused).
    c_time = time.time()
    while not sics.getSicsController().getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE):
        time.sleep(0.1)
        if time.time() - c_time > 5:
            serverStatus = sics.get_status()
            c_time = time.time()

def startScan(configModel):
    # (function cut by extraction — body continues in the next chunk)
    ''' check instrument ready '''
    is_ready = False
    try:
        is_ready =
sics.getValue('/instrument/status/ready').getStringData() == 'TRUE' except: pass if not is_ready: is_confirmed = open_question('The instrument is not ready '\ + 'according to the SIS status. Please get the '\ + 'instrument ready. Then click on "Yes" to continue. \n'\ + 'Do you want to continue?') if not is_confirmed: slog('Instrument is not ready. Quit the scan.') return else: try: is_ready = sics.getValue('/instrument/status/ready').getStringData() == 'TRUE' except: pass if not is_ready: slog('scan continued without instrument ready') ''' setup ''' scanVariable = configModel.scanVariable crystal = configModel.crystal mode = configModel.mode MainDeadTime = 1.08E-6 TransDeadTime = 1.08E-6 if 'Si111' in crystal: empLevel = 0.3 bkgLevel = 0.21 dOmega = 2.3E-6 gDQv = 0.0586 gDQh = 0 wavelength = 4.74 TransmissionTube = 10 TransBackground = 0 # counts per second elif 'Si311' in crystal: empLevel = 0.34 bkgLevel = 0.21 dOmega = 4.6E-7 gDQv = 0.117 gDQh = 0 wavelength = 2.37 TransmissionTube = 9 TransBackground = 0 # counts per second else: print 'selected crystal is invalid' return ''' angles ''' scan = configModel.scan scan_angleMin = builtin_min(scan['angles']) scan_angleMax = builtin_max(scan['angles']) if ('m1om' in scanVariable) or ('m2om' in scanVariable): tolerance = 6 approved = False if 'Si111' in crystal: if (180 - tolerance <= scan_angleMin) and (scan_angleMax <= 180 + tolerance): approved = True elif 'Si311' in crystal: if (0 - tolerance <= scan_angleMin) and (scan_angleMax <= 0 + tolerance): approved = True if not approved: print 'angle out of range' return ''' execution ''' sics.execute('hset user/name ' + configModel.user_name) sics.execute('hset user/email ' + configModel.user_email) sics.execute('hset sample/name ' + configModel.sample_name) sics.execute('hset sample/description ' + configModel.sample_description) sics.execute('hset sample/thickness %g' % configModel.sample_thickness) sics.execute('hset experiment/bkgLevel %g' % bkgLevel) 
sics.execute('hset experiment/empLevel %g' % empLevel) sics.execute('hset instrument/detector/MainDeadTime %g' % MainDeadTime) sics.execute('hset instrument/detector/TransDeadTime %g' % TransDeadTime) sics.execute('hset instrument/detector/TransBackground %g' % TransBackground) sics.execute('hset instrument/detector/TransmissionTube %i' % TransmissionTube) sics.execute('hset instrument/crystal/dOmega %g' % dOmega) sics.execute('hset instrument/crystal/gDQv %g' % gDQv) sics.execute('hset instrument/crystal/gDQh %g' % gDQh) sics.execute('hset instrument/crystal/wavelength %g' % wavelength) sics.execute('hset instrument/crystal/scan_variable ' + scanVariable); sicsController = sics.getSicsController() # slits def getSlitValues(gap, offset, a0, b0, aOpen, bOpen): if gap == 'fully opened': return (aOpen, bOpen) if gap == 'fully closed': gap = -5.0 offset = 0.0 a = a0 + 0.5 * float(gap) + float(offset) b = b0 - 0.5 * float(gap) + float(offset) return (a, b) ss1vg = configModel.ss1vg ss1vo = configModel.ss1vo ss1hg = configModel.ss1hg ss1ho = configModel.ss1ho ss2vg = configModel.ss2vg ss2vo = configModel.ss2vo ss2hg = configModel.ss2hg ss2ho = configModel.ss2ho (ss1u, ss1d) = getSlitValues(ss1vg, ss1vo, ss1u0, ss1d0, 35.8, -38.8) (ss1r, ss1l) = getSlitValues(ss1hg, ss1ho, ss1r0, ss1l0, 57.0, -58.0) (ss2u, ss2d) = getSlitValues(ss2vg, ss2vo, ss2u0, ss2d0, 37.0, -39.5) (ss2r, ss2l) = getSlitValues(ss2hg, ss2ho, ss2r0, ss2l0, 35.0, -35.0) # apply slits run = {} run['ss1u'] = ss1u run['ss1d'] = ss1d run['ss1r'] = ss1r run['ss1l'] = ss1l run['ss2u'] = ss2u run['ss2d'] = ss2d run['ss2r'] = ss2r run['ss2l'] = ss2l # sics.multiDrive(run) dc = 'drive' for key in run: dc += ' ' + key + ' ' + str(run[key]) sics.execute(dc) time.sleep(5) wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) ''' sics.execute('run ss1u %.2f' % ss1u) sics.execute('run ss1d %.2f' % ss1d) sics.execute('run ss1r %.2f' % ss1r) sics.execute('run ss1l %.2f' % 
ss1l) sics.execute('run ss2u %.2f' % ss2u) sics.execute('run ss2d %.2f' % ss2d) sics.execute('run ss2r %.2f' % ss2r) sics.execute('run ss2l %.2f' % ss2l) ''' # load sample positions sample_stage_name = configModel.sample_stage sample_positions = str(configModel.sample_position) if (len(sample_positions) == 0) or (sample_positions == 'fixed'): samz_list = [None] else: samz_list = [] stage = SAMPLE_STAGES.get_stage_by_name(sample_stage_name) if stage is None: raise 'Invalid stage name ' + str(sample_stage_name) samz_value = stage.get_samz(sample_positions) samz_list.append(samz_value) print samz_list for samz in samz_list: sics.execute('histmem stop') time.sleep(3) if mode == 'ba': sics.execute('histmem mode unlimited') sics.execute('histmem ba enable') else: sics.execute('histmem mode time') sics.execute('histmem ba disable') if samz is not None: print 'run samz %.2f' % samz sics.execute('run samz %.2f' % samz) # sics.execute('prun samz 2' % samz) !!! time.sleep(1) wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) sics.execute('newfile HISTOGRAM_XYT') # sics.execute('autosave 60') # 60 seconds time.sleep(1) # start/stop hmm if mode == 'count_roi': sics.execute('histmem preset 1') time.sleep(1) sics.execute('histmem start') time.sleep(5) wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) sics.execute('histmem stop') print 'frames:', len(scan['angles']) count_rate_history = [] for frame_index in xrange(len(scan['angles'])): angle = scan['angles' ][frame_index] preset = scan['presets' ][frame_index] maxTime = scan['maxTimes'][frame_index] print 'drive %s %.6f' % (scanVariable, angle) # sics.drive(scanVariable, float(angle)) sics.execute('drive %s %.6f' % (scanVariable, angle)) time.sleep(10) wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) print 'drive done' time.sleep(1) if mode == 'ba': sics.execute('histmem ba 
roi roi') sics.execute('histmem ba monitor %i' % 1) sics.execute('histmem ba mintime %i' % configModel.min_time) sics.execute('histmem ba maxtime %i' % maxTime) sics.execute('histmem ba maxdetcount %i' % preset) sics.execute('histmem ba maxbmcount -1') sics.execute('histmem ba undermintime ba_maxdetcount') print 'histmem start' sics.execute('histmem start') time0 = time.time() while sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): if time.time() - time0 > 15.0: print 'WARNING: HM may not have started counting. Gumtree will save anyway.' break else: time.sleep(0.5) time0 = time.time() wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) print 'time counted (estimate):', float(time.time() - time0) else: print 'histmem start' while True: if mode == 'count_roi': sics.execute('histmem preset %i' % maxTime) else: sics.execute('histmem preset %i' % preset) time.sleep(5) sics.execute('histmem start') time.sleep(5) if mode == 'count_roi': print 'count_roi' time.sleep(configModel.min_time) count_roi = 0 t0 = time.time() while not sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): try: count_roi = int(sicsext.runCommand('hmm configure num_events_filled_to_count_roi')) # print count_roi if count_roi > preset: print count_roi print 'reached desired count_roi' sics.execute('histmem pause') time.sleep(1) break except: pass time.sleep(0.5) if time.time() - t0 > 5: serverStatus = sics.get_status() t0 = time.time() break else: wait_for_idle() # while not sics.get_status().equals(ServerStatus.EAGER_TO_EXECUTE): # time.sleep(0.5) valid = False for i in xrange(10): time.sleep(1) detector_time = sics.getValue('/instrument/detector/time').getFloatData() valid = (detector_time >= preset - 1) or (detector_time >= preset * 0.90) if valid: break print 'detector_time:', detector_time if valid: break else: print 'scan was invalid and needs to be repeated' # sics.execute('histmem stop') 
sics.execute('save %i' % frame_index) frame_index += 1 print 'histmem done' #check if in background if early_exit_enabled.value : try: roi_counts = float(sics.get_raw_value('hmm configure num_events_filled_to_count_roi')) roi_time = sics.getValue('/instrument/detector/time').getFloatData() roi_rate = roi_counts / roi_time print 'measured count rate:', roi_rate count_rate_history.append(roi_rate) bkg_frames = background_frames.value bkg_range = background_threshold.value if (len(count_rate_history) >= bkg_frames) and (builtin_max(count_rate_history[-bkg_frames:]) < bkg_range): print 'background reached' print 'scan completed (early exit)' break except: pass sics.execute('newfile clear') # sics.execute('autosave 0') # disable autosave # Get output filename filenameController = sicsController.findDeviceController('datafilename') savedFilename = filenameController.getValue().getStringData() print 'saved:', savedFilename sics.execute('histmem ba disable') print 'done' print def btnPlotSteps_clicked(): scan = getScan() # print 'zero angle:' # print scan_reference.value print '' print 'scan variable range [%f, %f]' % (scan['angles'][0], scan['angles'][-1]) print '' #Plot1.clear() #Plot2.clear() scan_angleMin = builtin_min(scan['angles']) scan_angleMax = builtin_max(scan['angles']) if scan_angleMin == 0 and scan_angleMax == 0: print 'please select a scan template' return if scan_angleMin == scan_angleMax: print 'the min angle and max angle can not be the same' return dummy = zeros(2) dummy.axes[0] = [scan_angleMin, scan_angleMax] #print [scan_angleMin, scan_angleMax] if Plot1.ds != None: Plot1.clear_masks() Plot1.add_dataset(dummy) Plot1.title = 'Preview' Plot1.x_label = 'm2om' Plot1.y_label = 'counts per sec' # Plot1.x_range = [scan_angleMin,scan_angleMax] inclusive = True angles = scan['angles'] for i in xrange(1, len(angles)): xL = angles[i - 1] xH = angles[i ] Plot1.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in 
xrange(len(groups)): Plot1.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # convert to q PLOT 2 crystal = str(crystal_name.value) if 'Si111' in crystal: wavelength = 4.74 elif 'Si311' in crystal: wavelength = 2.37 else: wavelength = float('nan') q = convert2q(angles, scan_reference.value, wavelength) scan_angleMin = builtin_min(q) scan_angleMax = builtin_max(q) if isnan(scan_angleMin) or isnan(scan_angleMax): print 'please check the wavelength' return if scan_angleMin == scan_angleMax: print 'the min q and max q can not be the same' return dummy = zeros(2) dummy.axes[0] = [scan_angleMin, scan_angleMax] if Plot2.ds != None: Plot2.clear_masks() Plot2.add_dataset(dummy) Plot2.title = 'Preview' Plot2.x_label = 'q [1/A]' Plot2.y_label = 'counts per sec' Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.x_range = [1e-6, q[-1]] for i in xrange(1, len(q)): xL = q[i - 1] xH = q[i ] Plot2.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in xrange(len(groups)): Plot2.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # print "angles" # print angles # print q print '' print 'scan q-range [%f, %f]' % (q[0], q[-1]) print '' def openDataset(path): ds = df[str(path)] ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm') # ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm_xy') ds.__iDictionary__.addEntry('time', 'entry1/instrument/detector/time') ds.__iDictionary__.addEntry('m1om', 'entry1/instrument/crystal/m1om') ds.__iDictionary__.addEntry('m1chi', 'entry1/instrument/crystal/m1chi') ds.__iDictionary__.addEntry('m1x', 'entry1/instrument/crystal/m1x') ds.__iDictionary__.addEntry('m2om', 'entry1/instrument/crystal/m2om') ds.__iDictionary__.addEntry('m2chi', 'entry1/instrument/crystal/m2chi') ds.__iDictionary__.addEntry('m2x', 'entry1/instrument/crystal/m2x') ds.__iDictionary__.addEntry('m2y', 'entry1/instrument/crystal/m2y') ds.__iDictionary__.addEntry('mdet', 'entry1/instrument/crystal/mdet') 
ds.__iDictionary__.addEntry('pmom', 'entry1/instrument/crystal/pmom') ds.__iDictionary__.addEntry('pmchi', 'entry1/instrument/crystal/pmchi') ds.__iDictionary__.addEntry('ss1u', 'entry1/instrument/slits/ss1u') ds.__iDictionary__.addEntry('ss1d', 'entry1/instrument/slits/ss1d') ds.__iDictionary__.addEntry('ss1r', 'entry1/instrument/slits/ss1r') ds.__iDictionary__.addEntry('ss1l', 'entry1/instrument/slits/ss1l') ds.__iDictionary__.addEntry('ss2u', 'entry1/instrument/slits/ss2u') ds.__iDictionary__.addEntry('ss2d', 'entry1/instrument/slits/ss2d') ds.__iDictionary__.addEntry('ss2r', 'entry1/instrument/slits/ss2r') ds.__iDictionary__.addEntry('ss2l', 'entry1/instrument/slits/ss2l') ds.__iDictionary__.addEntry('ss1vo', 'entry1/instrument/slits/ss1vo') ds.__iDictionary__.addEntry('ss1vg', 'entry1/instrument/slits/ss1vg') ds.__iDictionary__.addEntry('ss1ho', 'entry1/instrument/slits/ss1ho') ds.__iDictionary__.addEntry('ss1hg', 'entry1/instrument/slits/ss1hg') ds.__iDictionary__.addEntry('ss2vo', 'entry1/instrument/slits/ss2vo') ds.__iDictionary__.addEntry('ss2vg', 'entry1/instrument/slits/ss2vg') ds.__iDictionary__.addEntry('ss2ho', 'entry1/instrument/slits/ss2ho') ds.__iDictionary__.addEntry('ss2hg', 'entry1/instrument/slits/ss2hg') ds.__iDictionary__.addEntry('samplename', 'entry1/sample/name') ds.__iDictionary__.addEntry('wavelength', 'entry1/instrument/crystal/wavelength') ds.__iDictionary__.addEntry('TimeStamp', 'entry1/time_stamp') return ds def btnPlot_clicked(): #Plot1.clear() #Plot2.clear() fns = [] for sds in __DATASOURCE__.getSelectedDatasets(): fns.append(sds.getLocation()) if len(fns) != 1: print 'select one dataset' return path = fns[0] basename = os.path.basename(str(path)) basename = basename[:basename.find('.nx.hdf')] ds = openDataset(path) scanVariable = str(scan_variable.value) scanVariable = scanVariable[:scanVariable.find(' ')] scanVariable = ds[scanVariable] samplename = str(ds.samplename) sorting = scan_variable_sorting.value if sorting: info = 
sorted(enumerate(scanVariable), key=lambda item:item[1]) scanVariable = [item[1] for item in info] shape = ds.shape if shape[0] <= 1: print 'Must have at least 2 scan positions' return n = shape[0] # tubes data = zeros(n) tids = [] if combine_tube0.value: tids.append(0) if combine_tube1.value: tids.append(1) if combine_tube2.value: tids.append(2) if combine_tube3.value: tids.append(3) if combine_tube4.value: tids.append(4) if combine_tube6.value: tids.append(6) Plot1.clear() if str(combine_mode.value) == 'individual': for tid in tids: if ds.hmm.ndim == 4: data[:] = ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] = ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] # dataF = data.float_copy() # dataF.title = 'Tube %i' % tid # Plot1.add_dataset(dataF) Plot1.title = 'Count Rate (individual)' else: for tid in tids: if ds.hmm.ndim == 4: data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] data.title = 'Tubes ' + str(tids) Plot1.set_dataset(data) Plot1.set_mouse_follower_precision(6, 2, 2) Plot1.title = basename + ' (combined): ' + samplename # Plot1.title = Plot1.title + ' ' + basename if Plot1.ds is not None: Plot1.x_label = str(scan_variable_plot.value) Plot1.y_label = 'counts per sec' Plot2.clear() time.sleep(0.3) ds0 = Plot1.ds[0] # # don't understand how this works xMax = 0 yMax = 0 for i in xrange(len(ds0)): if yMax < ds0[i]: xMax = ds0.axes[0][i] yMax = ds0[i] peakangle = xMax q = convert2q(scanVariable, peakangle, ds.wavelength) 
data.axes[0] = q[:] Plot2.set_dataset(data) Plot2.set_mouse_follower_precision(6, 2, 2) Plot2.x_label = 'q [1/A]' Plot2.y_label = 'counts per sec' # Plot1.title = 'Main Detector ' + basename + ': ' + samplename # Plot2.title = 'Sample: ' + samplename + '; ' + sampledescription Plot2.title = basename + ' (combined): ' + samplename Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.set_marker_on(True) # plotXMax = Par('float', q[-1]) # Plot2.x_range = [1e-6, plotXMax.value] if q[-1] > 1e-6 : Plot2.x_range = [1e-6, q[-1]] def convert2q(angles, reference, wavelength): if wavelength is list: wavelength = wavelength[0] wavelength = float(wavelength) deg2rad = 3.14159265359 / 180 f = 4 * 3.14159265359 / wavelength if bool(negative_steps.value): f *= -1.0 q = [(f * sin(deg2rad * (angle - reference) / 2)) for angle in angles] return q def __run_script__(fns): # Use the provided resources, please don't remove. global Plot1 global Plot2 global Plot3 print 'please press "Run Single Scan" or "Run Multiple Scans"' btnPlot_clicked() def __dispose__(): global Plot1 global Plot2 global Plot3 Plot1.clear() Plot2.clear() Plot3.clear() # # model class ConfigurationModel: def __init__(self): self.scanVariable = str(scan_variable.value) self.scanVariable = self.scanVariable[:self.scanVariable.find(' ')] self.crystal = str(crystal_name.value) self.mode = str(scan_mode.value) self.scan = getScan() self.scan_reference = scan_reference.value self.logscale = bool(logscale_position.value) self.negative = bool(negative_steps.value) self.stepInfo = [] for step in stepInfo: d = dict() for key in step.keys(): d[key] = step[key].value self.stepInfo.append(d); self.user_name = str(user_name.value) self.user_email = str(user_email.value) self.sample_name = str(sample_name.value) self.sample_description = str(sample_description.value) self.sample_thickness = float(sample_thickness.value) # vertical/horizontal pre-slit self.ss1vg = float(pss_ss1vg.value) self.ss1vo = float(pss_ss1vo.value) 
self.ss1hg = float(pss_ss1hg.value) self.ss1ho = float(pss_ss1ho.value) # vertical/horizontal post-slit self.ss2vg = float(pss_ss2vg.value) self.ss2vo = float(pss_ss2vo.value) self.ss2hg = float(pss_ss2hg.value) self.ss2ho = float(pss_ss2ho.value) # load sample positions self.sample_stage = str(scan_sample_stage.value) self.sample_position = str(scan_sample_position.value) self.min_time = int(scan_min_time.value) # load early exit self.early_exit_enabled = bool(early_exit_enabled.value) self.bkg_frames = int(background_frames.value) self.bkg_threshold = float(background_threshold.value) def apply(self): for option in scan_variable.options: if self.scanVariable == option[:option.find(' ')]: scan_variable.value = option crystal_name.value = self.crystal scan_mode.value = self.mode logscale_position.value = self.logscale negative_steps.value = self.negative scan_reference.value = self.scan_reference i = 0 for step in self.stepInfo: for key in step.keys(): stepInfo[i][key].value = step[key] setEnabled(i) i += 1 setScanMode() user_name.value = self.user_name user_email.value = self.user_email sample_name.value = self.sample_name sample_description.value = self.sample_description sample_thickness.value = self.sample_thickness # vertical/horizontal pre-slit pss_ss1vg.value = self.ss1vg pss_ss1vo.value = self.ss1vo pss_ss1hg.value = self.ss1hg pss_ss1ho.value = self.ss1ho # vertical/horizontal post-slit pss_ss2vg.value = self.ss2vg pss_ss2vo.value = self.ss2vo pss_ss2hg.value = self.ss2hg pss_ss2ho.value = self.ss2ho # load sample positions scan_sample_position.value = self.sample_position scan_sample_stage.value = self.sample_stage scan_min_time.value = self.min_time # load early exit early_exit_enabled.value = self.early_exit_enabled background_frames.value = self.bkg_frames background_threshold.value = self.bkg_threshold if early_exit_enabled.value : background_frames.enabled = True background_threshold.enabled = True else: background_frames.enabled = False 
background_threshold.enabled = False
# Python import StringIO from collections import Counter, OrderedDict # Django from django.core import urlresolvers from django_countries import countries from django.db import connection from django.db.models import F, Count, Sum, FieldDoesNotExist from django.contrib.sites.shortcuts import get_current_site # 3rd Party import xlsxwriter # Project from forms.models import ( NewspaperSheet, NewspaperPerson, TelevisionJournalist, person_models, sheet_models, journalist_models) from forms.modelutils import (TOPICS, GENDER, SPACE, OCCUPATION, FUNCTION, SCOPE, YESNO, AGES, SOURCE, VICTIM_OF, SURVIVOR_OF, IS_PHOTOGRAPH, AGREE_DISAGREE, RETWEET, TV_ROLE, MEDIA_TYPES, CountryRegion) from report_details import WS_INFO, REGION_COUNTRY_MAP, MAJOR_TOPICS, TOPIC_GROUPS, GROUP_TOPICS_MAP, FORMATS def has_field(model, fld): try: model._meta.get_field(fld) return True except FieldDoesNotExist: return False def p(n, d): """ Helper to calculate the percentage of n / d, returning 0 if d == 0. """ if d == 0: return 0.0 return float(n) / d def get_regions(): """ Return a (id, region_name) list for all regions """ country_regions = CountryRegion.objects\ .values('region')\ .exclude(region='Unmapped') regions = set(item['region'] for item in country_regions) return [(i, region) for i, region in enumerate(regions)] def get_countries(selected=None): """ Return a (code, country) list for countries captured. """ captured_country_codes = set() for model in sheet_models.itervalues(): rows = model.objects.values('country') captured_country_codes.update([r['country'] for r in rows]) return [(code, name) for code, name in list(countries) if code in captured_country_codes] def get_region_countries(region): """ Return a (code, country) list for a region. 
""" if region == 'ALL': return get_countries() else: country_codes = REGION_COUNTRY_MAP[region] return [(code, name) for code, name in list(countries) if code in country_codes] def get_country_region(country): """ Return a (id, region_name) list to which a country belongs. """ if country == 'ALL': return get_regions() else: return [(0, [k for k, v in REGION_COUNTRY_MAP.items() if country in v][0])] def clean_title(text): """ Return the string passed in stripped of its numbers and parentheses """ if text != "Congo (the Democratic Republic of the)": return text[text.find(')')+1:].lstrip() return text class XLSXDataExportBuilder(): def __init__(self, request): self.domain = "http://%s" % get_current_site(request).domain self.sheet_exclude_fields = ['monitor', 'url_and_multimedia', 'time_accessed', 'country_region'] self.person_exclude_fields = [] self.journalist_exclude_fields =[] self.sheet_fields_with_id = ['topic', 'scope', 'person_secondary', 'inequality_women', 'stereotypes'] self.person_fields_with_id = ['sex', 'age', 'occupation', 'function', 'survivor_of', 'victim_of'] self.journalist_fields_with_id = ['sex', 'age'] def build(self): """ Generate an Excel spreadsheet and return it as a string. 
""" output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) for model in sheet_models.itervalues(): self.create_sheet_export(model, workbook) for model in person_models.itervalues(): self.create_person_export(model, workbook) for model in journalist_models.itervalues(): self.create_journalist_export(model, workbook) workbook.close() output.seek(0) return output.read() def create_sheet_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all() row, col = 0, 0 fields = [field for field in model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.sheet_fields_with_id) row += 1 col = 0 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_sheet_row(obj, ws, row+y, col, fields, self.sheet_fields_with_id) def create_person_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all().prefetch_related(model.sheet_name()) row, col = 0, 0 fields = [field for field in model._meta.fields if not field.name in self.person_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.person_fields_with_id) sheet_model = model._meta.get_field(model.sheet_name()).rel.to sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True) row += 1 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_person_row(obj, ws, row+y, col, fields, self.person_fields_with_id) col += 1 sheet_obj = getattr(obj, model.sheet_name()) ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id) def create_journalist_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all().prefetch_related(model.sheet_name()) row, col = 0, 0 fields = [field for field in model._meta.fields if not 
field.name in self.journalist_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.journalist_fields_with_id) sheet_model = model._meta.get_field(model.sheet_name()).rel.to sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True) row += 1 col = 0 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_journalist_row(obj, ws, row+y, col, fields, self.journalist_fields_with_id) col += 1 sheet_obj = getattr(obj, model.sheet_name()) ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id) def write_ws_titles(self, ws, row, col, fields, fields_with_id, append_sheet=False): """ Writes the column titles to the worksheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name :param append_sheet: Boolean specifying whether the related sheet object needs to be appended to the row. 
""" if not append_sheet: for field in fields: ws.write(row, col, unicode(field.name)) col += 1 if field.name in fields_with_id: ws.write(row, col, unicode(field.name+"_id")) col += 1 ws.write(row, col, unicode('edit_url')) col += 1 else: for field in fields: ws.write(row, col, unicode("sheet_" + field.name)) col += 1 if field.name in fields_with_id: ws.write(row, col, unicode("sheet_" + field.name + "_id")) col += 1 ws.write(row, col, unicode('sheet_edit_url')) col += 1 return ws, col def write_sheet_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Sheet models to the worksheet :param obj: Reference to the model instance which is being written to the sheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: # Certain fields are 1-indexed if field.name == 'country': ws.write(row, col, getattr(obj, field.name).code) elif field.name == 'topic': ws.write(row, col, unicode(TOPICS[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, TOPICS[getattr(obj, field.name)-1][0]) elif field.name == 'scope': ws.write(row, col, unicode(SCOPE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, SCOPE[getattr(obj, field.name)-1][0]) elif field.name == 'person_secondary': ws.write(row, col, unicode(SOURCE[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, SOURCE[getattr(obj, field.name)][0]) elif field.name == 'inequality_women': ws.write(row, col, unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0]) elif field.name == 'stereotypes': ws.write(row, col, unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0]) elif field.name == 'space': ws.write(row, col, 
unicode(SPACE[getattr(obj, field.name)-1][1])) elif field.name == 'retweet': ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1])) else: try: ws.write(row, col, unicode(getattr(obj, field.name))) if field.name in fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row, col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( obj._meta.app_label, obj._meta.model_name), args=(obj.id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col def write_person_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Person models to the worksheet :param obj: Reference to the model instance which is being written to the sheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: # Certain fields are 1-indexed if field.name == 'sex': ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, GENDER[getattr(obj, field.name)-1][0]) elif field.name == 'age': ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, AGES[getattr(obj, field.name)][0]) elif field.name == 'occupation': ws.write(row, col, unicode(OCCUPATION[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, OCCUPATION[getattr(obj, field.name)][0]) elif field.name == 'function': ws.write(row, col, unicode(FUNCTION[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, FUNCTION[getattr(obj, field.name)][0]) elif field.name == 'victim_of' and not getattr(obj, field.name) == None: ws.write(row, col, unicode(VICTIM_OF[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, VICTIM_OF[getattr(obj, field.name)][0]) elif field.name == 'survivor_of' and not getattr(obj, 
field.name) == None: ws.write(row, col, unicode(SURVIVOR_OF[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, SURVIVOR_OF[getattr(obj, field.name)][0]) elif field.name == 'is_photograph': ws.write(row, col, unicode(IS_PHOTOGRAPH[getattr(obj, field.name)-1][1])) elif field.name == 'space': ws.write(row, col, unicode(SPACE[getattr(obj, field.name)-1][1])) elif field.name == 'retweet': ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1])) elif field.name == obj.sheet_name(): ws.write(row, col, getattr(obj, field.name).id) # Get the parent model and id for building the edit link parent_model = field.related.parent_model parent_id = getattr(obj, field.name).id else: try: ws.write(row,col, unicode(getattr(obj, field.name))) if field.name in self.person_fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 # Write link to end of row change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( parent_model._meta.app_label, parent_model._meta.model_name), args=(parent_id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col def write_journalist_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Journalist models to the worksheet :param obj: Reference to the model instance which is being written to the sheet_fields_with_id :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet_fields_with_id :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: if field.name == 'sex': ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, GENDER[getattr(obj, field.name)-1][0]) elif field.name == 'age' and not getattr(obj, field.name) == None: ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1])) col += 1 ws.write(row, 
col, AGES[getattr(obj, field.name)][0]) elif field.name == obj.sheet_name(): ws.write(row, col, getattr(obj, field.name).id) # Get the parent model and id for building the edit link parent_model = field.related.parent_model parent_id = getattr(obj, field.name).id else: try: ws.write(row,col, unicode(getattr(obj, field.name))) if field.name in fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 # Write link to end of row change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( parent_model._meta.app_label, parent_model._meta.model_name), args=(parent_id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col class XLSXReportBuilder: def __init__(self, form): from reports.views import CountryForm, RegionForm self.form = form if isinstance(form, CountryForm): self.countries = form.filter_countries() self.regions = get_country_region(form.cleaned_data['country']) self.report_type = 'country' elif isinstance(form, RegionForm): region = [name for i, name in form.REGIONS if str(i) == form.cleaned_data['region']][0] self.countries = get_region_countries(region) self.regions = [(0, region)] self.report_type = 'region' else: self.countries = get_countries() self.regions = get_regions() self.report_type = 'global' self.country_list = [code for code, name in self.countries] self.region_list = [name for id, name in self.regions] # Various gender utilities self.male_female = [(id, value) for id, value in GENDER if id in [1, 2]] self.male_female_ids = [id for id, value in self.male_female] self.female = [(id, value) for id, value in GENDER if id == 1] self.yes = [(id, value) for id, value in YESNO if id == 'Y'] self.gmmp_year = '2015' def build(self): """ Generate an Excel spreadsheet and return it as a string. 
""" output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) # setup formats self.heading = workbook.add_format(FORMATS['heading']) self.col_heading = workbook.add_format(FORMATS['col_heading']) self.col_heading_def = workbook.add_format(FORMATS['col_heading_def']) self.sec_col_heading = workbook.add_format(FORMATS['sec_col_heading']) self.sec_col_heading_def = workbook.add_format(FORMATS['sec_col_heading_def']) self.label = workbook.add_format(FORMATS['label']) self.N = workbook.add_format(FORMATS['N']) self.P = workbook.add_format(FORMATS['P']) # Use the following for specifying which reports to create during dev # test_functions = [ # 'ws_01', 'ws_02', 'ws_04', 'ws_05', 'ws_06', 'ws_07', 'ws_08', 'ws_09', 'ws_10', # 'ws_11', 'ws_12', 'ws_13', 'ws_14', 'ws_15', 'ws_16', 'ws_17', 'ws_18', 'ws_19', 'ws_20', # 'ws_21', 'ws_23', 'ws_24', 'ws_25', 'ws_26', 'ws_27', 'ws_28', 'ws_29', 'ws_30', # 'ws_31', 'ws_32', 'ws_34', 'ws_35', 'ws_36', 'ws_38', 'ws_39', 'ws_40', # 'ws_41', 'ws_42', 'ws_43', 'ws_44', 'ws_45', 'ws_46', 'ws_47', 'ws_48', # 'ws_49', 'ws_50', 'ws_51', 'ws_52', 'ws_53', 'ws_54', 'ws_55', 'ws_56',, 'ws_57', 'ws_58', 'ws_59', 'ws_60', # 'ws_61', 'ws_62', 'ws_63', 'ws_64', 'ws_65', 'ws_66', 'ws_67', 'ws_68', 'ws_69', 'ws_70', # 'ws_76', 'ws_77', 'ws_78', 'ws_79'] test_functions = ['ws_66'] sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) for function in test_functions: if self.report_type in sheet_info[function]['reports']: ws = workbook.add_worksheet(sheet_info[function]['name']) self.write_headers(ws, sheet_info[function]['title'], sheet_info[function]['desc']) getattr(self, function)(ws) # ------------------------------------------------------------------- # To ensure ordered worksheets # sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) # for ws_num, ws_info in sheet_info.iteritems(): # if self.report_type in ws_info['reports']: # ws = workbook.add_worksheet(ws_info['name']) # self.write_headers(ws, 
ws_info['title'], ws_info['desc']) # getattr(self, ws_num)(ws) workbook.close() output.seek(0) return output.read() def dictfetchall(self, cursor): """ Returns all rows from a cursor as a dict """ desc = cursor.description return [ dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall() ] def apply_weights(self, rows, db_table, media_type): """ param rows: Queryset to apply the weights to param db_table: name of relevant sheet table param: media_type: media type to weigh by """ query = rows.extra( tables=['reports_weights'], where=[ 'reports_weights.country = %s.country' % (db_table), 'reports_weights.media_type = \'%s\'' % (media_type), ]).annotate() raw_query, params = query.query.sql_with_params() raw_query = raw_query.replace('SELECT', 'SELECT cast(round(SUM(reports_weights.weight)) as int) AS "n", ') cursor = connection.cursor() cursor.execute(raw_query, params) return self.dictfetchall(cursor) def ws_01(self, ws): """ Cols: Media Type Rows: Region """ counts = Counter() for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('country_region__region')\ .filter(country_region__region__in=self.region_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for row in rows: if row['region'] is not None: # Get media and region id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] region_id = [region[0] for region in self.regions if region[1] == row['region']][0] counts.update({(media_id, region_id): row['n']}) self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True) def ws_02(self, ws): """ Cols: Media Type Rows: Region, Country """ r = 6 self.write_col_headings(ws, MEDIA_TYPES) counts = Counter() for region_id, region in self.regions: for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('country')\ .filter(country__in=self.country_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for row in rows: if 
row['country'] is not None: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] counts.update({(media_id, row['country']): row['n']}) self.write_primary_row_heading(ws, region, r=r) region_countries = [(code, country) for code, country in self.countries if code in REGION_COUNTRY_MAP[region]] self.tabulate(ws, counts, MEDIA_TYPES, region_countries, row_perc=True, sec_row=True, r=r) r += len(region_countries) def ws_04(self, ws): """ Cols: Region, Media type Rows: Major Topic """ secondary_counts = OrderedDict() for region_id, region in self.regions: counts = Counter() for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('topic')\ .filter(country_region__region=region) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] major_topic = TOPIC_GROUPS[r['topic']] counts.update({(media_id, major_topic): r['n']}) secondary_counts[region] = counts self.tabulate_secondary_cols(ws, secondary_counts, MEDIA_TYPES, MAJOR_TOPICS, row_perc=False, sec_cols=10) def ws_05(self, ws): """ Cols: Subject sex Rows: Major Topic """ counts = Counter() for media_type, model in person_models.iteritems(): topic_field = '%s__topic' % model.sheet_name() rows = model.objects\ .values('sex', topic_field)\ .filter(**{model.sheet_name() + '__country__in': self.country_list})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for r in rows: counts.update({(r['sex'], TOPIC_GROUPS[r['topic']]): r['n']}) self.tabulate(ws, counts, self.male_female, MAJOR_TOPICS, row_perc=True, display_cols=self.female) def ws_06(self, ws): """ Cols: Region, Subject sex: female only Rows: Major Topics """ secondary_counts = OrderedDict() for region_id, region in self.regions: counts = Counter() for media_type, model in 
person_models.iteritems(): topic_field = '%s__topic' % model.sheet_name() rows = model.objects\ .values('sex', topic_field)\ .filter(**{model.sheet_name() + '__country_region__region':region})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for r in rows: counts.update({(r['sex'], TOPIC_GROUPS[r['topic']]): r['n']}) secondary_counts[region] = counts self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, MAJOR_TOPICS, row_perc=True, sec_cols=2, display_cols=self.female) def ws_07(self, ws): """ Cols: Media Type Rows: Subject Sex """ counts = Counter() for media_type, model in person_models.iteritems(): rows = model.objects\ .values('sex')\ .filter(**{model.sheet_name() + '__country__in': self.country_list})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for r in rows: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] counts.update({(media_id, r['sex']): r['n']}) self.tabulate(ws, counts, MEDIA_TYPES, self.male_female, row_perc=False) def ws_08(self, ws): """ Cols: Subject Sex Rows: Scope """ counts = Counter() for media_type, model in person_models.iteritems(): if 'scope' in model.sheet_field().rel.to._meta.get_all_field_names(): scope = '%s__scope' % model.sheet_name() rows = model.objects\ .values('sex', scope)\ .filter(**{model.sheet_name() + '__country__in': self.country_list})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) counts.update({(r['sex'], r['scope']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, SCOPE, row_perc=True, display_cols=self.female) def ws_09(self, ws): """ Cols: Subject Sex Rows: Topic """ counts = Counter() for media_type, model in person_models.iteritems(): topic = '%s__topic' % model.sheet_name() rows = model.objects\ .values('sex', topic)\ 
.filter(**{model.sheet_name() + '__country__in': self.country_list})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) counts.update({(r['sex'], r['topic']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female) def ws_10(self, ws): """ Cols: Space Rows: Minor Topics :: Newspaper Sheets only """ # Calculate row values for column counts = Counter() for media_type, model in sheet_models.iteritems(): if media_type == 'Print': rows = model.objects\ .values('space', 'topic')\ .filter(country__in=self.country_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: counts.update({(r['space'], TOPIC_GROUPS[r['topic']]): r['n']}) self.tabulate(ws, counts, SPACE, MAJOR_TOPICS, row_perc=False) def ws_11(self, ws): """ Cols: Equality Rights Rows: Major Topics """ counts = Counter() for media_type, model in sheet_models.iteritems(): if 'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('equality_rights', 'topic')\ .filter(country__in=self.country_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']}) self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True) def ws_12(self, ws): """ Cols: Region, Equality Rights Rows: Major Topics """ secondary_counts = OrderedDict() for region_id, region_name in self.regions: counts = Counter() for media_type, model in sheet_models.iteritems(): # Some models has no equality rights field if 'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('equality_rights', 'topic')\ .filter(country_region__region=region_name) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']}) secondary_counts[region_name] = counts 
        # (tail of the preceding worksheet builder, continued from above)
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, sec_cols=4)

    def ws_13(self, ws):
        """
        Cols: Journalist Sex, Equality Rights
        Rows: Topics
        """
        # One Counter per journalist sex; each becomes a secondary column group.
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                # Only sheet types that actually carry an equality_rights field.
                if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names():
                    topic = '%s__topic' % model.sheet_name()
                    equality_rights = '%s__equality_rights' % model.sheet_name()
                    rows = model.objects\
                        .values(equality_rights, topic)\
                        .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                        .filter(sex=gender_id)
                    # NOTE(review): the loop below reads r['equality_rights'] and
                    # r['topic'] although values() used sheet-prefixed keys, so
                    # apply_weights presumably strips the prefix — confirm against
                    # its implementation.
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    for r in rows:
                        # Collapse minor topics into their major-topic group.
                        counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, sec_cols=4)

    def ws_14(self, ws):
        """
        Cols: Sex
        Rows: Occupation
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            # some Person models don't have an occupation field
            if 'occupation' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'occupation')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
        # Only the female column is displayed, but percentages are row-based.
        self.tabulate(ws, counts, self.male_female, OCCUPATION, row_perc=True, display_cols=self.female)

    def ws_15(self, ws):
        """
        Cols: Sex
        Rows: Function
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            # some Person models don't have a function field
            if 'function' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'function')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['function']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, FUNCTION, row_perc=True, display_cols=self.female)

    def ws_16(self, ws):
        """
        Cols: Function, Sex
        Rows: Occupation
        """
        secondary_counts = OrderedDict()
        for function_id, function in FUNCTION:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'occupation')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(function=function_id)\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
            secondary_counts[function] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4)

    def ws_17(self, ws):
        """
        Cols: Age, Sex of Subject
        Rows: Function
        """
        secondary_counts = OrderedDict()
        for age_id, age in AGES:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'function' in model._meta.get_all_field_names() and 'age' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'function')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(age=age_id)\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['function']): r['n'] for r in rows})
            secondary_counts[age] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, FUNCTION, row_perc=False, sec_cols=4)

    def ws_18(self, ws):
        """
        Cols: Sex
        Rows: Age
        :: Only for print
        """
        counts = Counter()
        rows = NewspaperPerson.objects\
            .values('sex', 'age')\
            .filter(newspaper_sheet__country__in=self.country_list)\
            .filter(sex__in=self.male_female_ids)
        rows = self.apply_weights(rows, NewspaperPerson.sheet_db_table(), 'Print')
        counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True)

    def ws_19(self, ws):
        """
        Cols: Sex
        Rows: Age
        :: Only for broadcast
        """
        counts = Counter()
        # NOTE(review): "broadcast" here is only Television — Radio persons
        # presumably have no visible age; confirm.
        broadcast = ['Television']
        for media_type, model in person_models.iteritems():
            if media_type in broadcast:
                rows = model.objects\
                    .values('sex', 'age')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True)

    def ws_20(self, ws):
        """
        Cols: Function, Sex
        Rows: Occupation
        """
        secondary_counts = OrderedDict()
        functions_count = Counter()
        # Get top 5 functions
        for media_type, model in person_models.iteritems():
            if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('function')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                functions_count.update({(r['function']): r['n'] for r in rows})
        # Sort by descending weighted count and keep the five biggest functions.
        top_5_function_ids = [id for id, count in sorted(functions_count.items(), key=lambda x: -x[1])[:5]]
        top_5_functions = [(id, func) for id, func in FUNCTION if id in top_5_function_ids]
        for func_id, function in top_5_functions:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'occupation')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(function=func_id)\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
            secondary_counts[function] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4)

    def ws_21(self, ws):
        """
        Cols: Subject Sex
        Rows: Victim type
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'victim_of' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'victim_of')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .exclude(victim_of=None)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['victim_of']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, VICTIM_OF, row_perc=False)

    def ws_23(self, ws):
        """
        Cols: Subject Sex
        Rows: Survivor type
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'survivor_of' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'survivor_of')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .exclude(survivor_of=None)\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['survivor_of']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, SURVIVOR_OF, row_perc=False)

    def ws_24(self, ws):
        """
        Cols: Subject Sex
        Rows: Family Role
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'family_role' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'family_role')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['family_role']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False)

    def ws_25(self, ws):
        """
        Cols: Journalist Sex, Subject Sex
        Rows: Family Role
        """
        secondary_counts = OrderedDict()
        for sex_id, sex in self.male_female:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'family_role' in model._meta.get_all_field_names():
                    sheet_name = model.sheet_name()
                    # Follow person -> sheet -> journalist to filter on the
                    # reporter's sex rather than the subject's.
                    journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
                    rows = model.objects\
                        .values('sex', 'family_role')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(**{sheet_name + '__' + journo_name + '__sex':sex_id})\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['family_role']): r['n'] for r in rows})
            secondary_counts[sex] = counts
        # Special key consumed by the tabulator for the two column-title rows.
        secondary_counts['col_title_def'] = [
            'Sex of reporter',
            'Sex of news subject']
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, YESNO, row_perc=False, sec_cols=4)

    def ws_26(self, ws):
        """
        Cols: Subject Sex
        Rows: Whether Quoted
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'is_quoted' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'is_quoted')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['is_quoted']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False)

    def ws_27(self, ws):
        """
        Cols: Subject Sex
        Rows: Photographed
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'is_photograph' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'is_photograph')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['is_photograph']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, IS_PHOTOGRAPH, row_perc=False)

    def ws_28(self, ws):
        """
        Cols: Medium
        Rows: Region
        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            region = model.sheet_name() + '__country_region__region'
            # sex=1 selects female reporters; NOTE(review): confirm 1 == female
            # against the GENDER choices definition.
            rows = model.objects\
                .values(region)\
                .filter(sex=1)\
                .filter(**{region + '__in': self.region_list})
            rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
            for row in rows:
                # Get media and region id's to assign to counts
                media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
                counts.update({(media_id, region_id): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True)

    def ws_29(self, ws):
        """
        Cols: Regions
        Rows: Scope
        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            region = sheet_name + '__country_region__region'
            scope = sheet_name + '__scope'
            if 'scope' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(region, scope)\
                    .filter(**{region + '__in': self.region_list})\
                    .filter(sex=1)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
                    counts.update({(region_id, row['scope']): row['n']})
        self.tabulate(ws, counts, self.regions, SCOPE, row_perc=False)

    def ws_30(self, ws):
        """
        Cols: Region
        Rows: Major Topics
        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            region = sheet_name + '__country_region__region'
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(region, topic)\
                    .filter(**{region + '__in': self.region_list})\
                    .filter(sex=1)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
                    major_topic = TOPIC_GROUPS[row['topic']]
                    counts.update({(region_id, major_topic): row['n']})
        self.tabulate(ws, counts, self.regions, MAJOR_TOPICS, row_perc=False)

    def ws_31(self, ws):
        """
        Cols: Sex of Reporter
        Rows: Minor Topics
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', topic)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female)

    def ws_32(self, ws):
        """
        Cols: Medium
        Rows: Topics
        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(topic)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex=1)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                    counts.update({(media_id, row['topic']): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, TOPICS, row_perc=False)

    def ws_34(self, ws):
        """
        Cols: Sex of reporter
        Rows: Sex of subject
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            sheet_name = model.sheet_name()
            journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
            journo_sex = sheet_name + '__' + journo_name + '__sex'
            rows = model.objects\
                .values(journo_sex, 'sex')\
                .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            # NOTE(review): weighting is deliberately disabled here (raw counts
            # via annotate instead) — confirm this is intentional.
            # rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
            counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows})
        counts['col_title_def'] = 'Sex of reporter'
        self.tabulate(ws, counts, self.male_female, GENDER, row_perc=True, display_cols=self.female)

    def ws_35(self, ws):
        """
        Cols: Sex of reporter
        Rows: Age of reporter
        :: Only for television
        """
        counts = Counter()
        rows = TelevisionJournalist.objects\
            .values('sex', 'age')\
            .filter(television_sheet__country__in=self.country_list)\
            .filter(sex__in=self.male_female_ids)
        rows = self.apply_weights(rows, TelevisionJournalist.sheet_db_table(), 'Television')
        counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True, display_cols=self.female)

    def ws_36(self, ws):
        """
        Cols: Sex of Reporter
        Rows: Focus: about women
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            about_women = sheet_name + '__about_women'
            if 'about_women' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', about_women)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['about_women']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=True)

    def ws_38(self, ws):
        """
        Cols: Focus: about women
        Rows: Major Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)
                rows = self.apply_weights(rows, model._meta.db_table, media_type)
                for r in rows:
                    counts.update({(r['about_women'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True)

    def ws_39(self, ws):
        """
        Cols: Focus: about women
        Rows: Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)
                rows = self.apply_weights(rows, model._meta.db_table, media_type)
                counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, YESNO, TOPICS, row_perc=True)

    def ws_40(self, ws):
        """
        Cols: Region, Topics
        Rows: Focus: about women
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'about_women' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('topic', 'about_women')\
                        .filter(country_region__region=region)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=2, display_cols=self.yes)

    def ws_41(self, ws):
        """
        Cols: Equality rights raised
        Rows: Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            if 'equality_rights' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('equality_rights', 'topic')\
                    .filter(country__in=self.country_list)
                rows = self.apply_weights(rows, model._meta.db_table, media_type)
                counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, YESNO, TOPICS, row_perc=False)

    def ws_42(self, ws):
        """
        Cols: Region, Equality rights raised
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'equality_rights' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('topic', 'equality_rights')\
                        .filter(country_region__region=region)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=4)

    def ws_43(self, ws):
        """
        Cols: Sex of reporter, Equality rights raised
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                sheet_name = model.sheet_name()
                topic = sheet_name + '__topic'
                equality_rights = sheet_name + '__equality_rights'
                if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(topic, equality_rights)\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(sex=gender_id)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=4)

    def ws_44(self, ws):
        """
        Cols: Sex of reporter, Equality rights raised
        Rows: Region
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                sheet_name = model.sheet_name()
                region = sheet_name + '__country_region__region'
                equality_rights = sheet_name + '__equality_rights'
                if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(equality_rights, region)\
                        .filter(sex=gender_id)\
                        .filter(**{region + '__in':self.region_list})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    for r in rows:
                        # Map the region name back to its numeric id.
                        region_id = [id for id, name in self.regions if name == r['region']][0]
                        counts.update({(r['equality_rights'], region_id): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.regions, row_perc=False, sec_cols=4)

    def ws_45(self, ws):
        """
        Cols: Sex of news subject
        Rows: Region
        :: Equality rights raised == Yes
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names():
                region = model.sheet_name() + '__country_region__region'
                equality_rights = model.sheet_name() + '__equality_rights'
                rows = model.objects\
                    .values('sex', region)\
                    .filter(**{region + '__in':self.region_list})\
                    .filter(**{equality_rights:'Y'})
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for r in rows:
                    region_id = [id for id, name in self.regions if name == r['region']][0]
                    counts.update({(r['sex'], region_id): r['n']})
        self.tabulate(ws, counts, self.male_female, self.regions, row_perc=True)

    def ws_46(self, ws):
        """
        Cols: Region, Stereotypes
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'stereotypes' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('stereotypes', 'topic')\
                        .filter(country_region__region=region)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    for r in rows:
                        counts.update({(TOPIC_GROUPS[r['topic']], r['stereotypes']): r['n']})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True, sec_cols=8)

    def ws_47(self, ws):
        """
        Cols: Stereotypes
        Rows: Major Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            rows = model.objects\
                .values('stereotypes', 'topic')\
                .filter(country__in=self.country_list)
            rows = self.apply_weights(rows, model._meta.db_table, media_type)
            for r in rows:
                counts.update({(r['stereotypes'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True)

    def ws_48(self, ws):
        """
        Cols: Sex of reporter, Stereotypes
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                sheet_name = model.sheet_name()
                topic = sheet_name + '__topic'
                stereotypes = sheet_name + '__stereotypes'
                if 'stereotypes' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(stereotypes, topic)\
                        .filter(sex=gender_id)\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    for r in rows:
                        counts.update({(r['stereotypes'], TOPIC_GROUPS[r['topic']]): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=False, sec_cols=8)

    def ws_49(self, ws):
        """
        Cols: Major Topics
        Rows: Region
        :: Internet media type only
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country_region__region')\
            .filter(country_region__region__in=self.region_list)
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, region_id): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.regions, row_perc=True)

    def ws_50(self, ws):
        """
        Cols: Major Topics
        Rows: Country
        :: Internet media type only
        :: Only stories shared on Twitter
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country')\
            .filter(country__in=self.country_list)\
            .filter(shared_via_twitter='Y')
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, row['country']): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, row_perc=True)

    def ws_51(self, ws):
        """
        Cols: Major Topics
        Rows: Country
        :: Internet media type only
        :: Only stories shared on Facebook
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country')\
            .filter(country__in=self.country_list)\
            .filter(shared_on_facebook='Y')
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, row['country']): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, row_perc=True)

    def ws_52(self, ws):
        """
        Cols: Major Topics
        Rows: Country
        :: Internet media type only
        :: Only stories with reference to gender equality
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country')\
            .filter(country__in=self.country_list)\
            .filter(equality_rights='Y')
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, row['country']): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, row_perc=True)

    def ws_53(self, ws):
        """
        Cols: Topic
        Rows: Country
        :: Internet media type only
        :: Female reporters only
        """
        display_cols = [(id, value) for id, value in GENDER if id==1]
        secondary_counts = OrderedDict()
        model = sheet_models.get('Internet')
        for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems():
            counts = Counter()
            journo_sex_field = '%s__sex' % model.journalist_field_name()
            rows = model.objects\
                .values(journo_sex_field, 'country')\
                .filter(topic__in=topic_ids)
            rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
            counts.update({(r['sex'], r['country']): r['n'] for r in rows})
            major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
            secondary_counts[major_topic_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, display_cols=display_cols, sec_cols=2)

    def ws_54(self, ws):
        """
        Cols: Major Topic, sex of subject
        Rows: Country
        :: Internet media type only
        """
        secondary_counts = OrderedDict()
        model = person_models.get('Internet')
        for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems():
            counts = Counter()
            country_field = '%s__country' % model.sheet_name()
            rows = model.objects\
                .values('sex', country_field)\
                .filter(**{model.sheet_name() + '__topic__in':topic_ids})
            rows = self.apply_weights(rows, model.sheet_db_table(), 'Internet')
            counts.update({(r['sex'], r['country']): r['n'] for r in rows})
            major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
            secondary_counts[major_topic_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, sec_cols=8)

    def ws_55(self, ws):
        """
        Cols: Occupation
        Rows: Country
        :: Show all countries
        :: Only female subjects
        :: Internet media type only
        """
        counts = Counter()
        model = person_models.get('Internet')
        country_field = '%s__country' % model.sheet_name()
        rows = model.objects\
            .values(country_field, 'occupation')\
            .filter(sex=1)
        rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        counts.update({(r['occupation'], r['country']): r['n'] for r in rows})
        self.tabulate(ws, counts, OCCUPATION, self.countries, row_perc=True)

    def ws_56(self, ws):
        """
        Cols: Function
        Rows: Country
        :: Show all countries
        :: Internet media type only
        """
        counts = Counter()
        model = person_models.get('Internet')
        country_field = '%s__country' % model.sheet_name()
        rows = model.objects\
            .values(country_field, 'function')\
            .annotate(n=Count('id'))
        rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        counts.update({(r['function'], r['country']): r['n'] for r in rows})
        self.tabulate(ws, counts, FUNCTION, self.countries, row_perc=True)

    def ws_57(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, Family role
        :: Show all countries
        :: Internet media type only
        """
        # One sub-table per country, stacked vertically starting at row 6.
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'family_role')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            counts = {(row['sex'], row['family_role']): row['n'] for row in rows}
            # If only captured countries should be displayed use
            # if counts.keys():
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r)
            r += len(YESNO)

    def ws_58(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, is photographed
        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'is_photograph')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            counts = {(row['sex'], row['is_photograph']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, IS_PHOTOGRAPH, row_perc=True, sec_row=True, r=r)
            r += len(IS_PHOTOGRAPH)

    def ws_59(self, ws):
        """
        Cols: Sex of reporter
        Rows: Sex of subject
        :: Internet media only
        """
        counts = Counter()
        model = person_models.get('Internet')
        sheet_name = model.sheet_name()
        journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
        journo_sex = sheet_name + '__' + journo_name + '__sex'
        rows = model.objects\
            .values(journo_sex, 'sex')\
            .filter(**{model.sheet_name() + '__country__in':self.country_list})
        rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows})
        counts['col_title_def'] = 'Sex of reporter'
        self.tabulate(ws, counts, GENDER, GENDER, row_perc=False)

    def ws_60(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, age
        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'age')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            counts = {(row['sex'], row['age']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, AGES, row_perc=True, sec_row=True, r=r)
            r += len(AGES)

    def ws_61(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, is_quoted
        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'is_quoted')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            counts = {(row['sex'], row['is_quoted']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r)
            r += len(YESNO)

    def ws_62(self, ws):
        """
        Cols: Topic
        Rows: Country, equality raised
        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('topic', 'equality_rights')\
                .filter(country=code)
            rows = self.apply_weights(rows, model._meta.db_table, "Internet")
            counts = {(row['topic'], row['equality_rights']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r)
            r += len(YESNO)

    def ws_63(self, ws):
        """
        Cols: Topic
        Rows: Country, stereotypes challenged
        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Internet')
        for \
code, country in self.countries: rows = model.objects\ .values('topic', 'stereotypes')\ .filter(country=code) rows = self.apply_weights(rows, model._meta.db_table, "Internet") counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, AGREE_DISAGREE, row_perc=True, sec_row=True, r=r) r += len(AGREE_DISAGREE) def ws_64(self, ws): """ Cols: Topic Rows: Country, about women :: Show all countries :: Internet media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Internet') for code, country in self.countries: rows = model.objects\ .values('topic', 'about_women')\ .filter(country=code) rows = self.apply_weights(rows, model._meta.db_table, "Internet") counts = {(row['topic'], row['about_women']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r) r += len(YESNO) def ws_65(self, ws): """ Cols: Topic Rows: Country, tweet or retweet :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'retweet')\ .filter(country=code) rows = self.apply_weights(rows, model._meta.db_table, "Twitter") counts = {(row['topic'], row['retweet']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, RETWEET, row_perc=False, sec_row=True, r=r) r += len(RETWEET) def ws_66(self, ws): """ Cols: Topic Rows: Country, sex of news subject :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = person_models.get('Twitter') topic_field = '%s__topic' % model.sheet_name() for code, country in self.countries: rows = model.objects\ .values(topic_field, 'sex')\ 
.filter(**{model.sheet_name() + '__country':code}) rows = self.apply_weights(rows, model.sheet_db_table(), "Twitter") counts.update({(row['topic'], row['sex']): row['n'] for row in rows}) self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, GENDER, row_perc=True, sec_row=True, r=r) r += len(GENDER) def ws_67(self, ws): """ Cols: Topic Rows: Country :: Only female journalists :: Show all countries :: Twitter media type only """ counts = Counter() model = sheet_models.get('Twitter') rows = model.objects\ .values('topic', 'country')\ .filter(**{model.journalist_field_name() + '__sex':1}) rows = self.apply_weights(rows, model._meta.db_table, "Twitter") counts.update({(row['topic'], row['country']): row['n'] for row in rows}) self.tabulate(ws, counts, TOPICS, self.countries, row_perc=True, sec_row=False) def ws_68(self, ws): """ Cols: Topic Rows: Country, about women :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'about_women')\ .filter(country=code) rows = self.apply_weights(rows, model._meta.db_table, "Twitter") counts = {(row['topic'], row['about_women']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, YESNO, row_perc=False, sec_row=True, r=r) r += len(YESNO) def ws_69(self, ws): """ Cols: Topic Rows: Country, stereotypes :: Show all countries :: Twitter media type only """ r = 6 self.write_col_headings(ws, TOPICS) counts = Counter() model = sheet_models.get('Twitter') for code, country in self.countries: rows = model.objects\ .values('topic', 'stereotypes')\ .filter(country=code) rows = self.apply_weights(rows, model._meta.db_table, "Twitter") counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows} self.write_primary_row_heading(ws, country, r=r) self.tabulate(ws, counts, TOPICS, 
                          AGREE_DISAGREE, row_perc=True, sec_row=True, r=r)
            r += len(AGREE_DISAGREE)

    def ws_76(self, ws):
        """
        Cols: Topic, Stereotypes
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'stereotypes' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('stereotypes', 'country')\
                        .filter(topic=topic_id)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['stereotypes'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, self.countries, row_perc=True, sec_cols=8)

    def ws_77(self, ws):
        """
        Cols: Topic, Reference to gender equality
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'equality_rights' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('equality_rights', 'country')\
                        .filter(topic=topic_id)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['equality_rights'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.countries, row_perc=True, sec_cols=4)

    def ws_78(self, ws):
        """
        Cols: Topic, victim_of
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'victim_of' in model._meta.get_all_field_names():
                    country_field = '%s__country' % model.sheet_name()
                    rows = model.objects\
                        .values('victim_of', country_field)\
                        .filter(**{model.sheet_name() + '__topic':topic_id})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['victim_of'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, VICTIM_OF, self.countries, row_perc=True, sec_cols=18)

    def ws_79(self, ws):
        """
        Cols: Topic, survivor_of
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'survivor_of' in model._meta.get_all_field_names():
                    country_field = '%s__country' % model.sheet_name()
                    rows = model.objects\
                        .values('survivor_of', country_field)\
                        .filter(**{model.sheet_name() + '__topic':topic_id})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['survivor_of'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, SURVIVOR_OF, self.countries, row_perc=True, sec_cols=18)

    # -------------------------------------------------------------------------------
    # Helper functions
    #
    def write_headers(self, ws, title, description):
        """ Write the headers to the worksheet """
        ws.write(0, 0, title, self.heading)
        ws.write(1, 0, description, self.heading)
        ws.write(3, 2, self.gmmp_year, self.heading)

    def write_col_headings(self, ws, cols, c=2, r=4):
        """
        :param ws: worksheet to write to
        :param cols: list of `(col_id, col_title)` tuples of column ids and titles
        :param r, c: initial position where cursor should start writing to
        """
        # Each column takes two cells: a count ("N") and a percentage ("%").
        for col_id, col_title in cols:
            ws.write(r, c, clean_title(col_title), self.col_heading)
            ws.write(r + 1, c, "N")
            ws.write(r + 1, c + 1, "%")
            c += 2

    def write_primary_row_heading(self, ws, heading, c=0, r=6):
        """
        :param ws: worksheet to write to
        :param heading: row heading to write
        :param r, c: position where heading should be written to
        """
        ws.write(r, c, clean_title(heading), self.heading)

    def tabulate_secondary_cols(self, ws, secondary_counts, cols, rows, row_perc=False, display_cols=None, sec_cols=4):
        """
        :param ws: worksheet to write to
        :param secondary_counts: dict in following format:
            {'Primary column heading': Count object, ...}
        :param list cols: list of `(col_id, col_title)` tuples of column ids and
titles :param list rows: list of `(row_id, row_title)` tuples of row ids and titles :param bool row_perc: should percentages by calculated by row instead of column (default: False) :param sec_cols: amount of cols needed for secondary cols """ r, c = 7, 1 # row titles for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 if 'col_title_def' in secondary_counts: # Write definitions of column heading titles ws.write(r-3, c-1, secondary_counts['col_title_def'][0], self.sec_col_heading_def) ws.write(r-2, c-1, secondary_counts['col_title_def'][1], self.col_heading_def) secondary_counts.pop('col_title_def') for field, counts in secondary_counts.iteritems(): ws.merge_range(r-3, c, r-3, c+sec_cols-1, clean_title(field), self.sec_col_heading) self.tabulate(ws, counts, cols, rows, row_perc=row_perc, sec_col=True, display_cols=display_cols, r=7, c=c) c += sec_cols def tabulate(self, ws, counts, cols, rows, row_perc=False, sec_col=False, sec_row=False, display_cols=None, c=1, r=6): """ Emit a table. :param ws: worksheet to write to :param dict counts: dict from `(col_id, row_id)` tuples to count for that combination. :param list cols: list of `(col_id, col_title)` tuples of column ids and titles :param list rows: list of `(row_id, row_title)` tuples of row ids and titles :param bool row_perc: should percentages by calculated by row instead of column (default: False) :param sec_col: Are wecreating a secondary column title(default: False) :param sec_row: Are we creating a secondary row title(default: False) :param display_cols: Optional if only a subset of columns should be displayed e.g. 
only female :param r, c: initial position where cursor should start writing to """ if row_perc: # we'll need percentage by rows row_totals = {} for row_id, row_title in rows: row_totals[row_id] = sum(counts.get((col_id, row_id), 0) for col_id, _ in cols) # noqa # row titles if not sec_col: # Else already written for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 # if only filtered results should be shown # e.g. only print female columns if display_cols: cols = display_cols if 'col_title_def' in counts and not sec_row: ws.write(r - 2, c-1, counts['col_title_def'], self.col_heading_def) counts.pop('col_title_def') # values, written by column for col_id, col_heading in cols: # column title if not sec_row: ws.merge_range(r-2, c, r-2, c+1, clean_title(col_heading), self.col_heading) ws.write(r - 1, c, "N", self.label) ws.write(r - 1, c + 1, "%", self.label) if not row_perc: # column totals # Confirm: Perc of col total or matrix total? 
# total = sum(counts.itervalues()) total = sum(counts.get((col_id, row_id), 0) for row_id, _ in rows) # row values for this column for i, row in enumerate(rows): row_id, row_title = row if row_perc: # row totals total = row_totals[row_id] n = counts.get((col_id, row_id), 0) ws.write(r + i, c, n, self.N) ws.write(r + i, c + 1, p(n, total), self.P) c += 2 Make small fixes # Python import StringIO from collections import Counter, OrderedDict # Django from django.core import urlresolvers from django_countries import countries from django.db import connection from django.db.models import F, Count, Sum, FieldDoesNotExist from django.contrib.sites.shortcuts import get_current_site # 3rd Party import xlsxwriter # Project from forms.models import ( NewspaperSheet, NewspaperPerson, TelevisionJournalist, person_models, sheet_models, journalist_models) from forms.modelutils import (TOPICS, GENDER, SPACE, OCCUPATION, FUNCTION, SCOPE, YESNO, AGES, SOURCE, VICTIM_OF, SURVIVOR_OF, IS_PHOTOGRAPH, AGREE_DISAGREE, RETWEET, TV_ROLE, MEDIA_TYPES, CountryRegion) from report_details import WS_INFO, REGION_COUNTRY_MAP, MAJOR_TOPICS, TOPIC_GROUPS, GROUP_TOPICS_MAP, FORMATS def has_field(model, fld): try: model._meta.get_field(fld) return True except FieldDoesNotExist: return False def p(n, d): """ Helper to calculate the percentage of n / d, returning 0 if d == 0. """ if d == 0: return 0.0 return float(n) / d def get_regions(): """ Return a (id, region_name) list for all regions """ country_regions = CountryRegion.objects\ .values('region')\ .exclude(region='Unmapped') regions = set(item['region'] for item in country_regions) return [(i, region) for i, region in enumerate(regions)] def get_countries(selected=None): """ Return a (code, country) list for countries captured. 
""" captured_country_codes = set() for model in sheet_models.itervalues(): rows = model.objects.values('country') captured_country_codes.update([r['country'] for r in rows]) return [(code, name) for code, name in list(countries) if code in captured_country_codes] def get_region_countries(region): """ Return a (code, country) list for a region. """ if region == 'ALL': return get_countries() else: country_codes = REGION_COUNTRY_MAP[region] return [(code, name) for code, name in list(countries) if code in country_codes] def get_country_region(country): """ Return a (id, region_name) list to which a country belongs. """ if country == 'ALL': return get_regions() else: return [(0, [k for k, v in REGION_COUNTRY_MAP.items() if country in v][0])] def clean_title(text): """ Return the string passed in stripped of its numbers and parentheses """ if text != "Congo (the Democratic Republic of the)": return text[text.find(')')+1:].lstrip() return text class XLSXDataExportBuilder(): def __init__(self, request): self.domain = "http://%s" % get_current_site(request).domain self.sheet_exclude_fields = ['monitor', 'url_and_multimedia', 'time_accessed', 'country_region'] self.person_exclude_fields = [] self.journalist_exclude_fields =[] self.sheet_fields_with_id = ['topic', 'scope', 'person_secondary', 'inequality_women', 'stereotypes'] self.person_fields_with_id = ['sex', 'age', 'occupation', 'function', 'survivor_of', 'victim_of'] self.journalist_fields_with_id = ['sex', 'age'] def build(self): """ Generate an Excel spreadsheet and return it as a string. 
""" output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) for model in sheet_models.itervalues(): self.create_sheet_export(model, workbook) for model in person_models.itervalues(): self.create_person_export(model, workbook) for model in journalist_models.itervalues(): self.create_journalist_export(model, workbook) workbook.close() output.seek(0) return output.read() def create_sheet_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all() row, col = 0, 0 fields = [field for field in model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.sheet_fields_with_id) row += 1 col = 0 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_sheet_row(obj, ws, row+y, col, fields, self.sheet_fields_with_id) def create_person_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all().prefetch_related(model.sheet_name()) row, col = 0, 0 fields = [field for field in model._meta.fields if not field.name in self.person_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.person_fields_with_id) sheet_model = model._meta.get_field(model.sheet_name()).rel.to sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True) row += 1 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_person_row(obj, ws, row+y, col, fields, self.person_fields_with_id) col += 1 sheet_obj = getattr(obj, model.sheet_name()) ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id) def create_journalist_export(self, model, wb): ws = wb.add_worksheet(model._meta.object_name) obj_list = model.objects.all().prefetch_related(model.sheet_name()) row, col = 0, 0 fields = [field for field in model._meta.fields if not 
field.name in self.journalist_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, fields, self.journalist_fields_with_id) sheet_model = model._meta.get_field(model.sheet_name()).rel.to sheet_fields = [field for field in sheet_model._meta.fields if not field.name in self.sheet_exclude_fields] ws, col = self.write_ws_titles(ws, row, col, sheet_fields, self.sheet_fields_with_id, append_sheet=True) row += 1 col = 0 for y, obj in enumerate(obj_list): col = 0 ws, col = self.write_journalist_row(obj, ws, row+y, col, fields, self.journalist_fields_with_id) col += 1 sheet_obj = getattr(obj, model.sheet_name()) ws, col = self.write_sheet_row(sheet_obj, ws, row+y, col, sheet_fields, self.sheet_fields_with_id) def write_ws_titles(self, ws, row, col, fields, fields_with_id, append_sheet=False): """ Writes the column titles to the worksheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name :param append_sheet: Boolean specifying whether the related sheet object needs to be appended to the row. 
""" if not append_sheet: for field in fields: ws.write(row, col, unicode(field.name)) col += 1 if field.name in fields_with_id: ws.write(row, col, unicode(field.name+"_id")) col += 1 ws.write(row, col, unicode('edit_url')) col += 1 else: for field in fields: ws.write(row, col, unicode("sheet_" + field.name)) col += 1 if field.name in fields_with_id: ws.write(row, col, unicode("sheet_" + field.name + "_id")) col += 1 ws.write(row, col, unicode('sheet_edit_url')) col += 1 return ws, col def write_sheet_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Sheet models to the worksheet :param obj: Reference to the model instance which is being written to the sheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: # Certain fields are 1-indexed if field.name == 'country': ws.write(row, col, getattr(obj, field.name).code) elif field.name == 'topic': ws.write(row, col, unicode(TOPICS[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, TOPICS[getattr(obj, field.name)-1][0]) elif field.name == 'scope': ws.write(row, col, unicode(SCOPE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, SCOPE[getattr(obj, field.name)-1][0]) elif field.name == 'person_secondary': ws.write(row, col, unicode(SOURCE[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, SOURCE[getattr(obj, field.name)][0]) elif field.name == 'inequality_women': ws.write(row, col, unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0]) elif field.name == 'stereotypes': ws.write(row, col, unicode(AGREE_DISAGREE[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, AGREE_DISAGREE[getattr(obj, field.name)-1][0]) elif field.name == 'space': ws.write(row, col, 
unicode(SPACE[getattr(obj, field.name)-1][1])) elif field.name == 'retweet': ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1])) else: try: ws.write(row, col, unicode(getattr(obj, field.name))) if field.name in fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row, col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( obj._meta.app_label, obj._meta.model_name), args=(obj.id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col def write_person_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Person models to the worksheet :param obj: Reference to the model instance which is being written to the sheet :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: # Certain fields are 1-indexed if field.name == 'sex': ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, GENDER[getattr(obj, field.name)-1][0]) elif field.name == 'age': ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, AGES[getattr(obj, field.name)][0]) elif field.name == 'occupation': ws.write(row, col, unicode(OCCUPATION[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, OCCUPATION[getattr(obj, field.name)][0]) elif field.name == 'function': ws.write(row, col, unicode(FUNCTION[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, FUNCTION[getattr(obj, field.name)][0]) elif field.name == 'victim_of' and not getattr(obj, field.name) == None: ws.write(row, col, unicode(VICTIM_OF[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, VICTIM_OF[getattr(obj, field.name)][0]) elif field.name == 'survivor_of' and not getattr(obj, 
field.name) == None: ws.write(row, col, unicode(SURVIVOR_OF[getattr(obj, field.name)][1])) col += 1 ws.write(row, col, SURVIVOR_OF[getattr(obj, field.name)][0]) elif field.name == 'is_photograph': ws.write(row, col, unicode(IS_PHOTOGRAPH[getattr(obj, field.name)-1][1])) elif field.name == 'space': ws.write(row, col, unicode(SPACE[getattr(obj, field.name)-1][1])) elif field.name == 'retweet': ws.write(row, col, unicode(RETWEET[getattr(obj, field.name)-1][1])) elif field.name == obj.sheet_name(): ws.write(row, col, getattr(obj, field.name).id) # Get the parent model and id for building the edit link parent_model = field.related.parent_model parent_id = getattr(obj, field.name).id else: try: ws.write(row,col, unicode(getattr(obj, field.name))) if field.name in self.person_fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 # Write link to end of row change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( parent_model._meta.app_label, parent_model._meta.model_name), args=(parent_id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col def write_journalist_row(self, obj, ws, row, col, fields, fields_with_id): """ Writes a row of data of Journalist models to the worksheet :param obj: Reference to the model instance which is being written to the sheet_fields_with_id :param ws: Reference to the current worksheet :param row, col: y,x postion of the cursor :param fields: list of fields of the model which need to be written to the sheet_fields_with_id :param fields_with_id: fields which need to be written over two columns: id + name """ for field in fields: if field.name == 'sex': ws.write(row, col, unicode(GENDER[getattr(obj, field.name)-1][1])) col += 1 ws.write(row, col, GENDER[getattr(obj, field.name)-1][0]) elif field.name == 'age' and not getattr(obj, field.name) == None: ws.write(row, col, unicode(AGES[getattr(obj, field.name)][1])) col += 1 ws.write(row, 
col, AGES[getattr(obj, field.name)][0]) elif field.name == obj.sheet_name(): ws.write(row, col, getattr(obj, field.name).id) # Get the parent model and id for building the edit link parent_model = field.related.parent_model parent_id = getattr(obj, field.name).id else: try: ws.write(row,col, unicode(getattr(obj, field.name))) if field.name in fields_with_id: col += 1 except UnicodeEncodeError: ws.write(row,col, unicode(getattr(obj, field.name).encode('ascii', 'replace'))) col += 1 # Write link to end of row change_url = urlresolvers.reverse( 'admin:%s_%s_change' % ( parent_model._meta.app_label, parent_model._meta.model_name), args=(parent_id,)) ws.write_url(row, col, "%s%s" % (self.domain, change_url)) return ws, col class XLSXReportBuilder: def __init__(self, form): from reports.views import CountryForm, RegionForm self.form = form if isinstance(form, CountryForm): self.countries = form.filter_countries() self.regions = get_country_region(form.cleaned_data['country']) self.report_type = 'country' elif isinstance(form, RegionForm): region = [name for i, name in form.REGIONS if str(i) == form.cleaned_data['region']][0] self.countries = get_region_countries(region) self.regions = [(0, region)] self.report_type = 'region' else: self.countries = get_countries() self.regions = get_regions() self.report_type = 'global' self.country_list = [code for code, name in self.countries] self.region_list = [name for id, name in self.regions] # Various gender utilities self.male_female = [(id, value) for id, value in GENDER if id in [1, 2]] self.male_female_ids = [id for id, value in self.male_female] self.female = [(id, value) for id, value in GENDER if id == 1] self.yes = [(id, value) for id, value in YESNO if id == 'Y'] self.gmmp_year = '2015' def build(self): """ Generate an Excel spreadsheet and return it as a string. 
""" output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) # setup formats self.heading = workbook.add_format(FORMATS['heading']) self.col_heading = workbook.add_format(FORMATS['col_heading']) self.col_heading_def = workbook.add_format(FORMATS['col_heading_def']) self.sec_col_heading = workbook.add_format(FORMATS['sec_col_heading']) self.sec_col_heading_def = workbook.add_format(FORMATS['sec_col_heading_def']) self.label = workbook.add_format(FORMATS['label']) self.N = workbook.add_format(FORMATS['N']) self.P = workbook.add_format(FORMATS['P']) # Use the following for specifying which reports to create during dev # test_functions = [ # 'ws_01', 'ws_02', 'ws_04', 'ws_05', 'ws_06', 'ws_07', 'ws_08', 'ws_09', 'ws_10', # 'ws_11', 'ws_12', 'ws_13', 'ws_14', 'ws_15', 'ws_16', 'ws_17', 'ws_18', 'ws_19', 'ws_20', # 'ws_21', 'ws_23', 'ws_24', 'ws_25', 'ws_26', 'ws_27', 'ws_28', 'ws_29', 'ws_30', # 'ws_31', 'ws_32', 'ws_34', 'ws_35', 'ws_36', 'ws_38', 'ws_39', 'ws_40', # 'ws_41', 'ws_42', 'ws_43', 'ws_44', 'ws_45', 'ws_46', 'ws_47', 'ws_48', # 'ws_49', 'ws_50', 'ws_51', 'ws_52', 'ws_53', 'ws_54', 'ws_55', 'ws_56',, 'ws_57', 'ws_58', 'ws_59', 'ws_60', # 'ws_61', 'ws_62', 'ws_63', 'ws_64', 'ws_65', 'ws_66', 'ws_67', 'ws_68', 'ws_69', 'ws_70', # 'ws_76', 'ws_77', 'ws_78', 'ws_79'] test_functions = ['ws_05'] sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) for function in test_functions: if self.report_type in sheet_info[function]['reports']: ws = workbook.add_worksheet(sheet_info[function]['name']) self.write_headers(ws, sheet_info[function]['title'], sheet_info[function]['desc']) getattr(self, function)(ws) # ------------------------------------------------------------------- # To ensure ordered worksheets # sheet_info = OrderedDict(sorted(WS_INFO.items(), key=lambda t: t[0])) # for ws_num, ws_info in sheet_info.iteritems(): # if self.report_type in ws_info['reports']: # ws = workbook.add_worksheet(ws_info['name']) # self.write_headers(ws, 
ws_info['title'], ws_info['desc']) # getattr(self, ws_num)(ws) workbook.close() output.seek(0) return output.read() def dictfetchall(self, cursor): """ Returns all rows from a cursor as a dict """ desc = cursor.description return [ dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall() ] def apply_weights(self, rows, db_table, media_type): """ param rows: Queryset to apply the weights to param db_table: name of relevant sheet table param: media_type: media type to weigh by """ query = rows.extra( tables=['reports_weights'], where=[ 'reports_weights.country = %s.country' % (db_table), 'reports_weights.media_type = \'%s\'' % (media_type), ]).annotate() raw_query, params = query.query.sql_with_params() raw_query = raw_query.replace('SELECT', 'SELECT cast(round(SUM(reports_weights.weight)) as int) AS "n", ') cursor = connection.cursor() cursor.execute(raw_query, params) return self.dictfetchall(cursor) def ws_01(self, ws): """ Cols: Media Type Rows: Region """ counts = Counter() for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('country_region__region')\ .filter(country_region__region__in=self.region_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for row in rows: if row['region'] is not None: # Get media and region id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] region_id = [region[0] for region in self.regions if region[1] == row['region']][0] counts.update({(media_id, region_id): row['n']}) self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True) def ws_02(self, ws): """ Cols: Media Type Rows: Region, Country """ r = 6 self.write_col_headings(ws, MEDIA_TYPES) counts = Counter() for region_id, region in self.regions: for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('country')\ .filter(country__in=self.country_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for row in rows: if 
row['country'] is not None: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] counts.update({(media_id, row['country']): row['n']}) self.write_primary_row_heading(ws, region, r=r) region_countries = [(code, country) for code, country in self.countries if code in REGION_COUNTRY_MAP[region]] self.tabulate(ws, counts, MEDIA_TYPES, region_countries, row_perc=True, sec_row=True, r=r) r += len(region_countries) def ws_04(self, ws): """ Cols: Region, Media type Rows: Major Topic """ secondary_counts = OrderedDict() for region_id, region in self.regions: counts = Counter() for media_type, model in sheet_models.iteritems(): rows = model.objects\ .values('topic')\ .filter(country_region__region=region) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] major_topic = TOPIC_GROUPS[r['topic']] counts.update({(media_id, major_topic): r['n']}) secondary_counts[region] = counts self.tabulate_secondary_cols(ws, secondary_counts, MEDIA_TYPES, MAJOR_TOPICS, row_perc=False, sec_cols=10) def ws_05(self, ws): """ Cols: Subject sex Rows: Major Topic """ counts = Counter() for media_type, model in person_models.iteritems(): topic_field = '%s__topic' % model.sheet_name() rows = model.objects\ .values('sex', topic_field)\ .filter(**{model.sheet_name() + '__country__in': self.country_list}) # .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for r in rows: counts.update({(r['sex'], TOPIC_GROUPS[r['topic']]): r['n']}) self.tabulate(ws, counts, GENDER, MAJOR_TOPICS, row_perc=True) def ws_06(self, ws): """ Cols: Region, Subject sex: female only Rows: Major Topics """ secondary_counts = OrderedDict() for region_id, region in self.regions: counts = Counter() for media_type, model in person_models.iteritems(): topic_field = '%s__topic' % 
model.sheet_name() rows = model.objects\ .values('sex', topic_field)\ .filter(**{model.sheet_name() + '__country_region__region':region})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for r in rows: counts.update({(r['sex'], TOPIC_GROUPS[r['topic']]): r['n']}) secondary_counts[region] = counts self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, MAJOR_TOPICS, row_perc=True, sec_cols=2, display_cols=self.female) def ws_07(self, ws): """ Cols: Media Type Rows: Subject Sex """ counts = Counter() for media_type, model in person_models.iteritems(): rows = model.objects\ .values('sex')\ .filter(**{model.sheet_name() + '__country__in': self.country_list})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for r in rows: # Get media id's to assign to counts media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0] counts.update({(media_id, r['sex']): r['n']}) self.tabulate(ws, counts, MEDIA_TYPES, self.male_female, row_perc=False) def ws_08(self, ws): """ Cols: Subject Sex Rows: Scope """ counts = Counter() for media_type, model in person_models.iteritems(): if 'scope' in model.sheet_field().rel.to._meta.get_all_field_names(): scope = '%s__scope' % model.sheet_name() rows = model.objects\ .values('sex', scope)\ .filter(**{model.sheet_name() + '__country__in': self.country_list})\ .filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) counts.update({(r['sex'], r['scope']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, SCOPE, row_perc=True, display_cols=self.female) def ws_09(self, ws): """ Cols: Subject Sex Rows: Topic """ counts = Counter() for media_type, model in person_models.iteritems(): topic = '%s__topic' % model.sheet_name() rows = model.objects\ .values('sex', topic)\ .filter(**{model.sheet_name() + '__country__in': self.country_list})\ 
.filter(sex__in=self.male_female_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) counts.update({(r['sex'], r['topic']): r['n'] for r in rows}) self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female) def ws_10(self, ws): """ Cols: Space Rows: Minor Topics :: Newspaper Sheets only """ # Calculate row values for column counts = Counter() for media_type, model in sheet_models.iteritems(): if media_type == 'Print': rows = model.objects\ .values('space', 'topic')\ .filter(country__in=self.country_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: counts.update({(r['space'], TOPIC_GROUPS[r['topic']]): r['n']}) self.tabulate(ws, counts, SPACE, MAJOR_TOPICS, row_perc=False) def ws_11(self, ws): """ Cols: Equality Rights Rows: Major Topics """ counts = Counter() for media_type, model in sheet_models.iteritems(): if 'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('equality_rights', 'topic')\ .filter(country__in=self.country_list) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']}) self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True) def ws_12(self, ws): """ Cols: Region, Equality Rights Rows: Major Topics """ secondary_counts = OrderedDict() for region_id, region_name in self.regions: counts = Counter() for media_type, model in sheet_models.iteritems(): # Some models has no equality rights field if 'equality_rights' in model._meta.get_all_field_names(): rows = model.objects\ .values('equality_rights', 'topic')\ .filter(country_region__region=region_name) rows = self.apply_weights(rows, model._meta.db_table, media_type) for r in rows: counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']}) secondary_counts[region_name] = counts self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, 
                                     sec_cols=4)

    def ws_13(self, ws):
        """
        Cols: Journalist Sex, Equality Rights
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                # Only journalist models whose sheet carries an equality_rights field.
                if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names():
                    topic = '%s__topic' % model.sheet_name()
                    equality_rights = '%s__equality_rights' % model.sheet_name()
                    rows = model.objects\
                        .values(equality_rights, topic)\
                        .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                        .filter(sex=gender_id)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    # NOTE(review): values() was called with the joined lookup strings
                    # (e.g. 'newspaper_sheet__topic'), yet the short keys are read here;
                    # presumably apply_weights() re-labels the columns — confirm.
                    for r in rows:
                        counts.update({(r['equality_rights'], TOPIC_GROUPS[r['topic']]): r['n']})
            secondary_counts[gender] = counts

        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, MAJOR_TOPICS, row_perc=True, sec_cols=4)

    def ws_14(self, ws):
        """
        Cols: Sex
        Rows: Occupation
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            # some Person models don't have an occupation field
            if 'occupation' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'occupation')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, OCCUPATION, row_perc=True, display_cols=self.female)

    def ws_15(self, ws):
        """
        Cols: Sex
        Rows: Function
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            # some Person models don't have a function field
            if 'function' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'function')\
                    .filter(**{model.sheet_name() + '__country__in': self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['function']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, FUNCTION, row_perc=True, display_cols=self.female)

    def ws_16(self, ws):
        """
        Cols: Function, Sex
        Rows: Occupation
        """
        secondary_counts = OrderedDict()
        for function_id, function in FUNCTION:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'occupation')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(function=function_id)\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
            secondary_counts[function] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4)

    def ws_17(self, ws):
        """
        Cols: Age, Sex of Subject
        Rows: Function
        """
        secondary_counts = OrderedDict()
        for age_id, age in AGES:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'function' in model._meta.get_all_field_names() and 'age' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'function')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(age=age_id)\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['function']): r['n'] for r in rows})
            secondary_counts[age] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, FUNCTION, row_perc=False, sec_cols=4)

    def ws_18(self, ws):
        """
        Cols: Sex
        Rows: Age

        :: Only for print
        """
        counts = Counter()
        rows = NewspaperPerson.objects\
            .values('sex', 'age')\
            .filter(newspaper_sheet__country__in=self.country_list)\
            .filter(sex__in=self.male_female_ids)
        rows = self.apply_weights(rows, NewspaperPerson.sheet_db_table(), 'Print')
        counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True)

    def ws_19(self, ws):
        """
        Cols: Sex
        Rows: Age

        :: Only for broadcast
        """
        counts = Counter()
        broadcast = ['Television']
        for media_type, model in person_models.iteritems():
            if media_type in broadcast:
                rows = model.objects\
                    .values('sex', 'age')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True)

    def ws_20(self, ws):
        """
        Cols: Function, Sex
        Rows: Occupation
        """
        secondary_counts = OrderedDict()
        functions_count = Counter()
        # Get top 5 functions
        for media_type, model in person_models.iteritems():
            if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('function')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                functions_count.update({(r['function']): r['n'] for r in rows})
        # Sort descending by weighted count and keep the five most frequent functions.
        top_5_function_ids = [id for id, count in sorted(functions_count.items(), key=lambda x: -x[1])[:5]]
        top_5_functions = [(id, func) for id, func in FUNCTION if id in top_5_function_ids]

        for func_id, function in top_5_functions:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'function' in model._meta.get_all_field_names() and 'occupation' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('sex', 'occupation')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(function=func_id)\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['occupation']): r['n'] for r in rows})
            secondary_counts[function] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=False, sec_cols=4)

    def ws_21(self, ws):
        """
        Cols: Subject Sex
        Rows: Victim type
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'victim_of' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'victim_of')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .exclude(victim_of=None)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['victim_of']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, VICTIM_OF, row_perc=False)

    def ws_23(self, ws):
        """
        Cols: Subject Sex
        Rows: Survivor type
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'survivor_of' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'survivor_of')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .exclude(survivor_of=None)\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['survivor_of']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, SURVIVOR_OF, row_perc=False)

    def ws_24(self, ws):
        """
        Cols: Subject Sex
        Rows: Family Role
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'family_role' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'family_role')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['family_role']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False)

    def ws_25(self, ws):
        """
        Cols: Journalist Sex, Subject Sex
        Rows: Family Role
        """
        secondary_counts = OrderedDict()
        for sex_id, sex in self.male_female:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'family_role' in model._meta.get_all_field_names():
                    sheet_name = model.sheet_name()
                    # Follow person -> sheet -> journalist to filter on the reporter's sex.
                    journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
                    rows = model.objects\
                        .values('sex', 'family_role')\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(**{sheet_name + '__' + journo_name + '__sex':sex_id})\
                        .filter(sex__in=self.male_female_ids)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['sex'], r['family_role']): r['n'] for r in rows})
            secondary_counts[sex] = counts
        secondary_counts['col_title_def'] = [
            'Sex of reporter',
            'Sex of news subject']

        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, YESNO, row_perc=False, sec_cols=4)

    def ws_26(self, ws):
        """
        Cols: Subject Sex
        Rows: Whether Quoted
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'is_quoted' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'is_quoted')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['is_quoted']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False)

    def ws_27(self, ws):
        """
        Cols: Subject Sex
        Rows: Photographed
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'is_photograph' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', 'is_photograph')\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['is_photograph']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, IS_PHOTOGRAPH, row_perc=False)

    def ws_28(self, ws):
        """
        Cols: Medium
        Rows: Region

        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            region = model.sheet_name() + '__country_region__region'
            rows = model.objects\
                .values(region)\
                .filter(sex=1)\
                .filter(**{region + '__in': self.region_list})
            rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
            for row in rows:
                # Get media and region id's to assign to counts
                media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
                counts.update({(media_id, region_id): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, self.regions, row_perc=True)

    def ws_29(self, ws):
        """
        Cols: Regions
        Rows: Scope

        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            region = sheet_name + '__country_region__region'
            scope = sheet_name + '__scope'
            if 'scope' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(region, scope)\
                    .filter(**{region + '__in': self.region_list})\
                    .filter(sex=1)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
                    counts.update({(region_id, row['scope']): row['n']})
        self.tabulate(ws, counts, self.regions, SCOPE, row_perc=False)

    def ws_30(self, ws):
        """
        Cols: Region
        Rows: Major Topics

        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            region = sheet_name + '__country_region__region'
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(region, topic)\
                    .filter(**{region + '__in': self.region_list})\
                    .filter(sex=1)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
                    # Collapse the detailed topic into its major-topic group.
                    major_topic = TOPIC_GROUPS[row['topic']]
                    counts.update({(region_id, major_topic): row['n']})
        self.tabulate(ws, counts, self.regions, MAJOR_TOPICS, row_perc=False)

    def ws_31(self, ws):
        """
        Cols: Sex of Reporter
        Rows: Minor Topics
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', topic)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, TOPICS, row_perc=True, display_cols=self.female)

    def ws_32(self, ws):
        """
        Cols: Medium
        Rows: Topics

        :: Female reporters only
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            topic = sheet_name + '__topic'
            if 'topic' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values(topic)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex=1)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    media_id = [media[0] for media in MEDIA_TYPES if media[1] == media_type][0]
                    counts.update({(media_id, row['topic']): row['n']})
        self.tabulate(ws, counts, MEDIA_TYPES, TOPICS, row_perc=False)

    def ws_34(self, ws):
        """
        Cols: Sex of reporter
        Rows: Sex of subject
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            sheet_name = model.sheet_name()
            journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
            journo_sex = sheet_name + '__' + journo_name + '__sex'
            rows = model.objects\
                .values(journo_sex, 'sex')\
                .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                .filter(sex__in=self.male_female_ids)\
                .annotate(n=Count('id'))
            # NOTE(review): unlike the other worksheets this one counts via
            # annotate() and leaves apply_weights() commented out — unweighted
            # by design or an oversight? Confirm before relying on the output.
            # rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
            counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows})
        counts['col_title_def'] = 'Sex of reporter'
        self.tabulate(ws, counts, self.male_female, GENDER, row_perc=True, display_cols=self.female)

    def ws_35(self, ws):
        """
        Cols: Sex of reporter
        Rows: Age of reporter

        :: Only for television
        """
        counts = Counter()
        rows = TelevisionJournalist.objects\
            .values('sex', 'age')\
            .filter(television_sheet__country__in=self.country_list)\
            .filter(sex__in=self.male_female_ids)
        rows = self.apply_weights(rows, TelevisionJournalist.sheet_db_table(), 'Television')
        counts.update({(r['sex'], r['age']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, AGES, row_perc=True, display_cols=self.female)

    def ws_36(self, ws):
        """
        Cols: Sex of Reporter
        Rows: Focus: about women
        """
        counts = Counter()
        for media_type, model in journalist_models.iteritems():
            sheet_name = model.sheet_name()
            about_women = sheet_name + '__about_women'
            if 'about_women' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                rows = model.objects\
                    .values('sex', about_women)\
                    .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                    .filter(sex__in=self.male_female_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                counts.update({(r['sex'], r['about_women']): r['n'] for r in rows})
        self.tabulate(ws, counts, self.male_female, YESNO, row_perc=False)

    def ws_38(self, ws):
        """
        Cols: Focus: about women
        Rows: Major Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)
                rows = self.apply_weights(rows, model._meta.db_table, media_type)
                for r in rows:
                    counts.update({(r['about_women'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, YESNO, MAJOR_TOPICS, row_perc=True)

    def ws_39(self, ws):
        """
        Cols: Focus: about women
        Rows: Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            if 'about_women' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('about_women', 'topic')\
                    .filter(country__in=self.country_list)
                rows = self.apply_weights(rows, model._meta.db_table, media_type)
                counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, YESNO, TOPICS, row_perc=True)

    def ws_40(self, ws):
        """
        Cols: Region, Topics
        Rows: Focus: about women
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'about_women' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('topic', 'about_women')\
                        .filter(country_region__region=region)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['about_women'], r['topic']): r['n'] for r in rows})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=False, sec_cols=2, display_cols=self.yes)

    def ws_41(self, ws):
        """
        Cols: Equality rights raised
        Rows: Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            if 'equality_rights' in model._meta.get_all_field_names():
                rows = model.objects\
                    .values('equality_rights', 'topic')\
                    .filter(country__in=self.country_list)
                rows = self.apply_weights(rows, model._meta.db_table, media_type)
                counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
        self.tabulate(ws, counts, YESNO, TOPICS, row_perc=False)

    def ws_42(self, ws):
        """
        Cols: Region, Equality rights raised
        Rows: Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'equality_rights' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('topic', 'equality_rights')\
                        .filter(country_region__region=region)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=True, sec_cols=4)

    def ws_43(self, ws):
        """
        Cols: Sex of reporter, Equality rights raised
        Cols: Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                sheet_name = model.sheet_name()
                topic = sheet_name + '__topic'
                equality_rights = sheet_name + '__equality_rights'
                if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(topic, equality_rights)\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})\
                        .filter(sex=gender_id)
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['equality_rights'], r['topic']): r['n'] for r in rows})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, TOPICS, row_perc=True, sec_cols=4)

    def ws_44(self, ws):
        """
        Cols: Sex of reporter, Equality rights raised
        Rows: Region
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                sheet_name = model.sheet_name()
                region = sheet_name + '__country_region__region'
                equality_rights = sheet_name + '__equality_rights'
                if 'equality_rights' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(equality_rights, region)\
                        .filter(sex=gender_id)\
                        .filter(**{region + '__in':self.region_list})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    for r in rows:
                        region_id = [id for id, name in self.regions if name == r['region']][0]
                        counts.update({(r['equality_rights'], region_id): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.regions, row_perc=True, sec_cols=4)

    def ws_45(self, ws):
        """
        Cols: Sex of news subject
        Rows: Region

        :: Equality rights raised == Yes
        """
        counts = Counter()
        for media_type, model in person_models.iteritems():
            if 'equality_rights' in model.sheet_field().rel.to._meta.get_all_field_names():
                region = model.sheet_name() + '__country_region__region'
                equality_rights = model.sheet_name() + '__equality_rights'
                rows = model.objects\
                    .values('sex', region)\
                    .filter(**{region + '__in':self.region_list})\
                    .filter(**{equality_rights:'Y'})
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for r in rows:
                    region_id = [id for id, name in self.regions if name == r['region']][0]
                    counts.update({(r['sex'], region_id): r['n']})
        self.tabulate(ws, counts, self.male_female, self.regions, row_perc=True)

    def ws_46(self, ws):
        """
        Cols: Region, Stereotypes
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for region_id, region in self.regions:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'stereotypes' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('stereotypes', 'topic')\
                        .filter(country_region__region=region)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    # NOTE(review): key order here is (major_topic, stereotypes),
                    # the reverse of ws_47 which uses the same cols/rows — confirm
                    # which orientation tabulate_secondary_cols expects.
                    for r in rows:
                        counts.update({(TOPIC_GROUPS[r['topic']], r['stereotypes']): r['n']})
            secondary_counts[region] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True, sec_cols=8)

    def ws_47(self, ws):
        """
        Cols: Stereotypes
        Rows: Major Topics
        """
        counts = Counter()
        for media_type, model in sheet_models.iteritems():
            rows = model.objects\
                .values('stereotypes', 'topic')\
                .filter(country__in=self.country_list)
            rows = self.apply_weights(rows, model._meta.db_table, media_type)
            for r in rows:
                counts.update({(r['stereotypes'], TOPIC_GROUPS[r['topic']]): r['n']})
        self.tabulate(ws, counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True)

    def ws_48(self, ws):
        """
        Cols: Sex of reporter, Stereotypes
        Rows: Major Topics
        """
        secondary_counts = OrderedDict()
        for gender_id, gender in self.male_female:
            counts = Counter()
            for media_type, model in journalist_models.iteritems():
                sheet_name = model.sheet_name()
                topic = sheet_name + '__topic'
                stereotypes = sheet_name + '__stereotypes'
                if 'stereotypes' in model._meta.get_field(sheet_name).rel.to._meta.get_all_field_names():
                    rows = model.objects\
                        .values(stereotypes, topic)\
                        .filter(sex=gender_id)\
                        .filter(**{model.sheet_name() + '__country__in':self.country_list})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    for r in rows:
                        counts.update({(r['stereotypes'], TOPIC_GROUPS[r['topic']]): r['n']})
            secondary_counts[gender] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=False, sec_cols=8)

    def ws_49(self, ws):
        """
        Cols: Major Topics
        Rows: Region

        :: Internet media type only
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country_region__region')\
            .filter(country_region__region__in=self.region_list)
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, region_id): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.regions, row_perc=True)

    def ws_50(self, ws):
        """
        Cols: Major Topics
        Rows: Country

        :: Internet media type only
        :: Only stories shared on Twitter
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country')\
            .filter(country__in=self.country_list)\
            .filter(shared_via_twitter='Y')
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, row['country']): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, row_perc=True)

    def ws_51(self, ws):
        """
        Cols: Major Topics
        Rows: Country

        :: Internet media type only
        :: Only stories shared on Facebook
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country')\
            .filter(country__in=self.country_list)\
            .filter(shared_on_facebook='Y')
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, row['country']): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, row_perc=True)

    def ws_52(self, ws):
        """
        Cols: Major Topics
        Rows: Country

        :: Internet media type only
        :: Only stories with reference to gener equality
        """
        counts = Counter()
        model = sheet_models.get('Internet')
        rows = model.objects\
            .values('topic', 'country')\
            .filter(country__in=self.country_list)\
            .filter(equality_rights='Y')
        rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
        for row in rows:
            major_topic = TOPIC_GROUPS[row['topic']]
            counts.update({(major_topic, row['country']): row['n']})
        self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, row_perc=True)

    def ws_53(self, ws):
        """
        Cols: Topic
        Rows: Country

        :: Internet media type only
        :: Female reporters only
        """
        display_cols = [(id, value) for id, value in GENDER if id==1]
        secondary_counts = OrderedDict()
        model = sheet_models.get('Internet')
        for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems():
            counts = Counter()
            journo_sex_field = '%s__sex' % model.journalist_field_name()
            rows = model.objects\
                .values(journo_sex_field, 'country')\
                .filter(topic__in=topic_ids)
            rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
            counts.update({(r['sex'], r['country']): r['n'] for r in rows})
            major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
            secondary_counts[major_topic_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, display_cols=display_cols, sec_cols=2)

    def ws_54(self, ws):
        """
        Cols: Major Topic, sex of subject
        Rows: Country

        :: Internet media type only
        """
        secondary_counts = OrderedDict()
        model = person_models.get('Internet')
        for major_topic, topic_ids in GROUP_TOPICS_MAP.iteritems():
            counts = Counter()
            country_field = '%s__country' % model.sheet_name()
            rows = model.objects\
                .values('sex', country_field)\
                .filter(**{model.sheet_name() + '__topic__in':topic_ids})
            rows = self.apply_weights(rows, model.sheet_db_table(), 'Internet')
            counts.update({(r['sex'], r['country']): r['n'] for r in rows})
            major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
            secondary_counts[major_topic_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, sec_cols=8)

    def ws_55(self, ws):
        """
        Cols: Occupation
        Rows: Country

        :: Show all countries
        :: Only female subjects
        :: Internet media type only
        """
        counts = Counter()
        model = person_models.get('Internet')
        country_field = '%s__country' % model.sheet_name()
        rows = model.objects\
            .values(country_field, 'occupation')\
            .filter(sex=1)
        rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        counts.update({(r['occupation'], r['country']): r['n'] for r in rows})
        self.tabulate(ws, counts, OCCUPATION, self.countries, row_perc=True)

    def ws_56(self, ws):
        """
        Cols: Function
        Rows: Country

        :: Show all countries
        :: Internet media type only
        """
        counts = Counter()
        model = person_models.get('Internet')
        country_field = '%s__country' % model.sheet_name()
        rows = model.objects\
            .values(country_field, 'function')\
            .annotate(n=Count('id'))
        rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        counts.update({(r['function'], r['country']): r['n'] for r in rows})
        self.tabulate(ws, counts, FUNCTION, self.countries, row_perc=True)

    def ws_57(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, Family role

        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'family_role')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            # counts is rebound (not updated) so each country block is tabulated alone.
            counts = {(row['sex'], row['family_role']): row['n'] for row in rows}
            # If only captured countries should be displayed use
            # if counts.keys():
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r)
            r += len(YESNO)

    def ws_58(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, is photographed

        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'is_photograph')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            counts = {(row['sex'], row['is_photograph']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, IS_PHOTOGRAPH, row_perc=True, sec_row=True, r=r)
            r += len(IS_PHOTOGRAPH)

    def ws_59(self, ws):
        """
        Cols: Sex of reporter
        Rows: Sex of subject

        :: Internet media only
        """
        counts = Counter()
        model = person_models.get('Internet')
        sheet_name = model.sheet_name()
        journo_name = model._meta.get_field(model.sheet_name()).rel.to.journalist_field_name()
        journo_sex = sheet_name + '__' + journo_name + '__sex'
        rows = model.objects\
            .values(journo_sex, 'sex')\
            .filter(**{model.sheet_name() + '__country__in':self.country_list})
        rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        counts.update({(r[journo_sex], r['sex']): r['n'] for r in rows})
        counts['col_title_def'] = 'Sex of reporter'
        self.tabulate(ws, counts, GENDER, GENDER, row_perc=False)

    def ws_60(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, age

        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'age')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            counts = {(row['sex'], row['age']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, AGES, row_perc=True, sec_row=True, r=r)
            r += len(AGES)

    def ws_61(self, ws):
        """
        Cols: Sex of subject
        Rows: Country, is_quoted

        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, GENDER)
        counts = Counter()
        model = person_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('sex', 'is_quoted')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
            counts = {(row['sex'], row['is_quoted']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, GENDER, YESNO, row_perc=True, sec_row=True, r=r)
            r += len(YESNO)

    def ws_62(self, ws):
        """
        Cols: Topic
        Rows: Country, equality raised

        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('topic', 'equality_rights')\
                .filter(country=code)
            rows = self.apply_weights(rows, model._meta.db_table, "Internet")
            counts = {(row['topic'], row['equality_rights']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r)
            r += len(YESNO)

    def ws_63(self, ws):
        """
        Cols: Topic
        Rows: Country, stereotypes challenged

        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('topic', 'stereotypes')\
                .filter(country=code)
            rows = self.apply_weights(rows, model._meta.db_table, "Internet")
            counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, AGREE_DISAGREE, row_perc=True, sec_row=True, r=r)
            r += len(AGREE_DISAGREE)

    def ws_64(self, ws):
        """
        Cols: Topic
        Rows: Country, about women

        :: Show all countries
        :: Internet media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Internet')
        for code, country in self.countries:
            rows = model.objects\
                .values('topic', 'about_women')\
                .filter(country=code)
            rows = self.apply_weights(rows, model._meta.db_table, "Internet")
            counts = {(row['topic'], row['about_women']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, YESNO, row_perc=True, sec_row=True, r=r)
            r += len(YESNO)

    def ws_65(self, ws):
        """
        Cols: Topic
        Rows: Country, tweet or retweet

        :: Show all countries
        :: Twitter media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Twitter')
        for code, country in self.countries:
            rows = model.objects\
                .values('topic', 'retweet')\
                .filter(country=code)
            rows = self.apply_weights(rows, model._meta.db_table, "Twitter")
            counts = {(row['topic'], row['retweet']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, RETWEET, row_perc=False, sec_row=True, r=r)
            r += len(RETWEET)

    def ws_66(self, ws):
        """
        Cols: Topic
        Rows: Country, sex of news subject

        :: Show all countries
        :: Twitter media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = person_models.get('Twitter')
        topic_field = '%s__topic' % model.sheet_name()
        for code, country in self.countries:
            rows = model.objects\
                .values(topic_field, 'sex')\
                .filter(**{model.sheet_name() + '__country':code})
            rows = self.apply_weights(rows, model.sheet_db_table(), "Twitter")
            # NOTE(review): counts is accumulated across countries here, whereas
            # the sibling per-country worksheets (ws_57..ws_65) rebind it each
            # iteration — confirm which behaviour is intended.
            counts.update({(row['topic'], row['sex']): row['n'] for row in rows})
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, GENDER, row_perc=True, sec_row=True, r=r)
            r += len(GENDER)

    def ws_67(self, ws):
        """
        Cols: Topic
        Rows: Country

        :: Only female journalists
        :: Show all countries
        :: Twitter media type only
        """
        counts = Counter()
        model = sheet_models.get('Twitter')
        rows = model.objects\
            .values('topic', 'country')\
            .filter(**{model.journalist_field_name() + '__sex':1})
        rows = self.apply_weights(rows, model._meta.db_table, "Twitter")
        counts.update({(row['topic'], row['country']): row['n'] for row in rows})
        self.tabulate(ws, counts, TOPICS, self.countries, row_perc=True, sec_row=False)

    def ws_68(self, ws):
        """
        Cols: Topic
        Rows: Country, about women

        :: Show all countries
        :: Twitter media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Twitter')
        for code, country in self.countries:
            rows = model.objects\
                .values('topic', 'about_women')\
                .filter(country=code)
            rows = self.apply_weights(rows, model._meta.db_table, "Twitter")
            counts = {(row['topic'], row['about_women']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, YESNO, row_perc=False, sec_row=True, r=r)
            r += len(YESNO)

    def ws_69(self, ws):
        """
        Cols: Topic
        Rows: Country, stereotypes

        :: Show all countries
        :: Twitter media type only
        """
        r = 6
        self.write_col_headings(ws, TOPICS)
        counts = Counter()
        model = sheet_models.get('Twitter')
        for code, country in self.countries:
            rows = model.objects\
                .values('topic', 'stereotypes')\
                .filter(country=code)
            rows = self.apply_weights(rows, model._meta.db_table, "Twitter")
            counts = {(row['topic'], row['stereotypes']): row['n'] for row in rows}
            self.write_primary_row_heading(ws, country, r=r)
            self.tabulate(ws, counts, TOPICS, AGREE_DISAGREE, row_perc=True, sec_row=True, r=r)
            r += len(AGREE_DISAGREE)

    def ws_76(self, ws):
        """
        Cols: Topic, Stereotypes
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'stereotypes' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('stereotypes', 'country')\
                        .filter(topic=topic_id)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['stereotypes'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, self.countries, row_perc=True, sec_cols=8)

    def ws_77(self, ws):
        """
        Cols: Topic, Reference to gender equality
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in sheet_models.iteritems():
                if 'equality_rights' in model._meta.get_all_field_names():
                    rows = model.objects\
                        .values('equality_rights', 'country')\
                        .filter(topic=topic_id)
                    rows = self.apply_weights(rows, model._meta.db_table, media_type)
                    counts.update({(r['equality_rights'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, YESNO, self.countries, row_perc=True, sec_cols=4)

    def ws_78(self, ws):
        """
        Cols: Topic, victim_of
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'victim_of' in model._meta.get_all_field_names():
                    country_field = '%s__country' % model.sheet_name()
                    rows = model.objects\
                        .values('victim_of', country_field)\
                        .filter(**{model.sheet_name() + '__topic':topic_id})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['victim_of'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, VICTIM_OF, self.countries, row_perc=True, sec_cols=18)

    def ws_79(self, ws):
        """
        Cols: Topic, survivor_of
        Rows: Country
        """
        secondary_counts = OrderedDict()
        for topic_id, topic in TOPICS:
            counts = Counter()
            for media_type, model in person_models.iteritems():
                if 'survivor_of' in model._meta.get_all_field_names():
                    country_field = '%s__country' % model.sheet_name()
                    rows = model.objects\
                        .values('survivor_of', country_field)\
                        .filter(**{model.sheet_name() + '__topic':topic_id})
                    rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                    counts.update({(r['survivor_of'], r['country']): r['n'] for r in rows})
            secondary_counts[topic] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, SURVIVOR_OF, self.countries, row_perc=True, sec_cols=18)

    # -------------------------------------------------------------------------------
    # Helper functions
    #
    def write_headers(self, ws, title, description):
        """
        Write the headers to the worksheet
        """
        ws.write(0, 0, title, self.heading)
        ws.write(1, 0, description, self.heading)
        ws.write(3, 2, self.gmmp_year, self.heading)

    def write_col_headings(self, ws, cols, c=2, r=4):
        """
        :param ws: worksheet to write to
        :param cols: list of `(col_id, col_title)` tuples of column ids and titles
        :param r, c: initial position where cursor should start writing to
        """
        for col_id, col_title in cols:
            ws.write(r, c, clean_title(col_title), self.col_heading)
            ws.write(r + 1, c, "N")
            ws.write(r + 1, c + 1, "%")
            c += 2

    def write_primary_row_heading(self, ws, heading, c=0, r=6):
        """
        :param ws: worksheet to write to
        :param heading: row heading to write
        :param r, c: position where heading should be written to
        """
        ws.write(r, c, clean_title(heading), self.heading)

    def tabulate_secondary_cols(self, ws, secondary_counts, cols, rows, row_perc=False, display_cols=None, sec_cols=4):
        """
        :param ws: worksheet to write to
        :param secondary_counts: dict in following format:
            {'Primary column heading': Count object, ...}
        :param list cols: list of `(col_id, col_title)` tuples of column ids and titles
        :param list rows: list of `(row_id, row_title)` tuples of row ids and titles
        :param bool
row_perc: should percentages by calculated by row instead of column (default: False) :param sec_cols: amount of cols needed for secondary cols """ r, c = 7, 1 # row titles for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 if 'col_title_def' in secondary_counts: # Write definitions of column heading titles ws.write(r-3, c-1, secondary_counts['col_title_def'][0], self.sec_col_heading_def) ws.write(r-2, c-1, secondary_counts['col_title_def'][1], self.col_heading_def) secondary_counts.pop('col_title_def') for field, counts in secondary_counts.iteritems(): ws.merge_range(r-3, c, r-3, c+sec_cols-1, clean_title(field), self.sec_col_heading) self.tabulate(ws, counts, cols, rows, row_perc=row_perc, sec_col=True, display_cols=display_cols, r=7, c=c) c += sec_cols def tabulate(self, ws, counts, cols, rows, row_perc=False, sec_col=False, sec_row=False, display_cols=None, c=1, r=6): """ Emit a table. :param ws: worksheet to write to :param dict counts: dict from `(col_id, row_id)` tuples to count for that combination. :param list cols: list of `(col_id, col_title)` tuples of column ids and titles :param list rows: list of `(row_id, row_title)` tuples of row ids and titles :param bool row_perc: should percentages by calculated by row instead of column (default: False) :param sec_col: Are wecreating a secondary column title(default: False) :param sec_row: Are we creating a secondary row title(default: False) :param display_cols: Optional if only a subset of columns should be displayed e.g. 
only female :param r, c: initial position where cursor should start writing to """ if row_perc: # we'll need percentage by rows row_totals = {} for row_id, row_title in rows: row_totals[row_id] = sum(counts.get((col_id, row_id), 0) for col_id, _ in cols) # noqa # row titles if not sec_col: # Else already written for i, row in enumerate(rows): row_id, row_title = row ws.write(r + i, c, clean_title(row_title), self.label) c += 1 # if only filtered results should be shown # e.g. only print female columns if display_cols: cols = display_cols if 'col_title_def' in counts and not sec_row: ws.write(r - 2, c-1, counts['col_title_def'], self.col_heading_def) counts.pop('col_title_def') # values, written by column for col_id, col_heading in cols: # column title if not sec_row: ws.merge_range(r-2, c, r-2, c+1, clean_title(col_heading), self.col_heading) ws.write(r - 1, c, "N", self.label) ws.write(r - 1, c + 1, "%", self.label) if not row_perc: # column totals # Confirm: Perc of col total or matrix total? # total = sum(counts.itervalues()) total = sum(counts.get((col_id, row_id), 0) for row_id, _ in rows) # row values for this column for i, row in enumerate(rows): row_id, row_title = row if row_perc: # row totals total = row_totals[row_id] n = counts.get((col_id, row_id), 0) ws.write(r + i, c, n, self.N) ws.write(r + i, c + 1, p(n, total), self.P) c += 2
import sys, os, glob import json from astropy.io import fits from astropy.table import Table, join import numpy as np import time, datetime from collections import OrderedDict import subprocess from copy import deepcopy from desispec.scripts.tile_redshifts import generate_tile_redshift_scripts, get_tile_redshift_script_pathname, \ get_tile_redshift_relpath, get_tile_redshift_script_suffix from desispec.workflow.queue import get_resubmission_states, update_from_queue from desispec.workflow.timing import what_night_is_it from desispec.workflow.desi_proc_funcs import get_desi_proc_batch_file_pathname, create_desi_proc_batch_script, \ get_desi_proc_batch_file_path from desispec.workflow.utils import pathjoin, sleep_and_report from desispec.workflow.tableio import write_table from desispec.workflow.proctable import table_row_to_dict from desiutil.log import get_logger from desispec.io import findfile, specprod_root from desispec.io.util import decode_camword, create_camword, difference_camwords, camword_to_spectros ################################################# ############## Misc Functions ################### ################################################# def night_to_starting_iid(night=None): """ Creates an internal ID for a given night. The resulting integer is an 8 digit number. The digits are YYMMDDxxx where YY is the years since 2000, MM and DD are the month and day. xxx are 000, and are incremented for up to 1000 unique job ID's for a given night. Args: night, str or int. YYYYMMDD of the night to get the starting internal ID for. Returns: internal_id, int. 9 digit number consisting of YYMMDD000. YY is years after 2000, MMDD is month and day. 000 being the starting job number (0). 
""" if night is None: night = what_night_is_it() night = int(night) internal_id = (night - 20000000) * 1000 return internal_id ################################################# ############ Script Functions ################### ################################################# def batch_script_name(prow): """ Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, JOBDESC, PROCCAMWORD defined) and determines the script file pathname as defined by desi_proc's helper functions. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'. Returns: scriptfile, str. The complete pathname to the script file, as it is defined within the desi_proc ecosystem. """ expids = prow['EXPID'] if len(expids) == 0: expids = None pathname = get_desi_proc_batch_file_pathname(night = prow['NIGHT'], exp=expids, \ jobdesc=prow['JOBDESC'], cameras=prow['PROCCAMWORD']) scriptfile = pathname + '.slurm' return scriptfile def check_for_outputs_on_disk(prow, resubmit_partial_complete=True): """ Args: prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in desispect.workflow.proctable.get_processing_table_column_defs() resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True, jobs with some prior data are pruned using PROCCAMWORD to only process the remaining cameras not found to exist. Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated to reflect the change in job status after creating and submitting the job for processing. 
""" prow['STATUS'] = 'UNKNOWN' log = get_logger() job_to_file_map = { 'prestdstar': 'sframe', 'stdstarfit': 'stdstars', 'poststdstar': 'cframe', 'nightlybias': 'biasnight', 'ccdcalib': 'badcolumns', 'arc': 'fitpsf', 'flat': 'fiberflat', 'psfnight': 'psfnight', 'nightlyflat': 'fiberflatnight', 'spectra': 'spectra_tile', 'coadds': 'coadds_tile', 'redshift': 'redrock_tile', } night = prow['NIGHT'] if prow['JOBDESC'] in ['cumulative','pernight-v0','pernight','perexp']: filetype = 'redrock_tile' else: filetype = job_to_file_map[prow['JOBDESC']] orig_camword = prow['PROCCAMWORD'] ## if spectro based, look for spectros, else look for cameras if prow['JOBDESC'] in ['stdstarfit','spectra','coadds','redshift']: ## Spectrograph based spectros = camword_to_spectros(prow['PROCCAMWORD']) n_desired = len(spectros) ## Suppress outputs about using tile based files in findfile if only looking for stdstarfits if prow['JOBDESC'] == 'stdstarfit': tileid = None else: tileid = prow['TILEID'] expid = prow['EXPID'][0] existing_spectros = [] for spectro in spectros: if os.path.exists(findfile(filetype=filetype, night=night, expid=expid, spectrograph=spectro, tile=tileid)): existing_spectros.append(spectro) completed = (len(existing_spectros) == n_desired) if not completed and resubmit_partial_complete and len(existing_spectros) > 0: existing_camword = 'a' + ''.join([str(spec) for spec in sorted(existing_spectros)]) prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],existing_camword) elif prow['JOBDESC'] in ['cumulative','pernight-v0','pernight','perexp']: ## Spectrograph based spectros = camword_to_spectros(prow['PROCCAMWORD']) n_desired = len(spectros) ## Suppress outputs about using tile based files in findfile if only looking for stdstarfits tileid = prow['TILEID'] expid = prow['EXPID'][0] redux_dir = specprod_root() outdir = os.path.join(redux_dir,get_tile_redshift_relpath(tileid,group=prow['JOBDESC'],night=night,expid=expid)) suffix = get_tile_redshift_script_suffix(tileid, 
group=prow['JOBDESC'], night=night, expid=expid) existing_spectros = [] for spectro in spectros: if os.path.exists(os.path.join(outdir, f"redrock-{spectro}-{suffix}.fits")): existing_spectros.append(spectro) completed = (len(existing_spectros) == n_desired) if not completed and resubmit_partial_complete and len(existing_spectros) > 0: existing_camword = 'a' + ''.join([str(spec) for spec in sorted(existing_spectros)]) prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],existing_camword) else: ## Otheriwse camera based cameras = decode_camword(prow['PROCCAMWORD']) n_desired = len(cameras) if len(prow['EXPID']) > 0: expid = prow['EXPID'][0] else: expid = None if len(prow['EXPID']) > 1 and prow['JOBDESC'] not in ['psfnight','nightlyflat']: log.warning(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']}. This job type only makes " + f"sense with a single exposure. Proceeding with {expid}.") missing_cameras = [] for cam in cameras: if not os.path.exists(findfile(filetype=filetype, night=night, expid=expid, camera=cam)): missing_cameras.append(cam) completed = (len(missing_cameras) == 0) if not completed and resubmit_partial_complete and len(missing_cameras) < n_desired: prow['PROCCAMWORD'] = create_camword(missing_cameras) if completed: prow['STATUS'] = 'COMPLETED' log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} already has " + f"the desired {n_desired} {filetype}'s. Not submitting this job.") elif resubmit_partial_complete and orig_camword != prow['PROCCAMWORD']: log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} already has " + f"some {filetype}'s. Submitting smaller camword={prow['PROCCAMWORD']}.") elif not resubmit_partial_complete: log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} doesn't have all " + f"{filetype}'s and resubmit_partial_complete=False. 
"+ f"Submitting full camword={prow['PROCCAMWORD']}.") else: log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} has no " + f"existing {filetype}'s. Submitting full camword={prow['PROCCAMWORD']}.") return prow def create_and_submit(prow, queue='realtime', reservation=None, dry_run=0, joint=False, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Wrapper script that takes a processing table row and three modifier keywords, creates a submission script for the compute nodes, and then submits that script to the Slurm scheduler with appropriate dependencies. Args: prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in desispect.workflow.proctable.get_processing_table_column_defs() queue, str. The name of the NERSC Slurm queue to submit to. Default is the realtime queue. reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). joint, bool. Whether this is a joint fitting job (the job involves multiple exposures) and therefore needs to be run with desi_proc_joint_fit. Default is False. strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is less desirable because e.g. the sciences can run with SVN default calibrations rather than failing completely from failed calibrations. Default is False. check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final data products for the script being submitted. If all files exist and this is True, then the script will not be submitted. 
If some files exist and this is True, only the subset of the cameras without the final data products will be generated and submitted. resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True, jobs with some prior data are pruned using PROCCAMWORD to only process the remaining cameras not found to exist. system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated to reflect the change in job status after creating and submitting the job for processing. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). """ orig_prow = prow.copy() if check_for_outputs: prow = check_for_outputs_on_disk(prow, resubmit_partial_complete) if prow['STATUS'].upper() == 'COMPLETED': return prow prow = create_batch_script(prow, queue=queue, dry_run=dry_run, joint=joint, system_name=system_name) prow = submit_batch_script(prow, reservation=reservation, dry_run=dry_run, strictly_successful=strictly_successful) ## If resubmitted partial, the PROCCAMWORD and SCRIPTNAME will correspond to the pruned values. But we want to ## retain the full job's value, so get those from the old job. if resubmit_partial_complete: prow['PROCCAMWORD'] = orig_prow['PROCCAMWORD'] prow['SCRIPTNAME'] = orig_prow['SCRIPTNAME'] return prow def desi_proc_command(prow, queue=None): """ Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, OBSTYPE, JOBDESC, PROCCAMWORD defined) and determines the proper command line call to process the data defined by the input row/dict. Args: prow, Table.Row or dict. 
Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'. queue, str. The name of the NERSC Slurm queue to submit to. Default is None (which leaves it to the desi_proc default). Returns: cmd, str. The proper command to be submitted to desi_proc to process the job defined by the prow values. """ cmd = 'desi_proc' cmd += ' --batch' cmd += ' --nosubmit' cmd += ' --traceshift' if queue is not None: cmd += f' -q {queue}' if prow['OBSTYPE'].lower() == 'science': if prow['JOBDESC'] == 'prestdstar': cmd += ' --nostdstarfit --nofluxcalib' elif prow['JOBDESC'] == 'poststdstar': cmd += ' --noprestdstarfit --nostdstarfit' elif prow['JOBDESC'] in ['nightlybias', 'ccdcalib']: cmd += ' --nightlybias' pcamw = str(prow['PROCCAMWORD']) cmd += f" --cameras={pcamw} -n {prow['NIGHT']}" if len(prow['EXPID']) > 0: cmd += f" -e {prow['EXPID'][0]}" if prow['BADAMPS'] != '': cmd += ' --badamps={}'.format(prow['BADAMPS']) return cmd def desi_proc_joint_fit_command(prow, queue=None): """ Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, OBSTYPE, PROCCAMWORD defined) and determines the proper command line call to process the data defined by the input row/dict. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'. queue, str. The name of the NERSC Slurm queue to submit to. Default is None (which leaves it to the desi_proc default). Returns: cmd, str. The proper command to be submitted to desi_proc_joint_fit to process the job defined by the prow values. 
""" cmd = 'desi_proc_joint_fit' cmd += ' --batch' cmd += ' --nosubmit' cmd += ' --traceshift' if queue is not None: cmd += f' -q {queue}' descriptor = prow['OBSTYPE'].lower() night = prow['NIGHT'] specs = str(prow['PROCCAMWORD']) expid_str = ','.join([str(eid) for eid in prow['EXPID']]) cmd += f' --obstype {descriptor}' cmd += f' --cameras={specs} -n {night}' if len(expid_str) > 0: cmd += f' -e {expid_str}' return cmd def create_batch_script(prow, queue='realtime', dry_run=0, joint=False, system_name=None): """ Wrapper script that takes a processing table row and three modifier keywords and creates a submission script for the compute nodes. Args: prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in desispect.workflow.proctable.get_processing_table_column_defs() queue, str. The name of the NERSC Slurm queue to submit to. Default is the realtime queue. dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written but not submitted. If dry_run=2, the scripts will not be written nor submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). joint, bool. Whether this is a joint fitting job (the job involves multiple exposures) and therefore needs to be run with desi_proc_joint_fit. Default is False. system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for scriptname. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). 
""" log = get_logger() if prow['JOBDESC'] in ['perexp','pernight','pernight-v0','cumulative']: if dry_run > 1: scriptpathname = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'], night=prow['NIGHT'], expid=prow['EXPID'][0]) log.info("Output file would have been: {}".format(scriptpathname)) else: #- run zmtl for cumulative redshifts but not others run_zmtl = (prow['JOBDESC'] == 'cumulative') scripts, failed_scripts = generate_tile_redshift_scripts(tileid=prow['TILEID'], group=prow['JOBDESC'], night=[prow['NIGHT']], expid=prow['EXPID'], run_zmtl=run_zmtl, batch_queue=queue, system_name=system_name, nosubmit=True) if len(failed_scripts) > 0: log.error(f"Redshifts failed for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+ f"tileid={prow['TILEID']}, expid={prow['EXPID']}.") log.info(f"Returned failed scriptname is {failed_scripts}") elif len(scripts) > 1: log.error(f"More than one redshifts returned for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+ f"tileid={prow['TILEID']}, expid={prow['EXPID']}.") log.info(f"Returned scriptnames were {scripts}") else: scriptpathname = scripts[0] else: if joint: cmd = desi_proc_joint_fit_command(prow, queue=queue) else: cmd = desi_proc_command(prow, queue=queue) scriptpathname = batch_script_name(prow) if dry_run > 1: log.info("Output file would have been: {}".format(scriptpathname)) log.info("Command to be run: {}".format(cmd.split())) else: log.info("Running: {}".format(cmd.split())) expids = prow['EXPID'] if len(expids) == 0: expids = None scriptpathname = create_desi_proc_batch_script(night=prow['NIGHT'], exp=expids, cameras=prow['PROCCAMWORD'], jobdesc=prow['JOBDESC'], queue=queue, cmdline=cmd, system_name=system_name) log.info("Outfile is: {}".format(scriptpathname)) prow['SCRIPTNAME'] = os.path.basename(scriptpathname) return prow def submit_batch_script(prow, dry_run=0, reservation=None, strictly_successful=False): """ Wrapper script that takes a processing table row and three modifier 
keywords and submits the scripts to the Slurm scheduler. Args: prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in desispect.workflow.proctable.get_processing_table_column_defs() dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is less desirable because e.g. the sciences can run with SVN default calibrations rather than failing completely from failed calibrations. Default is False. Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for scriptname. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). 
""" log = get_logger() dep_qids = prow['LATEST_DEP_QID'] dep_list, dep_str = '', '' if len(dep_qids) > 0: jobtype = prow['JOBDESC'] if strictly_successful: depcond = 'afterok' elif jobtype in ['arc', 'psfnight', 'prestdstar', 'stdstarfit']: ## (though psfnight and stdstarfit will require some inputs otherwise they'll go up in flames) depcond = 'afterany' else: ## if 'flat','nightlyflat','poststdstar', or any type of redshift, require strict success of inputs depcond = 'afterok' dep_str = f'--dependency={depcond}:' if np.isscalar(dep_qids): dep_list = str(dep_qids).strip(' \t') if dep_list == '': dep_str = '' else: dep_str += dep_list else: if len(dep_qids)>1: dep_list = ':'.join(np.array(dep_qids).astype(str)) dep_str += dep_list elif len(dep_qids) == 1 and dep_qids[0] not in [None, 0]: dep_str += str(dep_qids[0]) else: dep_str = '' # script = f'{jobname}.slurm' # script_path = pathjoin(batchdir, script) if prow['JOBDESC'] in ['pernight-v0','pernight','perexp','cumulative']: script_path = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'], night=prow['NIGHT'], expid=np.min(prow['EXPID'])) jobname = os.path.split(script_path)[-1] else: batchdir = get_desi_proc_batch_file_path(night=prow['NIGHT']) jobname = batch_script_name(prow) script_path = pathjoin(batchdir, jobname) batch_params = ['sbatch', '--parsable'] if dep_str != '': batch_params.append(f'{dep_str}') if reservation is not None: batch_params.append(f'--reservation={reservation}') batch_params.append(f'{script_path}') if dry_run: ## in dry_run, mock Slurm ID's are generated using CPU seconds. Wait one second so we have unique ID's current_qid = int(time.time() - 1.6e9) time.sleep(1) else: current_qid = subprocess.check_output(batch_params, stderr=subprocess.STDOUT, text=True) current_qid = int(current_qid.strip(' \t\n')) log.info(batch_params) log.info(f'Submitted {jobname} with dependencies {dep_str} and reservation={reservation}. 
Returned qid: {current_qid}') prow['LATEST_QID'] = current_qid prow['ALL_QIDS'] = np.append(prow['ALL_QIDS'],current_qid) prow['STATUS'] = 'SUBMITTED' prow['SUBMIT_DATE'] = int(time.time()) return prow ############################################# ########## Row Manipulations ############ ############################################# def define_and_assign_dependency(prow, calibjobs): """ Given input processing row and possible calibjobs, this defines the JOBDESC keyword and assigns the dependency appropriate for the job type of prow. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'OBSTYPE'. A row must have column names for 'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_ID'. calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. The table.Row() values are for the corresponding calibration job. Each value that isn't None must contain 'INTID', and 'LATEST_QID'. If None, it assumes the dependency doesn't exist and no dependency is assigned. Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for 'JOBDESC', 'INT_DEP_IDS'. and 'LATEST_DEP_ID'. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). 
""" if prow['OBSTYPE'] in ['science', 'twiflat']: if calibjobs['nightlyflat'] is not None: dependency = calibjobs['nightlyflat'] elif calibjobs['psfnight'] is not None: dependency = calibjobs['psfnight'] elif calibjobs['ccdcalib'] is not None: dependency = calibjobs['ccdcalib'] else: dependency = calibjobs['nightlybias'] prow['JOBDESC'] = 'prestdstar' elif prow['OBSTYPE'] == 'flat': if calibjobs['psfnight'] is not None: dependency = calibjobs['psfnight'] elif calibjobs['ccdcalib'] is not None: dependency = calibjobs['ccdcalib'] else: dependency = calibjobs['nightlybias'] elif prow['OBSTYPE'] == 'arc': if calibjobs['ccdcalib'] is not None: dependency = calibjobs['ccdcalib'] else: dependency = calibjobs['nightlybias'] else: dependency = None prow = assign_dependency(prow, dependency) return prow def assign_dependency(prow, dependency): """ Given input processing row and possible arcjob (processing row for psfnight) and flatjob (processing row for nightlyflat), this defines the JOBDESC keyword and assigns the dependency appropriate for the job type of prow. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'OBSTYPE'. A row must have column names for 'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_ID'. dependency, NoneType or scalar/list/array of Table.Row, dict. Processing row corresponding to the required input for the job in prow. This must contain keyword accessible values for 'INTID', and 'LATEST_QID'. If None, it assumes the dependency doesn't exist and no dependency is assigned. Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for 'JOBDESC', 'INT_DEP_IDS'. and 'LATEST_DEP_ID'. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. 
As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). """ prow['INT_DEP_IDS'] = np.ndarray(shape=0).astype(int) prow['LATEST_DEP_QID'] = np.ndarray(shape=0).astype(int) if dependency is not None: if type(dependency) in [list, np.array]: ids, qids = [], [] for curdep in dependency: if still_a_dependency(curdep): ids.append(curdep['INTID']) qids.append(curdep['LATEST_QID']) prow['INT_DEP_IDS'] = np.array(ids, dtype=int) prow['LATEST_DEP_QID'] = np.array(qids, dtype=int) elif type(dependency) in [dict, OrderedDict, Table.Row] and still_a_dependency(dependency): prow['INT_DEP_IDS'] = np.array([dependency['INTID']], dtype=int) prow['LATEST_DEP_QID'] = np.array([dependency['LATEST_QID']], dtype=int) return prow def still_a_dependency(dependency): """ Defines the criteria for which a dependency is deemed complete (and therefore no longer a dependency). Args: dependency, Table.Row or dict. Processing row corresponding to the required input for the job in prow. This must contain keyword accessible values for 'STATUS', and 'LATEST_QID'. Returns: bool. False if the criteria indicate that the dependency is completed and no longer a blocking factor (ie no longer a genuine dependency). Returns True if the dependency is still a blocking factor such that the slurm scheduler needs to be aware of the pending job. """ return dependency['LATEST_QID'] > 0 and dependency['STATUS'] != 'COMPLETED' def get_type_and_tile(erow): """ Trivial function to return the OBSTYPE and the TILEID from an exposure table row Args: erow, Table.Row or dict. Must contain 'OBSTYPE' and 'TILEID' as keywords. Returns: tuple (str, str), corresponding to the OBSTYPE and TILEID values of the input erow. 
""" return str(erow['OBSTYPE']).lower(), erow['TILEID'] ############################################# ######### Table manipulators ############ ############################################# def parse_previous_tables(etable, ptable, night): """ This takes in the exposure and processing tables and regenerates all the working memory variables needed for the daily processing script. Used by the daily processing to define most of its state-ful variables into working memory. If the processing table is empty, these are simply declared and returned for use. If the code had previously run and exited (or crashed), however, this will all the code to re-establish itself by redefining these values. Args: etable, Table, Exposure table of all exposures that have been dealt with thus far. ptable, Table, Processing table of all exposures that have been processed. night, str or int, the night the data was taken. Returns: arcs, list of dicts, list of the individual arc jobs used for the psfnight (NOT all the arcs, if multiple sets existed) flats, list of dicts, list of the individual flat jobs used for the nightlyflat (NOT all the flats, if multiple sets existed) sciences, list of dicts, list of the most recent individual prestdstar science exposures (if currently processing that tile) calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. The table.Row() values are for the corresponding calibration job. curtype, None, the obstype of the current job being run. Always None as first new job will define this. lasttype, str or None, the obstype of the last individual exposure row to be processed. curtile, None, the tileid of the current job (if science). Otherwise None. Always None as first new job will define this. lasttile, str or None, the tileid of the last job (if science). Otherwise None. internal_id, int, an internal identifier unique to each job. Increments with each new job. 
This is the latest unassigned value. """ log = get_logger() arcs, flats, sciences = [], [], [] calibjobs = {'nightlybias': None, 'ccdcalib': None, 'psfnight': None, 'nightlyflat': None} curtype,lasttype = None,None curtile,lasttile = None,None if len(ptable) > 0: prow = ptable[-1] internal_id = int(prow['INTID'])+1 lasttype,lasttile = get_type_and_tile(ptable[-1]) jobtypes = ptable['JOBDESC'] if 'nightlybias' in jobtypes: calibjobs['nightlybias'] = table_row_to_dict(ptable[jobtypes=='nightlybias'][0]) log.info("Located nightlybias job in exposure table: {}".format(calibjobs['nightlybias'])) if 'ccdcalib' in jobtypes: calibjobs['ccdcalib'] = table_row_to_dict(ptable[jobtypes=='ccdcalib'][0]) log.info("Located ccdcalib job in exposure table: {}".format(calibjobs['ccdcalib'])) if 'psfnight' in jobtypes: calibjobs['psfnight'] = table_row_to_dict(ptable[jobtypes=='psfnight'][0]) log.info("Located joint fit psfnight job in exposure table: {}".format(calibjobs['psfnight'])) elif lasttype == 'arc': seqnum = 10 for row in ptable[::-1]: erow = etable[etable['EXPID']==row['EXPID'][0]] if row['OBSTYPE'].lower() == 'arc' and int(erow['SEQNUM'])<seqnum: arcs.append(table_row_to_dict(row)) seqnum = int(erow['SEQNUM']) else: break ## Because we work backword to fill in, we need to reverse them to get chronological order back arcs = arcs[::-1] if 'nightlyflat' in jobtypes: calibjobs['nightlyflat'] = table_row_to_dict(ptable[jobtypes=='nightlyflat'][0]) log.info("Located joint fit nightlyflat job in exposure table: {}".format(calibjobs['nightlyflat'])) elif lasttype == 'flat': for row in ptable[::-1]: erow = etable[etable['EXPID']==row['EXPID'][0]] if row['OBSTYPE'].lower() == 'flat' and int(erow['SEQTOT']) < 5: if float(erow['EXPTIME']) > 100.: flats.append(table_row_to_dict(row)) else: break flats = flats[::-1] if lasttype.lower() == 'science': for row in ptable[::-1]: if row['OBSTYPE'].lower() == 'science' and row['TILEID'] == lasttile and \ row['JOBDESC'] == 'prestdstar' and 
row['LASTSTEP'] != 'skysub': sciences.append(table_row_to_dict(row)) else: break sciences = sciences[::-1] else: internal_id = night_to_starting_iid(night) return arcs,flats,sciences, \ calibjobs, \ curtype, lasttype, \ curtile, lasttile,\ internal_id def update_and_recurvsively_submit(proc_table, submits=0, resubmission_states=None, ptab_name=None, dry_run=0,reservation=None): """ Given an processing table, this loops over job rows and resubmits failed jobs (as defined by resubmission_states). Before submitting a job, it checks the dependencies for failures. If a dependency needs to be resubmitted, it recursively follows dependencies until it finds the first job without a failed dependency and resubmits that. Then resubmits the other jobs with the new Slurm jobID's for proper dependency coordination within Slurm. Args: proc_table, Table, the processing table with a row per job. submits, int, the number of submissions made to the queue. Used for saving files and in not overloading the scheduler. resubmission_states, list or array of strings, each element should be a capitalized string corresponding to a possible Slurm scheduler state, where you wish for jobs with that outcome to be resubmitted ptab_name, str, the full pathname where the processing table should be saved. dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. Returns: proc_table: Table, a table with the same rows as the input except that Slurm and jobid relevant columns have been updated for those jobs that needed to be resubmitted. submits: int, the number of submissions made to the queue. 
        This is incremented from the input submits, so it is the number of submissions made from this function call
        plus the input submits value.

    Note:
        This modifies the inputs of both proc_table and submits and returns them.
    """
    log = get_logger()
    if resubmission_states is None:
        resubmission_states = get_resubmission_states()
    log.info(f"Resubmitting jobs with current states in the following: {resubmission_states}")

    ## Refresh job states from the Slurm queue before deciding what needs resubmission
    proc_table = update_from_queue(proc_table, dry_run=False)
    log.info("Updated processing table queue information:")
    cols = ['INTID','EXPID','OBSTYPE','JOBDESC','TILEID','LATEST_QID','STATUS']
    print(np.array(cols))
    for row in proc_table:
        print(np.array(row[cols]))
    print("\n")

    ## Map internal job id (INTID) -> row index so dependency chains can be walked quickly
    id_to_row_map = {row['INTID']: rown for rown, row in enumerate(proc_table)}
    for rown in range(len(proc_table)):
        if proc_table['STATUS'][rown] in resubmission_states:
            proc_table, submits = recursive_submit_failed(rown, proc_table, submits, id_to_row_map,
                                                          ptab_name, resubmission_states, reservation, dry_run)
    return proc_table, submits


def recursive_submit_failed(rown, proc_table, submits, id_to_row_map, ptab_name=None,
                            resubmission_states=None, reservation=None, dry_run=0):
    """
    Given a row of a processing table and the full processing table, this resubmits the given job.
    Before submitting a job, it checks the dependencies for failures in the processing table. If a dependency needs
    to be resubmitted, it recursively follows dependencies until it finds the first job without a failed dependency
    and resubmits that. Then resubmits the other jobs with the new Slurm jobID's for proper dependency coordination
    within Slurm.

    Args:
        rown, int, the index of the row of the processing table that you want to resubmit.
        proc_table, Table, the processing table with a row per job.
        submits, int, the number of submissions made to the queue. Used for saving files and in not overloading the
                      scheduler.
        id_to_row_map, dict, lookup dictionary where the keys are internal ids (INTID's) and the values are the row
                             position in the processing table.
        ptab_name, str, the full pathname where the processing table should be saved.
        resubmission_states, list or array of strings, each element should be a capitalized string corresponding to
                                                       a possible Slurm scheduler state, where you wish for jobs with
                                                       that outcome to be resubmitted
        reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
        dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted.
                      If dry_run=2, the scripts will not be written or submitted. Logging will remain the same
                      for testing as though scripts are being submitted. Default is 0 (false).

    Returns:
        proc_table: Table, a table with the same rows as the input except that Slurm and jobid relevant columns have
                           been updated for those jobs that needed to be resubmitted.
        submits: int, the number of submissions made to the queue. This is incremented from the input submits, so it
                      is the number of submissions made from this function call plus the input submits value.

    Note:
        This modifies the inputs of both proc_table and submits and returns them.
""" log = get_logger() row = proc_table[rown] log.info(f"Identified row {row['INTID']} as needing resubmission.") log.info(f"{row['INTID']}: Expid(s): {row['EXPID']} Job: {row['JOBDESC']}") if resubmission_states is None: resubmission_states = get_resubmission_states() ideps = proc_table['INT_DEP_IDS'][rown] if ideps is None: proc_table['LATEST_DEP_QID'][rown] = np.ndarray(shape=0).astype(int) else: all_valid_states = list(resubmission_states.copy()) all_valid_states.extend(['RUNNING','PENDING','SUBMITTED','PROCESSING']) for idep in np.sort(np.atleast_1d(ideps)): if proc_table['STATUS'][id_to_row_map[idep]] not in all_valid_states: log.warning(f"Proc INTID: {proc_table['INTID'][rown]} depended on" + f" INTID {proc_table['INTID'][id_to_row_map[idep]]}" + f" but that exposure has state" + f" {proc_table['STATUS'][id_to_row_map[idep]]} that" + f" isn't in the list of resubmission states." + f" Exiting this job's resubmission attempt.") return proc_table, submits qdeps = [] for idep in np.sort(np.atleast_1d(ideps)): if proc_table['STATUS'][id_to_row_map[idep]] in resubmission_states: proc_table, submits = recursive_submit_failed(id_to_row_map[idep], proc_table, submits, id_to_row_map, reservation=reservation, dry_run=dry_run) qdeps.append(proc_table['LATEST_QID'][id_to_row_map[idep]]) qdeps = np.atleast_1d(qdeps) if len(qdeps) > 0: proc_table['LATEST_DEP_QID'][rown] = qdeps else: log.error(f"number of qdeps should be 1 or more: Rown {rown}, ideps {ideps}") proc_table[rown] = submit_batch_script(proc_table[rown], reservation=reservation, strictly_successful=True, dry_run=dry_run) submits += 1 if not dry_run: sleep_and_report(1, message_suffix=f"after submitting job to queue") if submits % 10 == 0: if ptab_name is None: write_table(proc_table, tabletype='processing', overwrite=True) else: write_table(proc_table, tablename=ptab_name, overwrite=True) sleep_and_report(2, message_suffix=f"after writing to disk") if submits % 100 == 0: proc_table = 
update_from_queue(proc_table) if ptab_name is None: write_table(proc_table, tabletype='processing', overwrite=True) else: write_table(proc_table, tablename=ptab_name, overwrite=True) sleep_and_report(10, message_suffix=f"after updating queue and writing to disk") return proc_table, submits ######################################### ######## Joint fit ############## ######################################### def joint_fit(ptable, prows, internal_id, queue, reservation, descriptor, z_submit_types=None, dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Given a set of prows, this generates a processing table row, creates a batch script, and submits the appropriate joint fitting job given by descriptor. If the joint fitting job is standard star fitting, the post standard star fits for all the individual exposures also created and submitted. The returned ptable has all of these rows added to the table given as input. Args: ptable, Table. The processing table where each row is a processed job. prows, list or array of dicts. The rows corresponding to the individual exposure jobs that are inputs to the joint fit. internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used). queue, str. The name of the queue to submit the jobs to. If None is given the current desi_proc default is used. reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. descriptor, str. Description of the joint fitting job. Can either be 'science' or 'stdstarfit', 'arc' or 'psfnight', or 'flat' or 'nightlyflat'. z_submit_types: list of str's. The "group" types of redshifts that should be submitted with each exposure. If not specified or None, then no redshifts are submitted. dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. 
If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is less desirable because e.g. the sciences can run with SVN default calibrations rather than failing completely from failed calibrations. Default is False. check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final data products for the script being submitted. If all files exist and this is True, then the script will not be submitted. If some files exist and this is True, only the subset of the cameras without the final data products will be generated and submitted. resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True, jobs with some prior data are pruned using PROCCAMWORD to only process the remaining cameras not found to exist. system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu Returns: ptable, Table. The same processing table as input except with added rows for the joint fit job and, in the case of a stdstarfit, the poststdstar science exposure jobs. joint_prow, dict. Row of a processing table corresponding to the joint fit job. internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used). """ log = get_logger() if len(prows) < 1: return ptable, None, internal_id if descriptor is None: return ptable, None elif descriptor == 'arc': descriptor = 'psfnight' elif descriptor == 'flat': descriptor = 'nightlyflat' elif descriptor == 'science': if z_submit_types is None or len(z_submit_types) == 0: descriptor = 'stdstarfit' if descriptor not in ['psfnight', 'nightlyflat', 'science','stdstarfit']: return ptable, None, internal_id log.info(" ") log.info(f"Joint fit criteria found. 
Running {descriptor}.\n") if descriptor == 'science': joint_prow = make_joint_prow(prows, descriptor='stdstarfit', internal_id=internal_id) else: joint_prow = make_joint_prow(prows, descriptor=descriptor, internal_id=internal_id) internal_id += 1 joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(joint_prow) if descriptor in ['science','stdstarfit']: if descriptor == 'science': zprows = [] log.info(" ") log.info(f"Submitting individual science exposures now that joint fitting of standard stars is submitted.\n") for row in prows: if row['LASTSTEP'] == 'stdstarfit': continue row['JOBDESC'] = 'poststdstar' row['INTID'] = internal_id internal_id += 1 row['ALL_QIDS'] = np.ndarray(shape=0).astype(int) row = assign_dependency(row, joint_prow) row = create_and_submit(row, queue=queue, reservation=reservation, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(row) if descriptor == 'science' and row['LASTSTEP'] == 'all': zprows.append(row) ## Now run redshifts if descriptor == 'science' and len(zprows) > 0: log.info(" ") for zsubtype in z_submit_types: if zsubtype == 'perexp': for zprow in zprows: log.info(f"Submitting redshift fit of type {zsubtype} for TILEID {zprow['TILEID']} and EXPID {zprow['EXPID']}.\n") joint_prow = make_joint_prow([zprow], descriptor=zsubtype, internal_id=internal_id) internal_id += 1 joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(joint_prow) else: 
log.info(f"Submitting joint redshift fits of type {zsubtype} for TILEID {zprows[0]['TILEID']}.\n") joint_prow = make_joint_prow(zprows, descriptor=zsubtype, internal_id=internal_id) internal_id += 1 joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(joint_prow) if descriptor in ['psfnight', 'nightlyflat']: log.info(f"Setting the calibration exposures as calibrators in the processing table.\n") ptable = set_calibrator_flag(prows, ptable) return ptable, joint_prow, internal_id ## wrapper functions for joint fitting def science_joint_fit(ptable, sciences, internal_id, queue='realtime', reservation=None, z_submit_types=None, dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the stdstarfit joint fit and redshift fitting. All variables are the same except: Arg 'sciences' is mapped to the prows argument of joint_fit. The joint_fit argument descriptor is pre-defined as 'science'. """ return joint_fit(ptable=ptable, prows=sciences, internal_id=internal_id, queue=queue, reservation=reservation, descriptor='science', z_submit_types=z_submit_types, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) def flat_joint_fit(ptable, flats, internal_id, queue='realtime', reservation=None, dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the nightlyflat joint fit. All variables are the same except: Arg 'flats' is mapped to the prows argument of joint_fit. 
    The joint_fit argument descriptor is pre-defined as 'nightlyflat'.
    """
    return joint_fit(ptable=ptable, prows=flats, internal_id=internal_id, queue=queue, reservation=reservation,
                     descriptor='nightlyflat', dry_run=dry_run, strictly_successful=strictly_successful,
                     check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete,
                     system_name=system_name)


def arc_joint_fit(ptable, arcs, internal_id, queue='realtime', reservation=None,
                  dry_run=0, strictly_successful=False, check_for_outputs=True,
                  resubmit_partial_complete=True, system_name=None):
    """
    Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the psfnight joint fit.
    All variables are the same except:

        Arg 'arcs' is mapped to the prows argument of joint_fit.
        The joint_fit argument descriptor is pre-defined as 'psfnight'.
    """
    return joint_fit(ptable=ptable, prows=arcs, internal_id=internal_id, queue=queue, reservation=reservation,
                     descriptor='psfnight', dry_run=dry_run, strictly_successful=strictly_successful,
                     check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete,
                     system_name=system_name)


def make_joint_prow(prows, descriptor, internal_id):
    """
    Given an input list or array of processing table rows and a descriptor, this creates a joint fit processing job
    row. It starts by copying the first input row, overwrites relevant columns, and defines the new dependencies
    (based on the input prows).

    Args:
        prows, list or array of dicts. The rows corresponding to the individual exposure jobs that are inputs to
                                       the joint fit.
        descriptor, str. Description of the joint fitting job. Can either be 'stdstarfit', 'psfnight', or 'nightlyflat'.
        internal_id, int, the next internal id to be used for assignment (already incremented up from the last used
                          id number used).

    Returns:
        joint_prow, dict. Row of a processing table corresponding to the joint fit job.
""" first_row = prows[0] joint_prow = first_row.copy() joint_prow['INTID'] = internal_id joint_prow['JOBDESC'] = descriptor joint_prow['LATEST_QID'] = -99 joint_prow['ALL_QIDS'] = np.ndarray(shape=0).astype(int) joint_prow['SUBMIT_DATE'] = -99 joint_prow['STATUS'] = 'U' joint_prow['SCRIPTNAME'] = '' joint_prow['EXPID'] = np.array([ currow['EXPID'][0] for currow in prows ], dtype=int) joint_prow = assign_dependency(joint_prow,dependency=prows) return joint_prow def checkfor_and_submit_joint_job(ptable, arcs, flats, sciences, calibjobs, lasttype, internal_id, z_submit_types=None, dry_run=0, queue='realtime', reservation=None, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Takes all the state-ful data from daily processing and determines whether a joint fit needs to be submitted. Places the decision criteria into a single function for easier maintainability over time. These are separate from the new standard manifest*.json method of indicating a calibration sequence is complete. That is checked independently elsewhere and doesn't interact with this. Args: ptable, Table, Processing table of all exposures that have been processed. arcs, list of dicts, list of the individual arc jobs to be used for the psfnight (NOT all the arcs, if multiple sets existed). May be empty if none identified yet. flats, list of dicts, list of the individual flat jobs to be used for the nightlyflat (NOT all the flats, if multiple sets existed). May be empty if none identified yet. sciences, list of dicts, list of the most recent individual prestdstar science exposures (if currently processing that tile). May be empty if none identified yet. calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. The table.Row() values are for the corresponding calibration job. lasttype, str or None, the obstype of the last individual exposure row to be processed. 
internal_id, int, an internal identifier unique to each job. Increments with each new job. This is the smallest unassigned value. z_submit_types: list of str's. The "group" types of redshifts that should be submitted with each exposure. If not specified or None, then no redshifts are submitted. dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). queue, str. The name of the queue to submit the jobs to. If None is given the current desi_proc default is used. reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is less desirable because e.g. the sciences can run with SVN default calibrations rather than failing completely from failed calibrations. Default is False. check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final data products for the script being submitted. If all files exist and this is True, then the script will not be submitted. If some files exist and this is True, only the subset of the cameras without the final data products will be generated and submitted. resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True, jobs with some prior data are pruned using PROCCAMWORD to only process the remaining cameras not found to exist. system_name (str): batch system name, e.g. cori-haswell, cori-knl, permutter-gpu Returns: ptable, Table, Processing table of all exposures that have been processed. calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. 
The table.Row() values are for the corresponding calibration job. sciences, list of dicts, list of the most recent individual prestdstar science exposures (if currently processing that tile). May be empty if none identified yet or we just submitted them for processing. internal_id, int, if no job is submitted, this is the same as the input, otherwise it is incremented upward from from the input such that it represents the smallest unused ID. """ if lasttype == 'science' and len(sciences) > 0: log = get_logger() skysubonly = np.array([sci['LASTSTEP'] == 'skysub' for sci in sciences]) if np.all(skysubonly): log.error("Identified all exposures in joint fitting request as skysub-only. Not submitting") sciences = [] return ptable, calibjobs, sciences, internal_id if np.any(skysubonly): log.error("Identified skysub-only exposures in joint fitting request") log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) log.info("LASTSTEP's: {}".format([row['LASTSTEP'] for row in sciences])) sciences = (np.array(sciences,dtype=object)[~skysubonly]).tolist() log.info("Removed skysub only exposures in joint fitting:") log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) log.info("LASTSTEP's: {}".format([row['LASTSTEP'] for row in sciences])) from collections import Counter tiles = np.array([sci['TILEID'] for sci in sciences]) counts = Counter(tiles) if len(counts.most_common()) > 1: log.error("Identified more than one tile in a joint fitting request") log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) log.info("Tileid's: {}".format(tiles)) log.info("Returning without joint fitting any of these exposures.") # most_common, nmost_common = counts.most_common()[0] # if most_common == -99: # most_common, nmost_common = counts.most_common()[1] # log.warning(f"Given multiple tiles to jointly fit: {counts}. 
"+ # "Only processing the most common non-default " + # f"tile: {most_common} with {nmost_common} exposures") # sciences = (np.array(sciences,dtype=object)[tiles == most_common]).tolist() # log.info("Tiles and exposure id's being submitted for joint fitting:") # log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) # log.info("Tileid's: {}".format([row['TILEID'] for row in sciences])) sciences = [] return ptable, calibjobs, sciences, internal_id ptable, tilejob, internal_id = science_joint_fit(ptable, sciences, internal_id, z_submit_types=z_submit_types, dry_run=dry_run, queue=queue, reservation=reservation, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name ) if tilejob is not None: sciences = [] elif lasttype == 'flat' and calibjobs['nightlyflat'] is None and len(flats) == 12: ## Note here we have an assumption about the number of expected flats being greater than 11 ptable, calibjobs['nightlyflat'], internal_id \ = flat_joint_fit(ptable, flats, internal_id, dry_run=dry_run, queue=queue, reservation=reservation, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name ) elif lasttype == 'arc' and calibjobs['psfnight'] is None and len(arcs) == 5: ## Note here we have an assumption about the number of expected arcs being greater than 4 ptable, calibjobs['psfnight'], internal_id \ = arc_joint_fit(ptable, arcs, internal_id, dry_run=dry_run, queue=queue, reservation=reservation, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name ) return ptable, calibjobs, sciences, internal_id def set_calibrator_flag(prows, ptable): """ Sets the "CALIBRATOR" column of a procesing table row to 1 (integer representation of True) for all input rows. 
    Used within joint fitting code to flag the exposures that were input to the psfnight or nightlyflat for
    later reference.

    Args:
        prows, list or array of Table.Rows or dicts. The rows corresponding to the individual exposure jobs that
                                                     are inputs to the joint fit.
        ptable, Table. The processing table where each row is a processed job.

    Returns:
        ptable, Table. The same processing table as input except with added rows for the joint fit job and, in the
                       case of a stdstarfit, the poststdstar science exposure jobs.
    """
    for prow in prows:
        ptable['CALIBRATOR'][ptable['INTID'] == prow['INTID']] = 1
    return ptable
for resub need COMPLETED and don't need PROCESSING
import sys, os, glob
import json
from astropy.io import fits
from astropy.table import Table, join
import numpy as np

import time, datetime
from collections import OrderedDict
import subprocess
from copy import deepcopy

from desispec.scripts.tile_redshifts import generate_tile_redshift_scripts, get_tile_redshift_script_pathname, \
                                            get_tile_redshift_relpath, get_tile_redshift_script_suffix
from desispec.workflow.queue import get_resubmission_states, update_from_queue
from desispec.workflow.timing import what_night_is_it
from desispec.workflow.desi_proc_funcs import get_desi_proc_batch_file_pathname, create_desi_proc_batch_script, \
                                              get_desi_proc_batch_file_path
from desispec.workflow.utils import pathjoin, sleep_and_report
from desispec.workflow.tableio import write_table
from desispec.workflow.proctable import table_row_to_dict
from desiutil.log import get_logger

from desispec.io import findfile, specprod_root
from desispec.io.util import decode_camword, create_camword, difference_camwords, camword_to_spectros

#################################################
############## Misc Functions ###################
#################################################
def night_to_starting_iid(night=None):
    """
    Creates an internal ID for a given night. The resulting integer is a 9 digit number.
    The digits are YYMMDDxxx where YY is the years since 2000, MM and DD are the month and day. xxx are 000,
    and are incremented for up to 1000 unique job ID's for a given night.

    Args:
        night, str or int. YYYYMMDD of the night to get the starting internal ID for.

    Returns:
        internal_id, int. 9 digit number consisting of YYMMDD000. YY is years after 2000, MMDD is month and day.
                          000 being the starting job number (0).
    """
    if night is None:
        night = what_night_is_it()
    night = int(night)
    ## e.g. 20220101 -> 220101000; leaves 3 trailing digits for per-night job numbering
    internal_id = (night - 20000000) * 1000
    return internal_id


#################################################
############ Script Functions ###################
#################################################
def batch_script_name(prow):
    """
    Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, JOBDESC, PROCCAMWORD defined)
    and determines the script file pathname as defined by desi_proc's helper functions.

    Args:
        prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and
                                 'PROCCAMWORD'.

    Returns:
        scriptfile, str. The complete pathname to the script file, as it is defined within the desi_proc ecosystem.
    """
    expids = prow['EXPID']
    ## Joint jobs with no exposures are encoded with exp=None
    if len(expids) == 0:
        expids = None
    pathname = get_desi_proc_batch_file_pathname(night = prow['NIGHT'], exp=expids, \
                                                 jobdesc=prow['JOBDESC'], cameras=prow['PROCCAMWORD'])
    scriptfile = pathname + '.slurm'
    return scriptfile

def check_for_outputs_on_disk(prow, resubmit_partial_complete=True):
    """
    Checks the filesystem for the final data products expected from the job described by prow, and updates the
    row's STATUS and PROCCAMWORD accordingly.

    Args:
        prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
                                 desispect.workflow.proctable.get_processing_table_column_defs()
        resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is
                                         True, jobs with some prior data are pruned using PROCCAMWORD to only
                                         process the remaining cameras not found to exist.

    Returns:
        prow, Table.Row or dict.
        The same prow type and keywords as input except with modified values updated to reflect
        the change in job status after creating and submitting the job for processing.
    """
    prow['STATUS'] = 'UNKNOWN'
    log = get_logger()

    ## Map each job type to the file type that signals its completion
    job_to_file_map = {
        'prestdstar': 'sframe',
        'stdstarfit': 'stdstars',
        'poststdstar': 'cframe',
        'nightlybias': 'biasnight',
        'ccdcalib': 'badcolumns',
        'arc': 'fitpsf',
        'flat': 'fiberflat',
        'psfnight': 'psfnight',
        'nightlyflat': 'fiberflatnight',
        'spectra': 'spectra_tile',
        'coadds': 'coadds_tile',
        'redshift': 'redrock_tile',
    }

    night = prow['NIGHT']
    if prow['JOBDESC'] in ['cumulative','pernight-v0','pernight','perexp']:
        filetype = 'redrock_tile'
    else:
        filetype = job_to_file_map[prow['JOBDESC']]
    ## Remember the original camword so we can tell below whether it was pruned
    orig_camword = prow['PROCCAMWORD']

    ## if spectro based, look for spectros, else look for cameras
    if prow['JOBDESC'] in ['stdstarfit','spectra','coadds','redshift']:
        ## Spectrograph based
        spectros = camword_to_spectros(prow['PROCCAMWORD'])
        n_desired = len(spectros)
        ## Suppress outputs about using tile based files in findfile if only looking for stdstarfits
        if prow['JOBDESC'] == 'stdstarfit':
            tileid = None
        else:
            tileid = prow['TILEID']
        expid = prow['EXPID'][0]
        existing_spectros = []
        for spectro in spectros:
            if os.path.exists(findfile(filetype=filetype, night=night, expid=expid, spectrograph=spectro, tile=tileid)):
                existing_spectros.append(spectro)
        completed = (len(existing_spectros) == n_desired)
        if not completed and resubmit_partial_complete and len(existing_spectros) > 0:
            ## Prune the camword down to the spectrographs still missing outputs
            existing_camword = 'a' + ''.join([str(spec) for spec in sorted(existing_spectros)])
            prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],existing_camword)
    elif prow['JOBDESC'] in ['cumulative','pernight-v0','pernight','perexp']:
        ## Spectrograph based
        spectros = camword_to_spectros(prow['PROCCAMWORD'])
        n_desired = len(spectros)
        ## Suppress outputs about using tile based files in findfile if only looking for stdstarfits
        tileid = prow['TILEID']
        expid = prow['EXPID'][0]
        redux_dir = specprod_root()
        outdir = os.path.join(redux_dir,get_tile_redshift_relpath(tileid,group=prow['JOBDESC'],night=night,expid=expid))
        suffix = get_tile_redshift_script_suffix(tileid, group=prow['JOBDESC'], night=night, expid=expid)
        existing_spectros = []
        for spectro in spectros:
            if os.path.exists(os.path.join(outdir, f"redrock-{spectro}-{suffix}.fits")):
                existing_spectros.append(spectro)
        completed = (len(existing_spectros) == n_desired)
        if not completed and resubmit_partial_complete and len(existing_spectros) > 0:
            existing_camword = 'a' + ''.join([str(spec) for spec in sorted(existing_spectros)])
            prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],existing_camword)
    else:
        ## Otheriwse camera based
        cameras = decode_camword(prow['PROCCAMWORD'])
        n_desired = len(cameras)
        if len(prow['EXPID']) > 0:
            expid = prow['EXPID'][0]
        else:
            expid = None
        if len(prow['EXPID']) > 1 and prow['JOBDESC'] not in ['psfnight','nightlyflat']:
            log.warning(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']}. This job type only makes " +
                        f"sense with a single exposure. Proceeding with {expid}.")
        missing_cameras = []
        for cam in cameras:
            if not os.path.exists(findfile(filetype=filetype, night=night, expid=expid, camera=cam)):
                missing_cameras.append(cam)
        completed = (len(missing_cameras) == 0)
        if not completed and resubmit_partial_complete and len(missing_cameras) < n_desired:
            ## Prune the camword down to the cameras still missing outputs
            prow['PROCCAMWORD'] = create_camword(missing_cameras)

    if completed:
        prow['STATUS'] = 'COMPLETED'
        log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} already has " +
                 f"the desired {n_desired} {filetype}'s. Not submitting this job.")
    elif resubmit_partial_complete and orig_camword != prow['PROCCAMWORD']:
        log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} already has " +
                 f"some {filetype}'s. Submitting smaller camword={prow['PROCCAMWORD']}.")
    elif not resubmit_partial_complete:
        log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} doesn't have all " +
                 f"{filetype}'s and resubmit_partial_complete=False. "+
                 f"Submitting full camword={prow['PROCCAMWORD']}.")
    else:
        log.info(f"{prow['JOBDESC']} job with exposure(s) {prow['EXPID']} has no " +
                 f"existing {filetype}'s. Submitting full camword={prow['PROCCAMWORD']}.")
    return prow


def create_and_submit(prow, queue='realtime', reservation=None, dry_run=0, joint=False,
                      strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True,
                      system_name=None):
    """
    Wrapper script that takes a processing table row and three modifier keywords, creates a submission script for
    the compute nodes, and then submits that script to the Slurm scheduler with appropriate dependencies.

    Args:
        prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
                                 desispect.workflow.proctable.get_processing_table_column_defs()
        queue, str. The name of the NERSC Slurm queue to submit to. Default is the realtime queue.
        reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
        dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted.
                      If dry_run=2, the scripts will not be written or submitted. Logging will remain the same
                      for testing as though scripts are being submitted. Default is 0 (false).
        joint, bool. Whether this is a joint fitting job (the job involves multiple exposures) and therefore needs
                     to be run with desi_proc_joint_fit. Default is False.
        strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this
                                   is less desirable because e.g. the sciences can run with SVN default calibrations
                                   rather than failing completely from failed calibrations. Default is False.
        check_for_outputs, bool. Default is True.
If True, the code checks for the existence of the expected final data products for the script being submitted. If all files exist and this is True, then the script will not be submitted. If some files exist and this is True, only the subset of the cameras without the final data products will be generated and submitted. resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True, jobs with some prior data are pruned using PROCCAMWORD to only process the remaining cameras not found to exist. system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated to reflect the change in job status after creating and submitting the job for processing. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). """ orig_prow = prow.copy() if check_for_outputs: prow = check_for_outputs_on_disk(prow, resubmit_partial_complete) if prow['STATUS'].upper() == 'COMPLETED': return prow prow = create_batch_script(prow, queue=queue, dry_run=dry_run, joint=joint, system_name=system_name) prow = submit_batch_script(prow, reservation=reservation, dry_run=dry_run, strictly_successful=strictly_successful) ## If resubmitted partial, the PROCCAMWORD and SCRIPTNAME will correspond to the pruned values. But we want to ## retain the full job's value, so get those from the old job. 
if resubmit_partial_complete: prow['PROCCAMWORD'] = orig_prow['PROCCAMWORD'] prow['SCRIPTNAME'] = orig_prow['SCRIPTNAME'] return prow def desi_proc_command(prow, queue=None): """ Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, OBSTYPE, JOBDESC, PROCCAMWORD defined) and determines the proper command line call to process the data defined by the input row/dict. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'. queue, str. The name of the NERSC Slurm queue to submit to. Default is None (which leaves it to the desi_proc default). Returns: cmd, str. The proper command to be submitted to desi_proc to process the job defined by the prow values. """ cmd = 'desi_proc' cmd += ' --batch' cmd += ' --nosubmit' cmd += ' --traceshift' if queue is not None: cmd += f' -q {queue}' if prow['OBSTYPE'].lower() == 'science': if prow['JOBDESC'] == 'prestdstar': cmd += ' --nostdstarfit --nofluxcalib' elif prow['JOBDESC'] == 'poststdstar': cmd += ' --noprestdstarfit --nostdstarfit' elif prow['JOBDESC'] in ['nightlybias', 'ccdcalib']: cmd += ' --nightlybias' pcamw = str(prow['PROCCAMWORD']) cmd += f" --cameras={pcamw} -n {prow['NIGHT']}" if len(prow['EXPID']) > 0: cmd += f" -e {prow['EXPID'][0]}" if prow['BADAMPS'] != '': cmd += ' --badamps={}'.format(prow['BADAMPS']) return cmd def desi_proc_joint_fit_command(prow, queue=None): """ Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, OBSTYPE, PROCCAMWORD defined) and determines the proper command line call to process the data defined by the input row/dict. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'. queue, str. The name of the NERSC Slurm queue to submit to. Default is None (which leaves it to the desi_proc default). Returns: cmd, str. 
The proper command to be submitted to desi_proc_joint_fit to process the job defined by the prow values. """ cmd = 'desi_proc_joint_fit' cmd += ' --batch' cmd += ' --nosubmit' cmd += ' --traceshift' if queue is not None: cmd += f' -q {queue}' descriptor = prow['OBSTYPE'].lower() night = prow['NIGHT'] specs = str(prow['PROCCAMWORD']) expid_str = ','.join([str(eid) for eid in prow['EXPID']]) cmd += f' --obstype {descriptor}' cmd += f' --cameras={specs} -n {night}' if len(expid_str) > 0: cmd += f' -e {expid_str}' return cmd def create_batch_script(prow, queue='realtime', dry_run=0, joint=False, system_name=None): """ Wrapper script that takes a processing table row and three modifier keywords and creates a submission script for the compute nodes. Args: prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in desispect.workflow.proctable.get_processing_table_column_defs() queue, str. The name of the NERSC Slurm queue to submit to. Default is the realtime queue. dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written but not submitted. If dry_run=2, the scripts will not be written nor submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). joint, bool. Whether this is a joint fitting job (the job involves multiple exposures) and therefore needs to be run with desi_proc_joint_fit. Default is False. system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for scriptname. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. 
As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). """ log = get_logger() if prow['JOBDESC'] in ['perexp','pernight','pernight-v0','cumulative']: if dry_run > 1: scriptpathname = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'], night=prow['NIGHT'], expid=prow['EXPID'][0]) log.info("Output file would have been: {}".format(scriptpathname)) else: #- run zmtl for cumulative redshifts but not others run_zmtl = (prow['JOBDESC'] == 'cumulative') scripts, failed_scripts = generate_tile_redshift_scripts(tileid=prow['TILEID'], group=prow['JOBDESC'], night=[prow['NIGHT']], expid=prow['EXPID'], run_zmtl=run_zmtl, batch_queue=queue, system_name=system_name, nosubmit=True) if len(failed_scripts) > 0: log.error(f"Redshifts failed for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+ f"tileid={prow['TILEID']}, expid={prow['EXPID']}.") log.info(f"Returned failed scriptname is {failed_scripts}") elif len(scripts) > 1: log.error(f"More than one redshifts returned for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+ f"tileid={prow['TILEID']}, expid={prow['EXPID']}.") log.info(f"Returned scriptnames were {scripts}") else: scriptpathname = scripts[0] else: if joint: cmd = desi_proc_joint_fit_command(prow, queue=queue) else: cmd = desi_proc_command(prow, queue=queue) scriptpathname = batch_script_name(prow) if dry_run > 1: log.info("Output file would have been: {}".format(scriptpathname)) log.info("Command to be run: {}".format(cmd.split())) else: log.info("Running: {}".format(cmd.split())) expids = prow['EXPID'] if len(expids) == 0: expids = None scriptpathname = create_desi_proc_batch_script(night=prow['NIGHT'], exp=expids, cameras=prow['PROCCAMWORD'], jobdesc=prow['JOBDESC'], queue=queue, cmdline=cmd, system_name=system_name) log.info("Outfile is: {}".format(scriptpathname)) prow['SCRIPTNAME'] = 
os.path.basename(scriptpathname) return prow def submit_batch_script(prow, dry_run=0, reservation=None, strictly_successful=False): """ Wrapper script that takes a processing table row and three modifier keywords and submits the scripts to the Slurm scheduler. Args: prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in desispect.workflow.proctable.get_processing_table_column_defs() dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is less desirable because e.g. the sciences can run with SVN default calibrations rather than failing completely from failed calibrations. Default is False. Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for scriptname. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). 
""" log = get_logger() dep_qids = prow['LATEST_DEP_QID'] dep_list, dep_str = '', '' if len(dep_qids) > 0: jobtype = prow['JOBDESC'] if strictly_successful: depcond = 'afterok' elif jobtype in ['arc', 'psfnight', 'prestdstar', 'stdstarfit']: ## (though psfnight and stdstarfit will require some inputs otherwise they'll go up in flames) depcond = 'afterany' else: ## if 'flat','nightlyflat','poststdstar', or any type of redshift, require strict success of inputs depcond = 'afterok' dep_str = f'--dependency={depcond}:' if np.isscalar(dep_qids): dep_list = str(dep_qids).strip(' \t') if dep_list == '': dep_str = '' else: dep_str += dep_list else: if len(dep_qids)>1: dep_list = ':'.join(np.array(dep_qids).astype(str)) dep_str += dep_list elif len(dep_qids) == 1 and dep_qids[0] not in [None, 0]: dep_str += str(dep_qids[0]) else: dep_str = '' # script = f'{jobname}.slurm' # script_path = pathjoin(batchdir, script) if prow['JOBDESC'] in ['pernight-v0','pernight','perexp','cumulative']: script_path = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'], night=prow['NIGHT'], expid=np.min(prow['EXPID'])) jobname = os.path.split(script_path)[-1] else: batchdir = get_desi_proc_batch_file_path(night=prow['NIGHT']) jobname = batch_script_name(prow) script_path = pathjoin(batchdir, jobname) batch_params = ['sbatch', '--parsable'] if dep_str != '': batch_params.append(f'{dep_str}') if reservation is not None: batch_params.append(f'--reservation={reservation}') batch_params.append(f'{script_path}') if dry_run: ## in dry_run, mock Slurm ID's are generated using CPU seconds. Wait one second so we have unique ID's current_qid = int(time.time() - 1.6e9) time.sleep(1) else: current_qid = subprocess.check_output(batch_params, stderr=subprocess.STDOUT, text=True) current_qid = int(current_qid.strip(' \t\n')) log.info(batch_params) log.info(f'Submitted {jobname} with dependencies {dep_str} and reservation={reservation}. 
Returned qid: {current_qid}') prow['LATEST_QID'] = current_qid prow['ALL_QIDS'] = np.append(prow['ALL_QIDS'],current_qid) prow['STATUS'] = 'SUBMITTED' prow['SUBMIT_DATE'] = int(time.time()) return prow ############################################# ########## Row Manipulations ############ ############################################# def define_and_assign_dependency(prow, calibjobs): """ Given input processing row and possible calibjobs, this defines the JOBDESC keyword and assigns the dependency appropriate for the job type of prow. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'OBSTYPE'. A row must have column names for 'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_ID'. calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. The table.Row() values are for the corresponding calibration job. Each value that isn't None must contain 'INTID', and 'LATEST_QID'. If None, it assumes the dependency doesn't exist and no dependency is assigned. Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for 'JOBDESC', 'INT_DEP_IDS'. and 'LATEST_DEP_ID'. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). 
""" if prow['OBSTYPE'] in ['science', 'twiflat']: if calibjobs['nightlyflat'] is not None: dependency = calibjobs['nightlyflat'] elif calibjobs['psfnight'] is not None: dependency = calibjobs['psfnight'] elif calibjobs['ccdcalib'] is not None: dependency = calibjobs['ccdcalib'] else: dependency = calibjobs['nightlybias'] prow['JOBDESC'] = 'prestdstar' elif prow['OBSTYPE'] == 'flat': if calibjobs['psfnight'] is not None: dependency = calibjobs['psfnight'] elif calibjobs['ccdcalib'] is not None: dependency = calibjobs['ccdcalib'] else: dependency = calibjobs['nightlybias'] elif prow['OBSTYPE'] == 'arc': if calibjobs['ccdcalib'] is not None: dependency = calibjobs['ccdcalib'] else: dependency = calibjobs['nightlybias'] else: dependency = None prow = assign_dependency(prow, dependency) return prow def assign_dependency(prow, dependency): """ Given input processing row and possible arcjob (processing row for psfnight) and flatjob (processing row for nightlyflat), this defines the JOBDESC keyword and assigns the dependency appropriate for the job type of prow. Args: prow, Table.Row or dict. Must include keyword accessible definitions for 'OBSTYPE'. A row must have column names for 'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_ID'. dependency, NoneType or scalar/list/array of Table.Row, dict. Processing row corresponding to the required input for the job in prow. This must contain keyword accessible values for 'INTID', and 'LATEST_QID'. If None, it assumes the dependency doesn't exist and no dependency is assigned. Returns: prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for 'JOBDESC', 'INT_DEP_IDS'. and 'LATEST_DEP_ID'. Note: This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the input object in memory may or may not be changed. 
As of writing, a row from a table given to this function will not change during the execution of this function (but can be overwritten explicitly with the returned row if desired). """ prow['INT_DEP_IDS'] = np.ndarray(shape=0).astype(int) prow['LATEST_DEP_QID'] = np.ndarray(shape=0).astype(int) if dependency is not None: if type(dependency) in [list, np.array]: ids, qids = [], [] for curdep in dependency: if still_a_dependency(curdep): ids.append(curdep['INTID']) qids.append(curdep['LATEST_QID']) prow['INT_DEP_IDS'] = np.array(ids, dtype=int) prow['LATEST_DEP_QID'] = np.array(qids, dtype=int) elif type(dependency) in [dict, OrderedDict, Table.Row] and still_a_dependency(dependency): prow['INT_DEP_IDS'] = np.array([dependency['INTID']], dtype=int) prow['LATEST_DEP_QID'] = np.array([dependency['LATEST_QID']], dtype=int) return prow def still_a_dependency(dependency): """ Defines the criteria for which a dependency is deemed complete (and therefore no longer a dependency). Args: dependency, Table.Row or dict. Processing row corresponding to the required input for the job in prow. This must contain keyword accessible values for 'STATUS', and 'LATEST_QID'. Returns: bool. False if the criteria indicate that the dependency is completed and no longer a blocking factor (ie no longer a genuine dependency). Returns True if the dependency is still a blocking factor such that the slurm scheduler needs to be aware of the pending job. """ return dependency['LATEST_QID'] > 0 and dependency['STATUS'] != 'COMPLETED' def get_type_and_tile(erow): """ Trivial function to return the OBSTYPE and the TILEID from an exposure table row Args: erow, Table.Row or dict. Must contain 'OBSTYPE' and 'TILEID' as keywords. Returns: tuple (str, str), corresponding to the OBSTYPE and TILEID values of the input erow. 
""" return str(erow['OBSTYPE']).lower(), erow['TILEID'] ############################################# ######### Table manipulators ############ ############################################# def parse_previous_tables(etable, ptable, night): """ This takes in the exposure and processing tables and regenerates all the working memory variables needed for the daily processing script. Used by the daily processing to define most of its state-ful variables into working memory. If the processing table is empty, these are simply declared and returned for use. If the code had previously run and exited (or crashed), however, this will all the code to re-establish itself by redefining these values. Args: etable, Table, Exposure table of all exposures that have been dealt with thus far. ptable, Table, Processing table of all exposures that have been processed. night, str or int, the night the data was taken. Returns: arcs, list of dicts, list of the individual arc jobs used for the psfnight (NOT all the arcs, if multiple sets existed) flats, list of dicts, list of the individual flat jobs used for the nightlyflat (NOT all the flats, if multiple sets existed) sciences, list of dicts, list of the most recent individual prestdstar science exposures (if currently processing that tile) calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. The table.Row() values are for the corresponding calibration job. curtype, None, the obstype of the current job being run. Always None as first new job will define this. lasttype, str or None, the obstype of the last individual exposure row to be processed. curtile, None, the tileid of the current job (if science). Otherwise None. Always None as first new job will define this. lasttile, str or None, the tileid of the last job (if science). Otherwise None. internal_id, int, an internal identifier unique to each job. Increments with each new job. 
This is the latest unassigned value. """ log = get_logger() arcs, flats, sciences = [], [], [] calibjobs = {'nightlybias': None, 'ccdcalib': None, 'psfnight': None, 'nightlyflat': None} curtype,lasttype = None,None curtile,lasttile = None,None if len(ptable) > 0: prow = ptable[-1] internal_id = int(prow['INTID'])+1 lasttype,lasttile = get_type_and_tile(ptable[-1]) jobtypes = ptable['JOBDESC'] if 'nightlybias' in jobtypes: calibjobs['nightlybias'] = table_row_to_dict(ptable[jobtypes=='nightlybias'][0]) log.info("Located nightlybias job in exposure table: {}".format(calibjobs['nightlybias'])) if 'ccdcalib' in jobtypes: calibjobs['ccdcalib'] = table_row_to_dict(ptable[jobtypes=='ccdcalib'][0]) log.info("Located ccdcalib job in exposure table: {}".format(calibjobs['ccdcalib'])) if 'psfnight' in jobtypes: calibjobs['psfnight'] = table_row_to_dict(ptable[jobtypes=='psfnight'][0]) log.info("Located joint fit psfnight job in exposure table: {}".format(calibjobs['psfnight'])) elif lasttype == 'arc': seqnum = 10 for row in ptable[::-1]: erow = etable[etable['EXPID']==row['EXPID'][0]] if row['OBSTYPE'].lower() == 'arc' and int(erow['SEQNUM'])<seqnum: arcs.append(table_row_to_dict(row)) seqnum = int(erow['SEQNUM']) else: break ## Because we work backword to fill in, we need to reverse them to get chronological order back arcs = arcs[::-1] if 'nightlyflat' in jobtypes: calibjobs['nightlyflat'] = table_row_to_dict(ptable[jobtypes=='nightlyflat'][0]) log.info("Located joint fit nightlyflat job in exposure table: {}".format(calibjobs['nightlyflat'])) elif lasttype == 'flat': for row in ptable[::-1]: erow = etable[etable['EXPID']==row['EXPID'][0]] if row['OBSTYPE'].lower() == 'flat' and int(erow['SEQTOT']) < 5: if float(erow['EXPTIME']) > 100.: flats.append(table_row_to_dict(row)) else: break flats = flats[::-1] if lasttype.lower() == 'science': for row in ptable[::-1]: if row['OBSTYPE'].lower() == 'science' and row['TILEID'] == lasttile and \ row['JOBDESC'] == 'prestdstar' and 
row['LASTSTEP'] != 'skysub': sciences.append(table_row_to_dict(row)) else: break sciences = sciences[::-1] else: internal_id = night_to_starting_iid(night) return arcs,flats,sciences, \ calibjobs, \ curtype, lasttype, \ curtile, lasttile,\ internal_id def update_and_recurvsively_submit(proc_table, submits=0, resubmission_states=None, ptab_name=None, dry_run=0,reservation=None): """ Given an processing table, this loops over job rows and resubmits failed jobs (as defined by resubmission_states). Before submitting a job, it checks the dependencies for failures. If a dependency needs to be resubmitted, it recursively follows dependencies until it finds the first job without a failed dependency and resubmits that. Then resubmits the other jobs with the new Slurm jobID's for proper dependency coordination within Slurm. Args: proc_table, Table, the processing table with a row per job. submits, int, the number of submissions made to the queue. Used for saving files and in not overloading the scheduler. resubmission_states, list or array of strings, each element should be a capitalized string corresponding to a possible Slurm scheduler state, where you wish for jobs with that outcome to be resubmitted ptab_name, str, the full pathname where the processing table should be saved. dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. Returns: proc_table: Table, a table with the same rows as the input except that Slurm and jobid relevant columns have been updated for those jobs that needed to be resubmitted. submits: int, the number of submissions made to the queue. 
This is incremented from the input submits, so it is the number of submissions made from this function call plus the input submits value. Note: This modifies the inputs of both proc_table and submits and returns them. """ log = get_logger() if resubmission_states is None: resubmission_states = get_resubmission_states() log.info(f"Resubmitting jobs with current states in the following: {resubmission_states}") proc_table = update_from_queue(proc_table, dry_run=False) log.info("Updated processing table queue information:") cols = ['INTID','EXPID','OBSTYPE','JOBDESC','TILEID','LATEST_QID','STATUS'] print(np.array(cols)) for row in proc_table: print(np.array(row[cols])) print("\n") id_to_row_map = {row['INTID']: rown for rown, row in enumerate(proc_table)} for rown in range(len(proc_table)): if proc_table['STATUS'][rown] in resubmission_states: proc_table, submits = recursive_submit_failed(rown, proc_table, submits, id_to_row_map, ptab_name, resubmission_states, reservation, dry_run) return proc_table, submits def recursive_submit_failed(rown, proc_table, submits, id_to_row_map, ptab_name=None, resubmission_states=None, reservation=None, dry_run=0): """ Given a row of a processing table and the full processing table, this resubmits the given job. Before submitting a job, it checks the dependencies for failures in the processing table. If a dependency needs to be resubmitted, it recursively follows dependencies until it finds the first job without a failed dependency and resubmits that. Then resubmits the other jobs with the new Slurm jobID's for proper dependency coordination within Slurm. Args: rown, Table.Row, the row of the processing table that you want to resubmit. proc_table, Table, the processing table with a row per job. submits, int, the number of submissions made to the queue. Used for saving files and in not overloading the scheduler. 
id_to_row_map, dict, lookup dictionary where the keys are internal ids (INTID's) and the values are the row position in the processing table. ptab_name, str, the full pathname where the processing table should be saved. resubmission_states, list or array of strings, each element should be a capitalized string corresponding to a possible Slurm scheduler state, where you wish for jobs with that outcome to be resubmitted reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). Returns: proc_table: Table, a table with the same rows as the input except that Slurm and jobid relevant columns have been updated for those jobs that needed to be resubmitted. submits: int, the number of submissions made to the queue. This is incremented from the input submits, so it is the number of submissions made from this function call plus the input submits value. Note: This modifies the inputs of both proc_table and submits and returns them. 
""" log = get_logger() row = proc_table[rown] log.info(f"Identified row {row['INTID']} as needing resubmission.") log.info(f"{row['INTID']}: Expid(s): {row['EXPID']} Job: {row['JOBDESC']}") if resubmission_states is None: resubmission_states = get_resubmission_states() ideps = proc_table['INT_DEP_IDS'][rown] if ideps is None: proc_table['LATEST_DEP_QID'][rown] = np.ndarray(shape=0).astype(int) else: all_valid_states = list(resubmission_states.copy()) all_valid_states.extend(['RUNNING','PENDING','SUBMITTED','COMPLETED']) for idep in np.sort(np.atleast_1d(ideps)): if proc_table['STATUS'][id_to_row_map[idep]] not in all_valid_states: log.warning(f"Proc INTID: {proc_table['INTID'][rown]} depended on" + f" INTID {proc_table['INTID'][id_to_row_map[idep]]}" + f" but that exposure has state" + f" {proc_table['STATUS'][id_to_row_map[idep]]} that" + f" isn't in the list of resubmission states." + f" Exiting this job's resubmission attempt.") return proc_table, submits qdeps = [] for idep in np.sort(np.atleast_1d(ideps)): if proc_table['STATUS'][id_to_row_map[idep]] in resubmission_states: proc_table, submits = recursive_submit_failed(id_to_row_map[idep], proc_table, submits, id_to_row_map, reservation=reservation, dry_run=dry_run) qdeps.append(proc_table['LATEST_QID'][id_to_row_map[idep]]) qdeps = np.atleast_1d(qdeps) if len(qdeps) > 0: proc_table['LATEST_DEP_QID'][rown] = qdeps else: log.error(f"number of qdeps should be 1 or more: Rown {rown}, ideps {ideps}") proc_table[rown] = submit_batch_script(proc_table[rown], reservation=reservation, strictly_successful=True, dry_run=dry_run) submits += 1 if not dry_run: sleep_and_report(1, message_suffix=f"after submitting job to queue") if submits % 10 == 0: if ptab_name is None: write_table(proc_table, tabletype='processing', overwrite=True) else: write_table(proc_table, tablename=ptab_name, overwrite=True) sleep_and_report(2, message_suffix=f"after writing to disk") if submits % 100 == 0: proc_table = 
update_from_queue(proc_table) if ptab_name is None: write_table(proc_table, tabletype='processing', overwrite=True) else: write_table(proc_table, tablename=ptab_name, overwrite=True) sleep_and_report(10, message_suffix=f"after updating queue and writing to disk") return proc_table, submits ######################################### ######## Joint fit ############## ######################################### def joint_fit(ptable, prows, internal_id, queue, reservation, descriptor, z_submit_types=None, dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Given a set of prows, this generates a processing table row, creates a batch script, and submits the appropriate joint fitting job given by descriptor. If the joint fitting job is standard star fitting, the post standard star fits for all the individual exposures also created and submitted. The returned ptable has all of these rows added to the table given as input. Args: ptable, Table. The processing table where each row is a processed job. prows, list or array of dicts. The rows corresponding to the individual exposure jobs that are inputs to the joint fit. internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used). queue, str. The name of the queue to submit the jobs to. If None is given the current desi_proc default is used. reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. descriptor, str. Description of the joint fitting job. Can either be 'science' or 'stdstarfit', 'arc' or 'psfnight', or 'flat' or 'nightlyflat'. z_submit_types: list of str's. The "group" types of redshifts that should be submitted with each exposure. If not specified or None, then no redshifts are submitted. dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. 
If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is less desirable because e.g. the sciences can run with SVN default calibrations rather than failing completely from failed calibrations. Default is False. check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final data products for the script being submitted. If all files exist and this is True, then the script will not be submitted. If some files exist and this is True, only the subset of the cameras without the final data products will be generated and submitted. resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True, jobs with some prior data are pruned using PROCCAMWORD to only process the remaining cameras not found to exist. system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu Returns: ptable, Table. The same processing table as input except with added rows for the joint fit job and, in the case of a stdstarfit, the poststdstar science exposure jobs. joint_prow, dict. Row of a processing table corresponding to the joint fit job. internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used). """ log = get_logger() if len(prows) < 1: return ptable, None, internal_id if descriptor is None: return ptable, None elif descriptor == 'arc': descriptor = 'psfnight' elif descriptor == 'flat': descriptor = 'nightlyflat' elif descriptor == 'science': if z_submit_types is None or len(z_submit_types) == 0: descriptor = 'stdstarfit' if descriptor not in ['psfnight', 'nightlyflat', 'science','stdstarfit']: return ptable, None, internal_id log.info(" ") log.info(f"Joint fit criteria found. 
Running {descriptor}.\n") if descriptor == 'science': joint_prow = make_joint_prow(prows, descriptor='stdstarfit', internal_id=internal_id) else: joint_prow = make_joint_prow(prows, descriptor=descriptor, internal_id=internal_id) internal_id += 1 joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(joint_prow) if descriptor in ['science','stdstarfit']: if descriptor == 'science': zprows = [] log.info(" ") log.info(f"Submitting individual science exposures now that joint fitting of standard stars is submitted.\n") for row in prows: if row['LASTSTEP'] == 'stdstarfit': continue row['JOBDESC'] = 'poststdstar' row['INTID'] = internal_id internal_id += 1 row['ALL_QIDS'] = np.ndarray(shape=0).astype(int) row = assign_dependency(row, joint_prow) row = create_and_submit(row, queue=queue, reservation=reservation, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(row) if descriptor == 'science' and row['LASTSTEP'] == 'all': zprows.append(row) ## Now run redshifts if descriptor == 'science' and len(zprows) > 0: log.info(" ") for zsubtype in z_submit_types: if zsubtype == 'perexp': for zprow in zprows: log.info(f"Submitting redshift fit of type {zsubtype} for TILEID {zprow['TILEID']} and EXPID {zprow['EXPID']}.\n") joint_prow = make_joint_prow([zprow], descriptor=zsubtype, internal_id=internal_id) internal_id += 1 joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(joint_prow) else: 
log.info(f"Submitting joint redshift fits of type {zsubtype} for TILEID {zprows[0]['TILEID']}.\n") joint_prow = make_joint_prow(zprows, descriptor=zsubtype, internal_id=internal_id) internal_id += 1 joint_prow = create_and_submit(joint_prow, queue=queue, reservation=reservation, joint=True, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) ptable.add_row(joint_prow) if descriptor in ['psfnight', 'nightlyflat']: log.info(f"Setting the calibration exposures as calibrators in the processing table.\n") ptable = set_calibrator_flag(prows, ptable) return ptable, joint_prow, internal_id ## wrapper functions for joint fitting def science_joint_fit(ptable, sciences, internal_id, queue='realtime', reservation=None, z_submit_types=None, dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the stdstarfit joint fit and redshift fitting. All variables are the same except: Arg 'sciences' is mapped to the prows argument of joint_fit. The joint_fit argument descriptor is pre-defined as 'science'. """ return joint_fit(ptable=ptable, prows=sciences, internal_id=internal_id, queue=queue, reservation=reservation, descriptor='science', z_submit_types=z_submit_types, dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) def flat_joint_fit(ptable, flats, internal_id, queue='realtime', reservation=None, dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the nightlyflat joint fit. All variables are the same except: Arg 'flats' is mapped to the prows argument of joint_fit. 
The joint_fit argument descriptor is pre-defined as 'nightlyflat'. """ return joint_fit(ptable=ptable, prows=flats, internal_id=internal_id, queue=queue, reservation=reservation, descriptor='nightlyflat', dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) def arc_joint_fit(ptable, arcs, internal_id, queue='realtime', reservation=None, dry_run=0, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Wrapper function for desiproc.workflow.procfuns.joint_fit specific to the psfnight joint fit. All variables are the same except: Arg 'arcs' is mapped to the prows argument of joint_fit. The joint_fit argument descriptor is pre-defined as 'psfnight'. """ return joint_fit(ptable=ptable, prows=arcs, internal_id=internal_id, queue=queue, reservation=reservation, descriptor='psfnight', dry_run=dry_run, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name) def make_joint_prow(prows, descriptor, internal_id): """ Given an input list or array of processing table rows and a descriptor, this creates a joint fit processing job row. It starts by copying the first input row, overwrites relevant columns, and defines the new dependencies (based on the input prows). Args: prows, list or array of dicts. The rows corresponding to the individual exposure jobs that are inputs to the joint fit. descriptor, str. Description of the joint fitting job. Can either be 'stdstarfit', 'psfnight', or 'nightlyflat'. internal_id, int, the next internal id to be used for assignment (already incremented up from the last used id number used). Returns: joint_prow, dict. Row of a processing table corresponding to the joint fit job. 
""" first_row = prows[0] joint_prow = first_row.copy() joint_prow['INTID'] = internal_id joint_prow['JOBDESC'] = descriptor joint_prow['LATEST_QID'] = -99 joint_prow['ALL_QIDS'] = np.ndarray(shape=0).astype(int) joint_prow['SUBMIT_DATE'] = -99 joint_prow['STATUS'] = 'U' joint_prow['SCRIPTNAME'] = '' joint_prow['EXPID'] = np.array([ currow['EXPID'][0] for currow in prows ], dtype=int) joint_prow = assign_dependency(joint_prow,dependency=prows) return joint_prow def checkfor_and_submit_joint_job(ptable, arcs, flats, sciences, calibjobs, lasttype, internal_id, z_submit_types=None, dry_run=0, queue='realtime', reservation=None, strictly_successful=False, check_for_outputs=True, resubmit_partial_complete=True, system_name=None): """ Takes all the state-ful data from daily processing and determines whether a joint fit needs to be submitted. Places the decision criteria into a single function for easier maintainability over time. These are separate from the new standard manifest*.json method of indicating a calibration sequence is complete. That is checked independently elsewhere and doesn't interact with this. Args: ptable, Table, Processing table of all exposures that have been processed. arcs, list of dicts, list of the individual arc jobs to be used for the psfnight (NOT all the arcs, if multiple sets existed). May be empty if none identified yet. flats, list of dicts, list of the individual flat jobs to be used for the nightlyflat (NOT all the flats, if multiple sets existed). May be empty if none identified yet. sciences, list of dicts, list of the most recent individual prestdstar science exposures (if currently processing that tile). May be empty if none identified yet. calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. The table.Row() values are for the corresponding calibration job. lasttype, str or None, the obstype of the last individual exposure row to be processed. 
internal_id, int, an internal identifier unique to each job. Increments with each new job. This is the smallest unassigned value. z_submit_types: list of str's. The "group" types of redshifts that should be submitted with each exposure. If not specified or None, then no redshifts are submitted. dry_run, int, If nonzero, this is a simulated run. If dry_run=1 the scripts will be written or submitted. If dry_run=2, the scripts will not be writter or submitted. Logging will remain the same for testing as though scripts are being submitted. Default is 0 (false). queue, str. The name of the queue to submit the jobs to. If None is given the current desi_proc default is used. reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation. strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is less desirable because e.g. the sciences can run with SVN default calibrations rather than failing completely from failed calibrations. Default is False. check_for_outputs, bool. Default is True. If True, the code checks for the existence of the expected final data products for the script being submitted. If all files exist and this is True, then the script will not be submitted. If some files exist and this is True, only the subset of the cameras without the final data products will be generated and submitted. resubmit_partial_complete, bool. Default is True. Must be used with check_for_outputs=True. If this flag is True, jobs with some prior data are pruned using PROCCAMWORD to only process the remaining cameras not found to exist. system_name (str): batch system name, e.g. cori-haswell, cori-knl, permutter-gpu Returns: ptable, Table, Processing table of all exposures that have been processed. calibjobs, dict. Dictionary containing 'nightlybias', 'ccdcalib', 'psfnight' and 'nightlyflat'. Each key corresponds to a Table.Row or None. 
The table.Row() values are for the corresponding calibration job. sciences, list of dicts, list of the most recent individual prestdstar science exposures (if currently processing that tile). May be empty if none identified yet or we just submitted them for processing. internal_id, int, if no job is submitted, this is the same as the input, otherwise it is incremented upward from from the input such that it represents the smallest unused ID. """ if lasttype == 'science' and len(sciences) > 0: log = get_logger() skysubonly = np.array([sci['LASTSTEP'] == 'skysub' for sci in sciences]) if np.all(skysubonly): log.error("Identified all exposures in joint fitting request as skysub-only. Not submitting") sciences = [] return ptable, calibjobs, sciences, internal_id if np.any(skysubonly): log.error("Identified skysub-only exposures in joint fitting request") log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) log.info("LASTSTEP's: {}".format([row['LASTSTEP'] for row in sciences])) sciences = (np.array(sciences,dtype=object)[~skysubonly]).tolist() log.info("Removed skysub only exposures in joint fitting:") log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) log.info("LASTSTEP's: {}".format([row['LASTSTEP'] for row in sciences])) from collections import Counter tiles = np.array([sci['TILEID'] for sci in sciences]) counts = Counter(tiles) if len(counts.most_common()) > 1: log.error("Identified more than one tile in a joint fitting request") log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) log.info("Tileid's: {}".format(tiles)) log.info("Returning without joint fitting any of these exposures.") # most_common, nmost_common = counts.most_common()[0] # if most_common == -99: # most_common, nmost_common = counts.most_common()[1] # log.warning(f"Given multiple tiles to jointly fit: {counts}. 
"+ # "Only processing the most common non-default " + # f"tile: {most_common} with {nmost_common} exposures") # sciences = (np.array(sciences,dtype=object)[tiles == most_common]).tolist() # log.info("Tiles and exposure id's being submitted for joint fitting:") # log.info("Expid's: {}".format([row['EXPID'] for row in sciences])) # log.info("Tileid's: {}".format([row['TILEID'] for row in sciences])) sciences = [] return ptable, calibjobs, sciences, internal_id ptable, tilejob, internal_id = science_joint_fit(ptable, sciences, internal_id, z_submit_types=z_submit_types, dry_run=dry_run, queue=queue, reservation=reservation, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name ) if tilejob is not None: sciences = [] elif lasttype == 'flat' and calibjobs['nightlyflat'] is None and len(flats) == 12: ## Note here we have an assumption about the number of expected flats being greater than 11 ptable, calibjobs['nightlyflat'], internal_id \ = flat_joint_fit(ptable, flats, internal_id, dry_run=dry_run, queue=queue, reservation=reservation, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name ) elif lasttype == 'arc' and calibjobs['psfnight'] is None and len(arcs) == 5: ## Note here we have an assumption about the number of expected arcs being greater than 4 ptable, calibjobs['psfnight'], internal_id \ = arc_joint_fit(ptable, arcs, internal_id, dry_run=dry_run, queue=queue, reservation=reservation, strictly_successful=strictly_successful, check_for_outputs=check_for_outputs, resubmit_partial_complete=resubmit_partial_complete, system_name=system_name ) return ptable, calibjobs, sciences, internal_id def set_calibrator_flag(prows, ptable): """ Sets the "CALIBRATOR" column of a procesing table row to 1 (integer representation of True) for all input rows. 
Used within joint fitting code to flag the exposures that were input to the psfnight or nightlyflat for later reference. Args: prows, list or array of Table.Rows or dicts. The rows corresponding to the individual exposure jobs that are inputs to the joint fit. ptable, Table. The processing table where each row is a processed job. Returns: ptable, Table. The same processing table as input except with added rows for the joint fit job and, in the case of a stdstarfit, the poststdstar science exposure jobs. """ for prow in prows: ptable['CALIBRATOR'][ptable['INTID'] == prow['INTID']] = 1 return ptable
# Gumtree/Jython acquisition script header for the KKB instrument.
# Declares script metadata, imports, a Swing/SWT file-dialog helper, and the
# built-in scan step templates.
__script__.title = 'KKB Measurement Script'
__script__.version = '3.1'

from gumpy.commons import sics
from org.gumtree.gumnix.sics.control import ServerStatus
from pickle import Pickler, Unpickler
import time
from math import log as ln
from math import exp, isnan, isinf, sin
from __builtin__ import max as builtin_max
from __builtin__ import min as builtin_min
from org.eclipse.swt.widgets import FileDialog
from org.eclipse.swt import SWT
from org.eclipse.swt.widgets import Display
from java.io import File
from gumpy.nexus.fitting import Fitting, GAUSSIAN_FITTING
import math
from Internal import sample_stage

''' Disable dataset caching '''
DatasetFactory.__cache_enabled__ = False

# Shorthand aliases for the SWT dialog styles used below.
SINGLE_TYPE = SWT.SINGLE
SAVE_TYPE = SWT.SAVE
MULTI_TYPE = SWT.MULTI

class __Display_Runnable__(Runnable):
    # Runnable posted to the SWT UI thread to open a FileDialog and capture
    # the user's selection (single file, multiple files, and directory path).
    # NOTE(review): the 'type' and 'ext' parameters shadow builtins / use a
    # mutable default; harmless here since 'ext' is never mutated.
    def __init__(self, type=SINGLE_TYPE, ext=['*.*']):
        self.filename = None
        self.filenames = None
        self.path = None
        self.type = type
        self.ext = ext
    def run(self):
        global __UI__
        dialog = FileDialog(__UI__.getShell(), self.type);
        dialog.setFilterExtensions(self.ext)
        dialog.open()
        # Full path of the chosen file; also set when the dialog is cancelled
        # (then getFileName() is empty), which releases the wait loop below.
        self.filename = dialog.getFilterPath() + File.separator + dialog.getFileName()
        self.filenames = dialog.getFileNames()
        self.path = dialog.getFilterPath()

def open_file_dialog(type=SWT.SINGLE, ext=['*.*']):
    # Open a file dialog on the UI thread and block (polling) until the user
    # has made a choice. Returns a single path, or a list of paths for
    # type == SWT.MULTI.
    __display_run__ = __Display_Runnable__(type, ext)
    Display.getDefault().asyncExec(__display_run__)
    # Busy-wait until the runnable has populated 'filename' on the UI thread.
    while __display_run__.filename is None:
        time.sleep(0.5)
    if type == SWT.MULTI:
        fns = []
        for fn in __display_run__.filenames:
            fns.append(__display_run__.path + '/' + fn)
        return fns
    return __display_run__.filename

# Zero-angle reference per monochromator crystal (degrees).
reference_templates_dict = {}
reference_templates_dict['Si111'] = 180.3565
reference_templates_dict['Si311'] = -0.4100

# Scan step templates. Each entry is:
#   [label, acquisition mode ('ba' or 'time'), step scale,
#    one or more step groups [n_points, step_size, preset, max_time]]
steps_templates_list = []
# steps_templates['Background'] = [
#     'time', 'logscale',
#     [20, 6.0e-5, 1200, 1200]]
# steps_templates['----------'] = [
#     'time', 'logscale',
#     [0, 0, 0, 0]]
steps_templates_list.append([
    'Si111: Logarithmic Overview Scan',
    'time', 'logscale',
    [17, 1.20e-4, 1, 1200],
    [30, 22.0, 20, 1200]])
steps_templates_list.append([
    'Si111: Logarithmic Scan (few features)',
    'ba', 'logscale',
    [33, 6.0e-5, 1000, 1200],
    [34, 20.0, 1000, 1200]])
steps_templates_list.append([
    'Si111: Logarithmic Scan (fine features)',
    'ba', 'logscale',
    [33, 6.0e-5, 1000, 1200],
    [65, 10.0, 1000, 1200]])
steps_templates_list.append([
    'Si111: Logarithmic Taiki Scan (15 points)',
    'ba', 'logscale',
    [2, 6.0e-5, 1000, 60],
    [1, 10000, 1000, 60],
    [10, 25, 1000, 60]])
'''
steps_templates_list.append([
    'Si111: Kinetic Scan 4 points',
    'time', 'logscale',
    [ 0, 6.0e-5, 1, 1200],
    [1, 5.0e-3, 180, 1200],
    [3, 1.5e-2, 180, 1200]])
'''
steps_templates_list.append([
    '----------',
    'time', 'logscale',
    [0, 0, 0, 0]])
steps_templates_list.append([
    'Si311: Logarithmic Overview Scan',
    'time', 'logscale',
    [17, 2.0e-5, 1, 1200],
    [30, 23.0, 20, 1200]])
steps_templates_list.append([
    'Si311: Logarithmic Scan (few features, broadened peak, 80+29)',
    'ba', 'logscale',
    [80, 2e-5, 1000, 1200],
    [29, 15.0, 1000, 1200]])
steps_templates_list.append([
    'Si311: Logarithmic Scan (few features, broadened peak, 40+33)',
    'ba', 'logscale',
    [40, 2e-5, 1000, 1200],
    [33, 10.0, 1000, 1200]])
steps_templates_list.append([
    'Si311: Logarithmic Scan (few features, Taiki)',
    'ba', 'logscale',
    [33, 2e-5, 1000, 1200],
    [25, 20.0, 1000, 1200]])

# Validate the sample stage declarations and (re)load the stage pool.
ret = sample_stage.check_declarations()
if not ret[0] :
    open_warning(ret[1])
reload(sample_stage)
SAMPLE_STAGES = sample_stage.StagePool()

# Export path for plot data; created on demand.
__EXPORT_PATH__ = 'V:/shared/KKB Logbook/Temp Plot Data Repository/'
if not os.path.exists(__EXPORT_PATH__):
    os.makedirs(__EXPORT_PATH__)

# User Details group (name/email widgets).
user_name = Par('string', 'Christine', options=['Christine', 'Lela', 'Jitendra'])
user_name.title = 'Name'
user_email = Par('string', 'cre@ansto.gov.au', options=['cre@ansto.gov.au', 'liliana.decampo@ansto.gov.au', 'jtm@ansto.gov.au'])
user_email.title = 'EMail'
g0 = Group('User Details')
g0.numColumns = 2
g0.add(user_name, user_email)

# Sample Details
# Sample Details group: thickness entry is disabled for empty-cell/empty-beam runs
# (see the 'command' callback string on sample_name).
sample_name = Par('string', 'UNKNOWN', options=['Empty Cell', 'Empty Beam'], command="sample_thickness.enabled = sample_name.value not in ['Empty Cell', 'Empty Beam']")
sample_name.title = 'Name'
sample_description = Par('string', 'UNKNOWN')
sample_description.title = 'Description'
sample_thickness = Par('string', '1', options=['0.01', '0.1', '1.0', '10.0'])
sample_thickness.title = 'Thickness (mm)'
g0 = Group('Sample Details')
g0.numColumns = 2
g0.add(sample_name, sample_thickness, sample_description)
# Group('Sample Details').add(sample_name, sample_description, sample_thickness)

# Crystal Info group: identify the monochromator crystal from the m2om angle.
crystal_name = Par('string', 'UNKNOWN')
crystal_name.title = 'Name'
crystal_name.enabled = False
try:
    m2om = sics.getValue('/instrument/crystal/m2om').getFloatData()
    # m2om > 90 deg indicates the Si111 reflection; otherwise Si311.
    if m2om > 90:
        crystal_name.value = 'Si111 (4.74 Angstroms)'
    else:
        crystal_name.value = 'Si311 (2.37 Angstroms)'
except:
    # Best-effort: leave 'UNKNOWN' if SICS is unreachable (e.g. offline editing).
    pass
g0 = Group('Crystal Info')
g0.numColumns = 2
g0.add(crystal_name)
# CRYSTAL END #############################################

# SLIT 1 #######################################################################
def updateOffset(gapBox, offsetBox):
    # Offset entry only makes sense when the gap is not 'fully closed/opened'.
    offsetBox.enabled = 'fully' not in gapBox.value

def getSlitGapAndOffset(aPath, a0, bPath, b0):
    # Read two opposing blade positions from SICS and convert to (gap, offset),
    # with a0/b0 the blade zero offsets. Returns NaNs if SICS is unreachable.
    try:
        a = sics.getValue(aPath).getFloatData()
        b = sics.getValue(bPath).getFloatData()
        gap = (a - a0 - (b - b0)) / 1.0
        offset = (a - a0 + (b - b0)) / 2.0
        return (gap, offset)
    except:
        return (float('nan'), float('nan'))

# Blade zero offsets depend on which crystal is in the beam.
crystal = str(crystal_name.value)
if 'Si111' in crystal:
    ss1r0 = 28.35
    ss1l0 = 27.75
elif 'Si311' in crystal:
    ss1r0 = -9.16
    ss1l0 = -9.76
else:
    ss1r0 = float('nan')
    ss1l0 = float('nan')
ss1u0 = -8.04
ss1d0 = -7.30

# Current sample slit gap/offset (vertical from up/down blades, horizontal
# from right/left blades).
(ss1vg, ss1vo) = getSlitGapAndOffset('/instrument/slits/ss1u', ss1u0, '/instrument/slits/ss1d', ss1d0)
(ss1hg, ss1ho) = getSlitGapAndOffset('/instrument/slits/ss1r', ss1r0, '/instrument/slits/ss1l', ss1l0)

# Sample Slit Settings group.
pss_ss1vg = Par('string', '%.1f' % ss1vg, options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'], command='updateOffset(pss_ss1vg, pss_ss1vo)')
pss_ss1vg.title = 'Vertical Gap (mm)'
# pss_ss1vg.colspan = 50
pss_ss1vo = Par('float', ss1vo)
pss_ss1vo.title = 'Vertical Offset (mm)'
# pss_ss1vo.colspan = 50
pss_ss1hg = Par('string', '%.1f' % ss1hg, options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'], command='updateOffset(pss_ss1hg, pss_ss1ho)')
pss_ss1hg.title = 'Horizontal Gap (mm)'
# pss_ss1hg.colspan = 50
pss_ss1ho = Par('float', ss1ho)
pss_ss1ho.title = 'Horizontal Offset (mm)'
# pss_ss1ho.colspan = 50
g0 = Group('Sample Slit Settings')
g0.numColumns = 2
g0.add(pss_ss1vg, pss_ss1vo, pss_ss1hg, pss_ss1ho)
# SLIT 1 END #######################################################################

# SAMPLE ENVIRONMENT BLOCK #########################################################
# Three optional sample-environment controllers; widgets start disabled and are
# toggled by toggle_se() via the checkbox 'command' callbacks.
gse = Group('Sample Environment')
gse.numColumns = 10
se_enabled1 = Par('bool', False, command = 'toggle_se(1)')
se_enabled1.title = 'Controller 1'
se_enabled1.colspan = 1
se_ctr1 = Par('string', '', options = [])
se_ctr1.title = 'name'
se_ctr1.colspan = 2
se_ctr1.enabled = False
se_pos1 = Par('float', 0.)
se_pos1 .title = 'Values'
se_pos1.colspan = 6
se_pos1.enabled = False
se_wait1 = Par('int', 0)
se_wait1 .title = 'Wait'
se_wait1.colspan = 1
se_wait1.enabled = False
se_enabled2 = Par('bool', False, command = 'toggle_se(2)')
se_enabled2.title = 'Controller 2'
se_enabled2.colspan = 1
se_ctr2 = Par('string', '', options = [])
se_ctr2.title = 'name'
se_ctr2.colspan = 2
se_ctr2.enabled = False
se_pos2 = Par('float', 0.)
se_pos2 .title = 'Values'
se_pos2.colspan = 6
se_pos2.enabled = False
se_wait2 = Par('int', 0)
se_wait2 .title = 'Wait'
se_wait2.colspan = 1
se_wait2.enabled = False
se_enabled3 = Par('bool', False, command = 'toggle_se(3)')
se_enabled3.title = 'Controller 3'
se_enabled3.colspan = 1
se_ctr3 = Par('string', '', options = [])
se_ctr3.title = 'name'
se_ctr3.colspan = 2
se_ctr3.enabled = False
se_pos3 = Par('float', 0.)
se_pos3 .title = 'Values'
se_pos3.colspan = 6
se_pos3.enabled = False
se_wait3 = Par('int', 0)
se_wait3 .title = 'Wait'
se_wait3.colspan = 1
se_wait3.enabled = False
gse.add(se_enabled1, se_ctr1, se_pos1, se_wait1,
        se_enabled2, se_ctr2, se_pos2, se_wait2,
        se_enabled3, se_ctr3, se_pos3, se_wait3,)

# Populate the controller-name dropdowns with the drivable SICS devices.
devices = sicsext.getDrivables()
se_ctr1.options = devices
se_ctr2.options = devices
se_ctr3.options = devices

def toggle_se(id):
    # Enable/disable the widgets of sample-environment controller 'id' (1..3)
    # according to that controller's enable checkbox. Called from the Par
    # 'command' callbacks above.
    id = int(id)
    if id == 1:
        flag = se_enabled1.value
        se_ctr1.enabled = flag
        se_pos1.enabled = flag
        se_wait1.enabled = flag
    elif id == 2:
        flag = se_enabled2.value
        se_ctr2.enabled = flag
        se_pos2.enabled = flag
        se_wait2.enabled = flag
    elif id == 3:
        flag = se_enabled3.value
        se_ctr3.enabled = flag
        se_pos3.enabled = flag
        se_wait3.enabled = flag
    else:
        # BUGFIX: raising a plain string is a TypeError on Python/Jython >= 2.6
        # (string exceptions were removed); raise a proper exception instead.
        raise ValueError('illegal index for sample environment')
# SAMPLE ENVIRONMENT BLOCK END #####################################################

## Scan parameters ##########################################################################################
scan_variable = Par('string', 'm2om [deg]', options=[ #'pmom [deg]',
    'pmchi [deg]', 'm1om [deg]', 'm1chi [deg]', 'm1x [mm]',
    'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]', 'mdet [mm]',
    'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]',
    'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]',
    'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]',
    'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]'],
    command="scan_variable_plot.value = scan_variable.value")
scan_variable.title = 'Scan Variable'
scan_variable.colspan = 25
scan_reference = Par('float', '0.0')
scan_reference.title = 'Zero Angle'
scan_reference.colspan = 25
# Pre-fill the zero angle from the crystal-specific reference table.
for key in reference_templates_dict.keys():
    if key in crystal_name.value:
        scan_reference.value = reference_templates_dict[key]
scan_mode = Par('string', 'ba', options=['ba', 'time'], command='setScanMode()')
scan_mode.title = 'Acquisition Mode'
scan_mode.colspan = 25
scan_min_time = Par('int', '5')
scan_min_time.title = 'Min Time (sec)'
scan_min_time.colspan = 25
empty_label = Par('label', '')
empty_label.colspan = 25
# Sample stage / position selection, populated from the stage pool.
scan_sample_stage = Par('string', '', command = 'sample_stage_changed()')
scan_sample_stage.colspan = 25
scan_sample_stage.title = 'Sample Stage'
scan_sample_stage.options = SAMPLE_STAGES.get_stage_names()
current_stage = SAMPLE_STAGES.get_stage_in_service()
if not current_stage is None:
    scan_sample_stage.value = current_stage.get_name()
scan_sample_position = Par('string', 'fixed')
scan_sample_position.title = 'Sample Position'
scan_sample_position.colspan = 25
scan_sample_position.options = ['fixed', '----------']
if not current_stage is None:
    scan_sample_position.options += current_stage.get_sample_indexes()
logscale_position = Par('bool', False, command='setStepTitles()')
logscale_position.title = 'Logarithmic Steps'
logscale_position.colspan = 25
negative_steps = Par('bool', False)
negative_steps.title = 'Negative Steps'
negative_steps.colspan = 25
steps_label = Par('label', 'Please choose scan template or adjust steps manually: ')
steps_label.colspan = 200
steps_templates = Par('string', '', options=[item[0] for item in steps_templates_list], command='setTemplate()')
steps_templates.title = 'Scan Template'
steps_templates.colspan = 100
early_exit_enabled = Par('bool', True, command = "set_early_exit_enabled()")
early_exit_enabled.title = "Enable Early Exit"
early_exit_enabled.colspan = 25
background_frames = Par('int', 3)
background_frames.title = 'Background Frames'
background_frames.colspan = 25
background_threshold = Par('float', 0.26)
background_threshold.title = 'Background Threshold'
background_threshold.colspan = 25
# steps_space = Par('space', '')
# steps_space.colspan = 10
g0 = Group('Scan Parameters')
g0.numColumns = 100 # 9
g0.add(scan_variable, scan_mode, scan_reference, early_exit_enabled, \
    logscale_position, scan_min_time, scan_sample_stage, background_frames, \
    negative_steps, empty_label, scan_sample_position, background_threshold, \
    steps_label, steps_templates)

def sample_stage_changed():
    # Refresh the sample-position options when a different stage is selected.
    stage = SAMPLE_STAGES.get_stage_by_name(str(scan_sample_stage.value))
#    scan_sample_position.value = 'fixed'
    if not stage is None:
        scan_sample_position.options = ['fixed', '----------'] + stage.get_sample_indexes()
    else:
        scan_sample_position.options = ['fixed', '----------']

def set_early_exit_enabled():
    # Background-frame widgets are only meaningful when early exit is on.
    if early_exit_enabled.value:
        background_frames.enabled = True
        background_threshold.enabled = True
    else:
        background_frames.enabled = False
        background_threshold.enabled = False

# Four editable step groups: enable flag, number of points, step size,
# mode preset and max time per point.
stepInfo = []
for i in xrange(4):
    steps_e = Par('bool', True, command='setEnabled(%i)' % i)
    steps_e.title = '(%i)' % (i + 1)
    steps_e.colspan = 10
    steps_m = Par('int', 0, command='clearScanTemplateSelection()')
    steps_m.title = 'Number of points'
    steps_m.colspan = 20
    steps_s = Par('float', 0, command='clearScanTemplateSelection()')
    steps_s.title = 'Step Size [deg]'
    steps_s.colspan = 20
    steps_p = Par('int', 0, command='clearScanTemplateSelection()')
    steps_p.title = 'Mode Preset'
    steps_p.colspan = 25
    steps_t = Par('int', 1200, command='clearScanTemplateSelection()')
    steps_t.title = 'Max Time'
    steps_t.colspan = 25
    stepInfo.append({'enabled': steps_e, 'dataPoints':steps_m, 'stepSize':steps_s, 'preset':steps_p, 'maxTime':steps_t})
    g0.add(steps_e, steps_m, steps_s, steps_p, steps_t)

def clearScanTemplateSelection():
    # Any manual edit of a step field invalidates the chosen template.
    steps_templates.value = None

btnPlotSteps = Act('btnPlotSteps_clicked()', 'Plot Measurement Steps') # 'compare measurement steps with previous scan'
btnPlotSteps.colspan = 50
cnfg_save_btn = Act('saveConfiguration()', 'Save Single Scan Parameters')
cnfg_save_btn.colspan = 50
btnTimeEstimation = Act('runTimeEstimation()', 'Time Estimation with selected Data Set')
btnTimeEstimation.colspan = 50
txtTimeEstimation = Par('int', '0')
txtTimeEstimation.title = 'Time Estimation (min)'
txtTimeEstimation.enabled = False
txtTimeEstimation.colspan = 50
g0.add(btnPlotSteps, cnfg_save_btn, btnTimeEstimation, txtTimeEstimation)

def runTimeEstimation():
    # Estimate the total scan duration (minutes) for the currently configured
    # scan, shown in txtTimeEstimation. In 'time' mode the presets ARE the
    # times; in 'ba' (count) mode the rate is interpolated from the selected
    # reference dataset.
    if str(scan_mode.value) == 'time':
        scan = getScan()
        times = scan['presets']
        txtTimeEstimation.value = int((sum(times) + len(times) * 25) / 60.0) # 25 seconds for each move
        return
    fns = []
    for sds in __DATASOURCE__.getSelectedDatasets():
        fns.append(sds.getLocation())
    if len(fns) != 1:
        print 'select one dataset'
        return
    ds = openDataset(fns[0])
    # Take the axis name before the unit suffix, e.g. 'm2om [deg]' -> 'm2om'.
    scanVariable = str(scan_variable.value)
    scanVariable = scanVariable[:scanVariable.find(' ')]
    scanVariable = ds[scanVariable]
    # Sort positions ascending, remembering the original ordering in 'info'.
    info = sorted(enumerate(scanVariable), key=lambda item:item[1])
    scanVariable = [item[1] for item in info]
    shape = ds.shape
    if shape[0] <= 1:
        print 'Must have at least 2 scan positions'
        return
    n = shape[0]
    # Sum counts over the selected detector tubes.
    data = zeros(n)
    tids = []
    if combine_tube0.value:
        tids.append(0)
    if combine_tube1.value:
        tids.append(1)
    if combine_tube2.value:
        tids.append(2)
    if combine_tube3.value:
        tids.append(3)
    if combine_tube4.value:
        tids.append(4)
    if combine_tube6.value:
        tids.append(6)
    for tid in tids:
        if ds.hmm.ndim == 4:
            data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm
        else:
            data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy
    # Convert counts to count rate.
    if data.size == 1:
        data[0] = data[0] * 1.0 / ds.time
    else:
        data[:] = data[:] * 1.0 / ds.time
    data[:] = [data[item[0]] for item in info] # sorting
    # angle and count rate
    a0 = [float(angle) for angle in scanVariable]
    r0 = [float(rate) for rate in data[:]]
    # angle, counts, max time and min time
    model = ConfigurationModel()
    scan = model.scan
    tMin = model.min_time
    a1 = scan['angles']
    c1 = scan['presets']
    t1 = scan['maxTimes']
    total = 0.0
    for i in xrange(len(a1)):
        try:
            rate = sample(a0, r0, a1[i])
            # NOTE(review): 'time' shadows the imported time module inside this
            # loop; harmless here but worth renaming.
            time = c1[i] / rate
            # Clamp per-point time to [tMin, max time for that point].
            if time < tMin:
                total += tMin
            elif time > t1[i]:
                total += t1[i]
            else:
                total += time
            #print ("angle: " + str(a1[i])
            #    + " expected counts: " + str(c1[i])
            #    + " rate:" + str(rate)
            #    + " time:" + str(time)
            #    + " total:" + str(total))
        except ValueError as e:
            # NOTE(review): e.message is Python-2 only.
            if e.message == "OutOfRange":
                total += t1[i] # add max time
            else:
                raise
    total += int(len(a1) * 25) # 25 seconds for each move
    txtTimeEstimation.value = int(total / 60.0)

def sample(x0, y0, x1):
    # Linear interpolation of y0 over x0 (ascending) at position x1.
    # Raises ValueError("OutOfRange") when x1 lies outside [min(x0), max(x0)].
    from __builtin__ import max, min
    if len(x0) != len(y0):
        raise Exception("len(x0) != len(y0)")
    x0_min = min(x0)
    x0_max = max(x0)
    if len(x0) < 2:
        raise Exception("len(x0) < 2")
    if x0_min >= x0_max:
        raise Exception("x0_min >= x0_max")
    if x1 < x0_min:
        raise ValueError("OutOfRange")
    if x0_max < x1:
        raise ValueError("OutOfRange")
    i0 = 0
    i1 = 1
    x0i0 = x0[i0]
    y0i0 = y0[i0]
    x0i1 = x0[i1]
    y0i1 = y0[i1]
    # in case first x values are equal
    while x0i0 == x0i1:
        i1 += 1
        x0i1 = x0[i1]
        y0i1 = y0[i1]
    # advance the bracketing segment until it contains x1
    while x0i1 < x1:
        x0i0 = x0i1
        y0i0 = y0i1
        i1 += 1
        x0i1 = x0[i1]
        y0i1 = y0[i1]
    return y0i0 + (x1 - x0i0) * (y0i1 - y0i0) / (x0i1 - x0i0)

## Scan parameters END #########################################################################

## RUN ##############################################
# Execute Scans group: single-scan run plus save/load of scan configurations.
cnfg_load_btn = Act('loadConfigurations()', 'Load Multiple Scan Parameters')
cnfg_lookup = dict()
cnfg_options = Par('string', '', options=[''], command="applyConfiguration()")
cnfg_options.title = 'Read'
start_scan = Act('startScan(ConfigurationModel())', '############# Run Single Scan #############')
cnfg_run_btn = Act('runConfigurations()', '############# Run Multiple Scans #############')
g0 = Group('Execute Scans')
g0.numColumns = 1
g0.add(start_scan, cnfg_load_btn, cnfg_options, cnfg_run_btn)
## Save/Load Configuration END############################################################################

def saveConfiguration():
    # Pickle the current ConfigurationModel's public, non-method attributes
    # (alternating name/value) to a user-chosen *.kkb file, tagged 'KKB'.
    file = open_file_dialog(type=SAVE_TYPE, ext=['*.kkb'])
    try:
        fh = open(file, 'w')
    except:
        print 'not saved'
        return
    try:
        p = Pickler(fh)
        # header
        p.dump('KKB')
        # content
        model = ConfigurationModel()
        for att in dir(model):
            att_value = getattr(model, att)
            if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                p.dump(att)
                p.dump(att_value)
        print 'saved'
    finally:
        fh.close()

def loadConfigurations():
    # Load one or more *.kkb configuration files chosen by the user.
    # (Continues beyond this chunk.)
    fileList = open_file_dialog(type=MULTI_TYPE, ext=['*.kkb'])
    if not fileList:
        return
    finalDict = dict()
    finalNames = 
[] for path in fileList: fh = open(path, 'r') try: p = Unpickler(fh) if p.load() != 'KKB': print 'ERROR:', os.path.basename(path) else: model = ConfigurationModel() # set defaults model.negative = False # old models may not have this attribute for att in dir(model): att_value = getattr(model, att) if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))): if p.load() != att: print 'FORMAT ERROR:', os.path.basename(path) break setattr(model, att, p.load()) else: name = os.path.basename(path) finalDict[name] = path finalNames.append(name) finally: fh.close() cnfg_lookup.clear() cnfg_lookup.update(finalDict) cnfg_options.options = finalNames cnfg_options.value = finalNames[0] if finalNames else '' # time.sleep(0.5) def applyConfiguration(): file = str(cnfg_options.value) if file is None or file == 'None' or file.strip() == '': return fh = open(cnfg_lookup[file], 'r') try: p = Unpickler(fh) if p.load() != 'KKB': print 'ERROR:', file else: model = ConfigurationModel() for att in dir(model): att_value = getattr(model, att) if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))): if p.load() != att: print 'FORMAT ERROR:', file break setattr(model, att, p.load()) else: # print 'read:', file model.apply() finally: fh.close() def runConfigurations(): for file in cnfg_options.options: fh = open(cnfg_lookup[file], 'r') try: cnfg_options.command = '' cnfg_options.value = file applyConfiguration() p = Unpickler(fh) if p.load() != 'KKB': print 'ERROR:', file else: model = ConfigurationModel() for att in dir(model): att_value = getattr(model, att) if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))): if p.load() != att: print 'FORMAT ERROR:', file break setattr(model, att, p.load()) else: print 'run:', file startScan(model) finally: cnfg_options.command = 'applyConfiguration()' fh.close() # # Plot tubes_label = Par('label', 'Main Detector:') tubes_label.colspan = 1 combine_tube0 = Par('bool', True) combine_tube0.title = ' 
Tube 0' combine_tube0.colspan = 1 combine_tube1 = Par('bool', True) combine_tube1.title = ' Tube 1' combine_tube1.colspan = 1 combine_tube2 = Par('bool', True) combine_tube2.title = ' Tube 2' combine_tube2.colspan = 1 combine_tube3 = Par('bool', True) combine_tube3.title = ' Tube 3' combine_tube3.colspan = 1 combine_tube4 = Par('bool', True) combine_tube4.title = ' Tube 4' combine_tube4.colspan = 1 combine_tube6 = Par('bool', False) combine_tube6.title = ' Tube 6' combine_tube6.colspan = 1 combine_mode = Par('string', 'combined', options=['individual', 'combined']) combine_mode.title = ' Mode' combine_mode.colspan = 1 trans_tube_label = Par('label', 'Trans Detector: ') trans_tube_label.colspan = 2 check_tube9 = Par('bool', True) check_tube9.title = ' Tube 9: Si (311)' check_tube9.colspan = 2 check_tube10 = Par('bool', False) check_tube10.title = ' Tube 10: Si (111)' check_tube10.colspan = 2 # steps_space = Par('space', '') # steps_space.colspan = 12 scan_variable_plot = Par('string', 'm2om [deg]', options=[ 'pmom [deg]', 'pmchi [deg]', 'm1om [deg]', 'm1chi [deg]', 'm1x [mm]', 'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]', 'mdet [mm]', 'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]', 'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]', 'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]', 'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]']) scan_variable_plot.title = 'Scan Variable' scan_variable_plot.colspan = 1 scan_variable_sorting = Par('bool', True) scan_variable_sorting.title = 'Sorting' scan_variable_sorting.colspan = 1 btnPlot = Act('btnPlot_clicked()', 'Plot Selected Data Set') btnPlot.colspan = 8 g0 = Group('Plotting') g0.numColumns = 7 g0.add(tubes_label, combine_tube0, combine_tube1, combine_tube2, combine_tube3, combine_tube4, combine_tube6, combine_mode, trans_tube_label, check_tube9, check_tube10, scan_variable_plot, scan_variable_sorting, btnPlot) # export to csv # btnExport = Act('export_clicked()', 'Export to CSV') 
################################# SLIT 2 ##########################################################
ss2u0 = 0  # 2.00
ss2d0 = 0  # 1.40
ss2l0 = 0  # 5 0.50
ss2r0 = 0  # -2 -1.00
(ss2vg, ss2vo) = getSlitGapAndOffset('/instrument/slits/ss2u', ss2u0, '/instrument/slits/ss2d', ss2d0)
(ss2hg, ss2ho) = getSlitGapAndOffset('/instrument/slits/ss2r', ss2r0, '/instrument/slits/ss2l', ss2l0)
pss_ss2vg = Par('string', '%.1f' % ss2vg, options=pss_ss1vg.options, command='updateOffset(pss_ss2vg, pss_ss2vo)')
pss_ss2vg.title = 'Vertical Opening (mm)'
pss_ss2vo = Par('float', ss2vo)
pss_ss2vo.title = 'Vertical Offset (mm)'
pss_ss2hg = Par('string', '%.1f' % ss2hg, options=pss_ss1hg.options, command='updateOffset(pss_ss2hg, pss_ss2ho)')
pss_ss2hg.title = 'Horizontal Opening (mm)'
pss_ss2ho = Par('float', ss2ho)
pss_ss2ho.title = 'Horizontal Offset (mm)'
g0 = Group('Post-Sample Slit')
g0.numColumns = 2
g0.add(pss_ss2vg, pss_ss2vo, pss_ss2hg, pss_ss2ho)
################################# SLIT 2 END ##########################################################

################################# CURVE FITTING START ##########################################################
g_fit = Group('Fitting')
g_fit.numColumns = 2
#data_name = Par('string', 'total_counts', \
#               options = ['total_counts', 'bm1_counts', 'bm2_counts'])
#normalise = Par('bool', True)
#axis_name = Par('string', '')
#axis_name.enabled = True
#auto_fit = Par('bool', False)
#fit_min = Par('float', 'NaN')
#fit_min.title = 'min x'
#fit_max = Par('float', 'NaN')
#fit_max.title = 'max x'
peak_pos = Par('float', 'NaN')
peak_pos.title = 'fitting peak position'
FWHM = Par('float', 'NaN')
FWHM.title = 'fitting FWHM'
fact = Act('fit_curve()', 'Fit Again')
fact.colspan = 2
#offset_done = Par('bool', False)
#act3 = Act('offset_s2()', 'Set Device Zero Offset')
g_fit.add(peak_pos, FWHM, fact)

def fit_curve():
    """Fit a Gaussian to the first curve in Plot1 and report peak position/FWHM."""
    global Plot1
    ds = Plot1.ds
    if len(ds) == 0:
        log('Error: no curve to fit in Plot1.\n')
        return
    # remove any previous fit overlay before adding a new one
    for d in ds:
        if d.title == 'fitting':
            Plot1.remove_dataset(d)
    d0 = ds[0]
    fitting = Fitting(GAUSSIAN_FITTING)
    try:
        fitting.set_histogram(d0)
        fitting.fitter.setResolutionMultiple(50)
        # 'val == val' is a NaN check: seed the fit only from non-NaN widgets
        val = peak_pos.value
        if val == val:
            fitting.set_param('mean', val)
        val = FWHM.value
        if val == val:
            # FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.35482*sigma
            fitting.set_param('sigma', math.fabs(val / 2.35482))
        res = fitting.fit()
        res.var[:] = 0
        res.title = 'fitting'
        Plot1.add_dataset(res)
        Plot1.pv.getPlot().setCurveMarkerVisible(Plot1.__get_NXseries__(res), False)
        mean = fitting.params['mean']
        mean_err = fitting.errors['mean']
        FWHM.value = 2.35482 * math.fabs(fitting.params['sigma'])
        FWHM_err = 5.54518 * math.fabs(fitting.errors['sigma'])
        log('POS_OF_PEAK=' + str(mean) + ' +/- ' + str(mean_err))
        log('FWHM=' + str(FWHM.value) + ' +/- ' + str(FWHM_err))
        log('Chi2 = ' + str(fitting.fitter.getQuality()))
        peak_pos.value = fitting.mean
        # print fitting.params
    except:
        # traceback.print_exc(file = sys.stdout)
        log('can not fit\n')
################################# CURVE FITTING END ##########################################################

def waitUntilSicsIs(status, dt=0.2):
    """Block until the SICS server reaches *status*, polling every *dt* seconds.

    Re-checks for up to ~5 s between server-status refreshes and processes
    pending interrupts while waiting.
    """
    controller = sics.getSicsController()
    timeout = 5
    while True:
        sics.handleInterrupt()
        count = 0
        while not controller.getServerStatus().equals(status) and count < timeout:
            time.sleep(dt)
            count += dt
        if controller.getServerStatus().equals(status):
            break
        else:
            controller.refreshServerStatus()
    # NOTE(review): indentation reconstructed from a line-wrapped dump; this
    # final interrupt flush is placed after the loop — confirm against VCS.
    sics.handleInterrupt()

def setStepTitles():
    """Relabel the step-size columns for linear (deg) vs logscale (%) stepping."""
    if logscale_position.value:
        for stepInfoItem in stepInfo[1:]:
            stepInfoItem['stepSize'].title = "Step Factor [%]"
    else:
        for stepInfoItem in stepInfo[1:]:
            stepInfoItem['stepSize'].title = "Step Size [deg]"
    __UI__.updateUI()

def setTemplate():
    """Apply the selected steps template to the step-row widgets.

    Template layout: [name, scan_mode, 'logscale'|'linear',
    (dataPoints, stepSize, preset, maxTime), ...]. Rows beyond the template
    length are disabled. Errors are deliberately swallowed (best effort).
    """
    try:
        matches = [item for item in steps_templates_list if item[0] == steps_templates.value]
        if len(matches) != 1:
            steps_templates.value = None
            return
        template = matches[0]
        # ignore '----'
        if template[0][0] == '-':
            steps_templates.value = None
            return
        scan_mode.value = template[1]
        if template[2] == 'logscale':
            logscale_position.value = True
        elif template[2] == 'linear':
            logscale_position.value = False
        setStepTitles()
        # by default templates measure in positive direction
        negative_steps.value = False
        setScanMode()
        headers = 3
        for i in xrange(len(template) - headers):
            templateItem = template[i + headers]
            stepInfoItem = stepInfo[i]
            stepInfoItem['enabled' ].value = True
            stepInfoItem['dataPoints'].enabled = True
            stepInfoItem['dataPoints'].value = templateItem[0]
            stepInfoItem['stepSize' ].enabled = True
            stepInfoItem['stepSize' ].value = templateItem[1]
            stepInfoItem['preset' ].enabled = True
            stepInfoItem['preset' ].value = templateItem[2]
            stepInfoItem['maxTime' ].enabled = scan_min_time.enabled
            stepInfoItem['maxTime' ].value = templateItem[3]
        for i in xrange(len(template) - headers, len(stepInfo)):
            stepInfoItem = stepInfo[i]
            stepInfoItem['enabled' ].value = False
            stepInfoItem['dataPoints'].enabled = False
            stepInfoItem['stepSize' ].enabled = False
            stepInfoItem['preset' ].enabled = False
            stepInfoItem['maxTime' ].enabled = False
    except:
        pass

def setScanMode():
    """Enable/disable per-step max-time widgets according to the scan mode."""
    if scan_mode.value == 'time':
        scan_min_time.enabled = False
        for stepInfoItem in stepInfo:
            stepInfoItem['maxTime'].enabled = False
    else:
        scan_min_time.enabled = True
        for stepInfoItem in stepInfo:
            stepInfoItem['maxTime'].enabled = stepInfoItem['enabled'].value

def setEnabled(index):
    """Toggle one step row's widgets when its enable checkbox changes."""
    steps_templates.value = None
    stepItem = stepInfo[index]
    value = stepItem['enabled'].value
    stepItem['dataPoints'].enabled = value
    stepItem['stepSize' ].enabled = value
    stepItem['preset' ].enabled = value
    stepItem['maxTime' ].enabled = value and scan_min_time.enabled
    setTemplate()

def getScan():
    """Expand the enabled step rows into explicit per-point scan lists.

    Returns a dict with 'angles', 'presets', 'maxTimes' (one entry per data
    point) and 'groups' (first angle of each step row, used for plot labels).
    The first row is centred on scan_reference; subsequent rows may step on a
    logarithmic scale. Raises Exception for non-positive step sizes.
    """
    scan = { 'angles': [], 'presets': [], 'maxTimes': [], 'groups': [] }
    first = True
    angle_ref = scan_reference.value
    angle = angle_ref
    logscale = False  # first data points are always on a linear scale
    negative = bool(negative_steps.value)
    for stepInfoItem in stepInfo:
        if stepInfoItem['enabled'].value:
            dataPoints = stepInfoItem['dataPoints'].value
            stepSize = stepInfoItem['stepSize' ].value
            preset = stepInfoItem['preset' ].value
            maxTime = stepInfoItem['maxTime' ].value
            if (dataPoints > 0) and (stepSize <= 0.0):
                raise Exception('step sizes have to be positive')
            for i in xrange(dataPoints):
                if first and (i == 0):
                    # centre the very first row around the reference angle
                    angle -= ((dataPoints - 1) / 2.0) * stepSize;
                elif logscale:
                    # for logscale stepSize is a stepFactor
                    angle = angle_ref + (angle - angle_ref) * (1.0 + 0.01 * stepSize)
                else:
                    angle += stepSize
                #print angle
                scan['angles' ].append(angle)
                scan['presets' ].append(preset)
                scan['maxTimes'].append(maxTime)
                if i == 0:
                    scan['groups'].append(angle)
                first = False
            logscale = bool(logscale_position.value)
    if negative:
        # negate angles with reference to zero angle
        scan['angles'] = [angle_ref - (angle - angle_ref) for angle in scan['angles']]
    return scan

def wait_for_idle():
    """Poll until SICS is eager to execute, refreshing its status every ~5 s."""
    c_time = time.time()
    while not sics.getSicsController().getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE):
        time.sleep(0.1)
        if time.time() - c_time > 5:
            serverStatus = sics.get_status()
            c_time = time.time()

def startScan(configModel):
    """Run one configured scan on the instrument (continues below).

    Confirms instrument readiness/shielding with the user, programs crystal
    and slit parameters, drives sample environment, then measures each frame.
    """
    ''' check instrument ready '''
    all_ready = False
    is_ready = False
    is_shielded = False
    msg = None
    try:
        is_ready = sics.getValue('/instrument/status/ready').getStringData() == 'TRUE'
        is_shielded = sics.getValue('/instrument/GreenPolyShield/greenpolyshield').getStringData().lower() == 'in'
        if not is_ready:
            if not is_shielded:
                msg = 'The instrument is not ready and the green polyshield is not applied. ' \
                    + 'Please get the '\
                    + 'instrument ready and apply the polyshield. Then click on "Yes" to continue. \n'\
                    + 'Do you want to continue?'
            else:
                msg = 'The instrument is not ready according to the SIS status. ' \
                    + 'Please get the '\
                    + 'instrument ready. Then click on "Yes" to continue. \n'\
                    + 'Do you want to continue?'
        else:
            if not is_shielded:
                msg = 'The green polyshield is not applied. ' \
                    + 'Please apply the polyshield. Then click on "Yes" to continue. \n'\
                    + 'Do you want to continue?'
all_ready = is_ready and is_shielded except: pass if not all_ready: if not msg: msg = 'The instrument is not ready according to the SIS status. ' \ + 'Please get the '\ + 'instrument ready. Then click on "Yes" to continue. \n'\ + 'Do you want to continue?' is_confirmed = open_question(msg) if not is_confirmed: slog('Instrument is not ready. Quit the scan.') return else: try: is_ready = sics.getValue('/instrument/status/ready').getStringData() == 'TRUE' is_shielded = sics.getValue('/instrument/GreenPolyShield/greenpolyshield').getStringData().lower() == 'in' except: pass if not is_ready: slog('scan continued without instrument ready') if not is_shielded: slog('scan continued without green polysheild') ''' setup ''' scanVariable = configModel.scanVariable crystal = configModel.crystal mode = configModel.mode MainDeadTime = 1.08E-6 TransDeadTime = 1.08E-6 if 'Si111' in crystal: empLevel = 0.3 bkgLevel = 0.21 dOmega = 2.3E-6 gDQv = 0.0586 gDQh = 0 wavelength = 4.74 TransmissionTube = 10 TransBackground = 0 # counts per second elif 'Si311' in crystal: empLevel = 0.34 bkgLevel = 0.21 dOmega = 4.6E-7 gDQv = 0.117 gDQh = 0 wavelength = 2.37 TransmissionTube = 9 TransBackground = 0 # counts per second else: print 'selected crystal is invalid' return ''' angles ''' scan = configModel.scan scan_angleMin = builtin_min(scan['angles']) scan_angleMax = builtin_max(scan['angles']) if ('m1om' in scanVariable) or ('m2om' in scanVariable): tolerance = 6 approved = False if 'Si111' in crystal: if (180 - tolerance <= scan_angleMin) and (scan_angleMax <= 180 + tolerance): approved = True elif 'Si311' in crystal: if (0 - tolerance <= scan_angleMin) and (scan_angleMax <= 0 + tolerance): approved = True if not approved: print 'angle out of range' return ''' execution ''' sics.execute('hset user/name ' + configModel.user_name) sics.execute('hset user/email ' + configModel.user_email) sics.execute('hset sample/name ' + configModel.sample_name) sics.execute('hset sample/description ' + 
configModel.sample_description) sics.execute('hset sample/thickness %g' % configModel.sample_thickness) sics.execute('hset experiment/bkgLevel %g' % bkgLevel) sics.execute('hset experiment/empLevel %g' % empLevel) sics.execute('hset instrument/detector/MainDeadTime %g' % MainDeadTime) sics.execute('hset instrument/detector/TransDeadTime %g' % TransDeadTime) sics.execute('hset instrument/detector/TransBackground %g' % TransBackground) sics.execute('hset instrument/detector/TransmissionTube %i' % TransmissionTube) sics.execute('hset instrument/crystal/dOmega %g' % dOmega) sics.execute('hset instrument/crystal/gDQv %g' % gDQv) sics.execute('hset instrument/crystal/gDQh %g' % gDQh) sics.execute('hset instrument/crystal/wavelength %g' % wavelength) sics.execute('hset instrument/crystal/scan_variable ' + scanVariable); sicsController = sics.getSicsController() # slits def getSlitValues(gap, offset, a0, b0, aOpen, bOpen): if gap == 'fully opened': return (aOpen, bOpen) if gap == 'fully closed': gap = -5.0 offset = 0.0 a = a0 + 0.5 * float(gap) + float(offset) b = b0 - 0.5 * float(gap) + float(offset) return (a, b) ss1vg = configModel.ss1vg ss1vo = configModel.ss1vo ss1hg = configModel.ss1hg ss1ho = configModel.ss1ho ss2vg = configModel.ss2vg ss2vo = configModel.ss2vo ss2hg = configModel.ss2hg ss2ho = configModel.ss2ho (ss1u, ss1d) = getSlitValues(ss1vg, ss1vo, ss1u0, ss1d0, 35.8, -38.8) (ss1r, ss1l) = getSlitValues(ss1hg, ss1ho, ss1r0, ss1l0, 57.0, -58.0) (ss2u, ss2d) = getSlitValues(ss2vg, ss2vo, ss2u0, ss2d0, 37.0, -39.5) (ss2r, ss2l) = getSlitValues(ss2hg, ss2ho, ss2r0, ss2l0, 35.0, -35.0) # apply slits run = {} run['ss1u'] = ss1u run['ss1d'] = ss1d run['ss1r'] = ss1r run['ss1l'] = ss1l run['ss2u'] = ss2u run['ss2d'] = ss2d run['ss2r'] = ss2r run['ss2l'] = ss2l # sics.multiDrive(run) dc = 'drive' for key in run: dc += ' ' + key + ' ' + str(run[key]) sics.execute(dc) time.sleep(5) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) ''' sics.execute('run ss1u %.2f' % ss1u) 
sics.execute('run ss1d %.2f' % ss1d) sics.execute('run ss1r %.2f' % ss1r) sics.execute('run ss1l %.2f' % ss1l) sics.execute('run ss2u %.2f' % ss2u) sics.execute('run ss2d %.2f' % ss2d) sics.execute('run ss2r %.2f' % ss2r) sics.execute('run ss2l %.2f' % ss2l) ''' # drive sample environment devices slog('check sample envirment setup') multiDev = {} se_wait = 0 if configModel.se_enabled1: slog('sample controller 1 is enabled') multiDev[configModel.se_ctr1] = configModel.se_pos1 if configModel.se_wait1 > se_wait: se_wait = configModel.se_wait1 if configModel.se_enabled2: slog('sample controller 2 is enabled') multiDev[configModel.se_ctr2] = configModel.se_pos2 if configModel.se_wait2 > se_wait: se_wait = configModel.se_wait2 if configModel.se_enabled3: slog('sample controller 3 is enabled') multiDev[configModel.se_ctr3] = configModel.se_pos3 if configModel.se_wait3 > se_wait: se_wait = configModel.se_wait3 if len(multiDev) > 0: slog('drive sample environment ' + str(multiDev)) sics.multiDrive(multiDev) if se_wait > 0: slog('wait for ' + str(se_wait) + ' seconds') time.sleep(se_wait) # load sample positions sample_stage_name = configModel.sample_stage sample_positions = str(configModel.sample_position) if (len(sample_positions) == 0) or (sample_positions == 'fixed'): samz_list = [None] else: samz_list = [] stage = SAMPLE_STAGES.get_stage_by_name(sample_stage_name) if stage is None: raise 'Invalid stage name ' + str(sample_stage_name) samz_value = stage.get_samz(sample_positions) samz_list.append(samz_value) print samz_list for samz in samz_list: sics.execute('histmem stop') time.sleep(3) if mode == 'ba': sics.execute('histmem mode unlimited') sics.execute('histmem ba enable') else: sics.execute('histmem mode time') sics.execute('histmem ba disable') if samz is not None: print 'run samz %.2f' % samz sics.execute('run samz %.2f' % samz) # sics.execute('prun samz 2' % samz) !!! 
time.sleep(1) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) sics.execute('newfile HISTOGRAM_XYT') # sics.execute('autosave 60') # 60 seconds time.sleep(1) # start/stop hmm if mode == 'count_roi': sics.execute('histmem preset 1') time.sleep(1) sics.execute('histmem start') time.sleep(5) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) sics.execute('histmem stop') print 'frames:', len(scan['angles']) count_rate_history = [] for frame_index in xrange(len(scan['angles'])): angle = scan['angles' ][frame_index] preset = scan['presets' ][frame_index] maxTime = scan['maxTimes'][frame_index] print 'drive %s %.6f' % (scanVariable, angle) # sics.drive(scanVariable, float(angle)) sics.execute('drive %s %.6f' % (scanVariable, angle)) time.sleep(10) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) print 'drive done' time.sleep(1) if mode == 'ba': sics.execute('histmem ba roi roi') sics.execute('histmem ba monitor %i' % 1) sics.execute('histmem ba mintime %i' % configModel.min_time) sics.execute('histmem ba maxtime %i' % maxTime) sics.execute('histmem ba maxdetcount %i' % preset) sics.execute('histmem ba maxbmcount -1') sics.execute('histmem ba undermintime ba_maxdetcount') print 'histmem start' sics.execute('histmem start block') time0 = time.time() while sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): if time.time() - time0 > 15.0: print 'WARNING: HM may not have started counting. Gumtree will save anyway.' 
break else: time.sleep(0.1) time0 = time.time() waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) print 'time counted (estimate):', float(time.time() - time0) else: print 'histmem start' while True: if mode == 'count_roi': sics.execute('histmem preset %i' % maxTime) else: sics.execute('histmem preset %i' % preset) time.sleep(5) sics.execute('histmem start') time.sleep(5) if mode == 'count_roi': print 'count_roi' time.sleep(configModel.min_time) count_roi = 0 while not sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): try: count_roi = int(sicsext.runCommand('hmm configure num_events_filled_to_count_roi')) # print count_roi if count_roi > preset: print count_roi print 'reached desired count_roi' sics.execute('histmem pause') time.sleep(1) break except: pass time.sleep(0.5) break else: waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) valid = False for i in xrange(10): time.sleep(1) detector_time = sics.getValue('/instrument/detector/time').getFloatData() valid = (detector_time >= preset - 1) or (detector_time >= preset * 0.90) if valid: break print 'detector_time:', detector_time if valid: break else: print 'scan was invalid and needs to be repeated' # sics.execute('histmem stop') sics.execute('save %i' % frame_index) frame_index += 1 print 'histmem done' #check if in background if early_exit_enabled.value : try: roi_counts = float(sics.get_raw_value('hmm configure num_events_filled_to_count_roi')) roi_time = sics.getValue('/instrument/detector/time').getFloatData() roi_rate = roi_counts / roi_time print 'measured count rate:', roi_rate count_rate_history.append(roi_rate) bkg_frames = background_frames.value bkg_range = background_threshold.value if (len(count_rate_history) >= bkg_frames) and (builtin_max(count_rate_history[-bkg_frames:]) < bkg_range): print 'background reached' print 'scan completed (early exit)' break except: pass sics.execute('newfile clear') # sics.execute('autosave 0') # disable autosave # Get output filename filenameController = 
sicsController.findDeviceController('datafilename') savedFilename = filenameController.getValue().getStringData() print 'saved:', savedFilename sics.execute('histmem ba disable') # print 'fit the curve' # fit_curve() print 'done' print def btnPlotSteps_clicked(): scan = getScan() # print 'zero angle:' # print scan_reference.value print '' print 'scan variable range [%f, %f]' % (scan['angles'][0], scan['angles'][-1]) print '' #Plot1.clear() #Plot2.clear() scan_angleMin = builtin_min(scan['angles']) scan_angleMax = builtin_max(scan['angles']) if scan_angleMin == 0 and scan_angleMax == 0: print 'please select a scan template' return if scan_angleMin == scan_angleMax: print 'the min angle and max angle can not be the same' return dummy = zeros(2) dummy.axes[0] = [scan_angleMin, scan_angleMax] #print [scan_angleMin, scan_angleMax] if Plot1.ds != None: Plot1.clear_masks() Plot1.add_dataset(dummy) Plot1.title = 'Preview' Plot1.x_label = 'm2om' Plot1.y_label = 'counts per sec' # Plot1.x_range = [scan_angleMin,scan_angleMax] inclusive = True angles = scan['angles'] for i in xrange(1, len(angles)): xL = angles[i - 1] xH = angles[i ] Plot1.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in xrange(len(groups)): Plot1.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # convert to q PLOT 2 crystal = str(crystal_name.value) if 'Si111' in crystal: wavelength = 4.74 elif 'Si311' in crystal: wavelength = 2.37 else: wavelength = float('nan') q = convert2q(angles, scan_reference.value, wavelength) scan_angleMin = builtin_min(q) scan_angleMax = builtin_max(q) if isnan(scan_angleMin) or isnan(scan_angleMax): print 'please check the wavelength' return if scan_angleMin == scan_angleMax: print 'the min q and max q can not be the same' return dummy = zeros(2) dummy.axes[0] = [scan_angleMin, scan_angleMax] if Plot2.ds != None: Plot2.clear_masks() Plot2.add_dataset(dummy) Plot2.title = 'Preview' Plot2.x_label = 'q [1/A]' Plot2.y_label = 
'counts per sec' Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.x_range = [1e-6, q[-1]] for i in xrange(1, len(q)): xL = q[i - 1] xH = q[i ] Plot2.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in xrange(len(groups)): Plot2.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # print "angles" # print angles # print q print '' print 'scan q-range [%f, %f]' % (q[0], q[-1]) print '' def openDataset(path): ds = df[str(path)] ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm') # ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm_xy') ds.__iDictionary__.addEntry('time', 'entry1/instrument/detector/time') ds.__iDictionary__.addEntry('m1om', 'entry1/instrument/crystal/m1om') ds.__iDictionary__.addEntry('m1chi', 'entry1/instrument/crystal/m1chi') ds.__iDictionary__.addEntry('m1x', 'entry1/instrument/crystal/m1x') ds.__iDictionary__.addEntry('m2om', 'entry1/instrument/crystal/m2om') ds.__iDictionary__.addEntry('m2chi', 'entry1/instrument/crystal/m2chi') ds.__iDictionary__.addEntry('m2x', 'entry1/instrument/crystal/m2x') ds.__iDictionary__.addEntry('m2y', 'entry1/instrument/crystal/m2y') ds.__iDictionary__.addEntry('mdet', 'entry1/instrument/crystal/mdet') ds.__iDictionary__.addEntry('pmom', 'entry1/instrument/crystal/pmom') ds.__iDictionary__.addEntry('pmchi', 'entry1/instrument/crystal/pmchi') ds.__iDictionary__.addEntry('ss1u', 'entry1/instrument/slits/ss1u') ds.__iDictionary__.addEntry('ss1d', 'entry1/instrument/slits/ss1d') ds.__iDictionary__.addEntry('ss1r', 'entry1/instrument/slits/ss1r') ds.__iDictionary__.addEntry('ss1l', 'entry1/instrument/slits/ss1l') ds.__iDictionary__.addEntry('ss2u', 'entry1/instrument/slits/ss2u') ds.__iDictionary__.addEntry('ss2d', 'entry1/instrument/slits/ss2d') ds.__iDictionary__.addEntry('ss2r', 'entry1/instrument/slits/ss2r') ds.__iDictionary__.addEntry('ss2l', 'entry1/instrument/slits/ss2l') ds.__iDictionary__.addEntry('ss1vo', 'entry1/instrument/slits/ss1vo') 
ds.__iDictionary__.addEntry('ss1vg', 'entry1/instrument/slits/ss1vg') ds.__iDictionary__.addEntry('ss1ho', 'entry1/instrument/slits/ss1ho') ds.__iDictionary__.addEntry('ss1hg', 'entry1/instrument/slits/ss1hg') ds.__iDictionary__.addEntry('ss2vo', 'entry1/instrument/slits/ss2vo') ds.__iDictionary__.addEntry('ss2vg', 'entry1/instrument/slits/ss2vg') ds.__iDictionary__.addEntry('ss2ho', 'entry1/instrument/slits/ss2ho') ds.__iDictionary__.addEntry('ss2hg', 'entry1/instrument/slits/ss2hg') ds.__iDictionary__.addEntry('samplename', 'entry1/sample/name') ds.__iDictionary__.addEntry('wavelength', 'entry1/instrument/crystal/wavelength') ds.__iDictionary__.addEntry('TimeStamp', 'entry1/time_stamp') return ds def btnPlot_clicked(): #Plot1.clear() #Plot2.clear() fns = [] for sds in __DATASOURCE__.getSelectedDatasets(): fns.append(sds.getLocation()) if len(fns) != 1: print 'select one dataset' return path = fns[0] basename = os.path.basename(str(path)) basename = basename[:basename.find('.nx.hdf')] ds = openDataset(path) scanVariable = str(scan_variable.value) scanVariable = scanVariable[:scanVariable.find(' ')] scanVariable = ds[scanVariable] samplename = str(ds.samplename) sorting = scan_variable_sorting.value if sorting: info = sorted(enumerate(scanVariable), key=lambda item:item[1]) scanVariable = [item[1] for item in info] shape = ds.shape if shape[0] <= 1: print 'Must have at least 2 scan positions' return n = shape[0] # tubes data = zeros(n) tids = [] if combine_tube0.value: tids.append(0) if combine_tube1.value: tids.append(1) if combine_tube2.value: tids.append(2) if combine_tube3.value: tids.append(3) if combine_tube4.value: tids.append(4) if combine_tube6.value: tids.append(6) Plot1.clear() if str(combine_mode.value) == 'individual': for tid in tids: if ds.hmm.ndim == 4: data[:] = ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] = ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: 
data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] # dataF = data.float_copy() # dataF.title = 'Tube %i' % tid # Plot1.add_dataset(dataF) Plot1.title = 'Count Rate (individual)' else: for tid in tids: if ds.hmm.ndim == 4: data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] data.title = 'Tubes ' + str(tids) Plot1.set_dataset(data) Plot1.set_mouse_follower_precision(6, 2, 2) Plot1.title = basename + ' (combined): ' + samplename # Plot1.title = Plot1.title + ' ' + basename if Plot1.ds is not None: Plot1.x_label = str(scan_variable_plot.value) Plot1.y_label = 'counts per sec' Plot2.clear() time.sleep(0.3) ds0 = Plot1.ds[0] # # don't understand how this works xMax = 0 yMax = 0 for i in xrange(len(ds0)): if yMax < ds0[i]: xMax = ds0.axes[0][i] yMax = ds0[i] peakangle = xMax q = convert2q(scanVariable, peakangle, ds.wavelength) data = Dataset(data, axes=[q[:]]) # data.axes[0] = q[:] Plot2.set_dataset(data) Plot2.set_mouse_follower_precision(6, 2, 2) Plot2.x_label = 'q [1/A]' Plot2.y_label = 'counts per sec' # Plot1.title = 'Main Detector ' + basename + ': ' + samplename # Plot2.title = 'Sample: ' + samplename + '; ' + sampledescription Plot2.title = basename + ' (combined): ' + samplename Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.set_marker_on(True) # plotXMax = Par('float', q[-1]) # Plot2.x_range = [1e-6, plotXMax.value] if q[-1] > 1e-6 : Plot2.x_range = [1e-6, q[-1]] fit_curve() def convert2q(angles, reference, wavelength): if wavelength is list: wavelength = wavelength[0] wavelength = float(wavelength) deg2rad = 3.14159265359 / 180 f = 4 * 3.14159265359 / 
wavelength if bool(negative_steps.value): f *= -1.0 q = [(f * sin(deg2rad * (angle - reference) / 2)) for angle in angles] return q def __run_script__(fns): # Use the provided resources, please don't remove. global Plot1 global Plot2 global Plot3 print 'please press "Run Single Scan" or "Run Multiple Scans"' btnPlot_clicked() def __dispose__(): global Plot1 global Plot2 global Plot3 Plot1.clear() Plot2.clear() Plot3.clear() # # model class ConfigurationModel: def __init__(self): self.scanVariable = str(scan_variable.value) self.scanVariable = self.scanVariable[:self.scanVariable.find(' ')] self.crystal = str(crystal_name.value) self.mode = str(scan_mode.value) self.scan = getScan() self.scan_reference = scan_reference.value self.logscale = bool(logscale_position.value) self.negative = bool(negative_steps.value) self.stepInfo = [] for step in stepInfo: d = dict() for key in step.keys(): d[key] = step[key].value self.stepInfo.append(d); self.user_name = str(user_name.value) self.user_email = str(user_email.value) self.sample_name = str(sample_name.value) self.sample_description = str(sample_description.value) self.sample_thickness = float(sample_thickness.value) # vertical/horizontal pre-slit self.ss1vg = float(pss_ss1vg.value) self.ss1vo = float(pss_ss1vo.value) self.ss1hg = float(pss_ss1hg.value) self.ss1ho = float(pss_ss1ho.value) # vertical/horizontal post-slit self.ss2vg = float(pss_ss2vg.value) self.ss2vo = float(pss_ss2vo.value) self.ss2hg = float(pss_ss2hg.value) self.ss2ho = float(pss_ss2ho.value) self.se_enabled1 = bool(se_enabled1.value) self.se_ctr1 = str(se_ctr1.value) self.se_pos1 = float(se_pos1.value) self.se_wait1 = int(se_wait1.value) self.se_enabled2 = bool(se_enabled2.value) self.se_ctr2 = str(se_ctr2.value) self.se_pos2 = float(se_pos2.value) self.se_wait2 = int(se_wait2.value) self.se_enabled3 = bool(se_enabled3.value) self.se_ctr3 = str(se_ctr3.value) self.se_pos3 = float(se_pos3.value) self.se_wait3 = int(se_wait3.value) # load sample 
positions self.sample_stage = str(scan_sample_stage.value) self.sample_position = str(scan_sample_position.value) self.min_time = int(scan_min_time.value) # load early exit self.early_exit_enabled = bool(early_exit_enabled.value) self.bkg_frames = int(background_frames.value) self.bkg_threshold = float(background_threshold.value) def apply(self): for option in scan_variable.options: if self.scanVariable == option[:option.find(' ')]: scan_variable.value = option crystal_name.value = self.crystal scan_mode.value = self.mode logscale_position.value = self.logscale negative_steps.value = self.negative scan_reference.value = self.scan_reference i = 0 for step in self.stepInfo: for key in step.keys(): stepInfo[i][key].value = step[key] setEnabled(i) i += 1 setScanMode() user_name.value = self.user_name user_email.value = self.user_email sample_name.value = self.sample_name sample_description.value = self.sample_description sample_thickness.value = self.sample_thickness # vertical/horizontal pre-slit pss_ss1vg.value = self.ss1vg pss_ss1vo.value = self.ss1vo pss_ss1hg.value = self.ss1hg pss_ss1ho.value = self.ss1ho # vertical/horizontal post-slit pss_ss2vg.value = self.ss2vg pss_ss2vo.value = self.ss2vo pss_ss2hg.value = self.ss2hg pss_ss2ho.value = self.ss2ho se_enabled1.value = self.se_enabled1 se_ctr1.value = self.se_ctr1 se_pos1.value = self.se_pos1 se_wait1.value = self.se_wait1 toggle_se(1) se_enabled2.value = self.se_enabled2 se_ctr2.value = self.se_ctr2 se_pos2.value = self.se_pos2 se_wait2.value = self.se_wait2 toggle_se(2) se_enabled3.value = self.se_enabled3 se_ctr3.value = self.se_ctr3 se_pos3.value = self.se_pos3 se_wait3.value = self.se_wait3 toggle_se(3) # load sample positions scan_sample_position.value = self.sample_position scan_sample_stage.value = self.sample_stage scan_min_time.value = self.min_time # load early exit early_exit_enabled.value = self.early_exit_enabled background_frames.value = self.bkg_frames background_threshold.value = 
self.bkg_threshold if early_exit_enabled.value : background_frames.enabled = True background_threshold.enabled = True else: background_frames.enabled = False background_threshold.enabled = False check instrument ready for just once __script__.title = 'KKB Measurement Script' __script__.version = '3.1' from gumpy.commons import sics from org.gumtree.gumnix.sics.control import ServerStatus from pickle import Pickler, Unpickler import time from math import log as ln from math import exp, isnan, isinf, sin from __builtin__ import max as builtin_max from __builtin__ import min as builtin_min from org.eclipse.swt.widgets import FileDialog from org.eclipse.swt import SWT from org.eclipse.swt.widgets import Display from java.io import File from gumpy.nexus.fitting import Fitting, GAUSSIAN_FITTING import math from Internal import sample_stage ''' Disable dataset caching ''' DatasetFactory.__cache_enabled__ = False SINGLE_TYPE = SWT.SINGLE SAVE_TYPE = SWT.SAVE MULTI_TYPE = SWT.MULTI class __Display_Runnable__(Runnable): def __init__(self, type=SINGLE_TYPE, ext=['*.*']): self.filename = None self.filenames = None self.path = None self.type = type self.ext = ext def run(self): global __UI__ dialog = FileDialog(__UI__.getShell(), self.type); dialog.setFilterExtensions(self.ext) dialog.open() self.filename = dialog.getFilterPath() + File.separator + dialog.getFileName() self.filenames = dialog.getFileNames() self.path = dialog.getFilterPath() def open_file_dialog(type=SWT.SINGLE, ext=['*.*']): __display_run__ = __Display_Runnable__(type, ext) Display.getDefault().asyncExec(__display_run__) while __display_run__.filename is None: time.sleep(0.5) if type == SWT.MULTI: fns = [] for fn in __display_run__.filenames: fns.append(__display_run__.path + '/' + fn) return fns return __display_run__.filename # # templates reference_templates_dict = {} reference_templates_dict['Si111'] = 180.3565 reference_templates_dict['Si311'] = -0.4100 steps_templates_list = [] # 
steps_templates['Background'] = [ # 'time', 'logscale', # [20, 6.0e-5, 1200, 1200]] # steps_templates['----------'] = [ # 'time', 'logscale', # [0, 0, 0, 0]] steps_templates_list.append([ 'Si111: Logarithmic Overview Scan', 'time', 'logscale', [17, 1.20e-4, 1, 1200], [30, 22.0, 20, 1200]]) steps_templates_list.append([ 'Si111: Logarithmic Scan (few features)', 'ba', 'logscale', [33, 6.0e-5, 1000, 1200], [34, 20.0, 1000, 1200]]) steps_templates_list.append([ 'Si111: Logarithmic Scan (fine features)', 'ba', 'logscale', [33, 6.0e-5, 1000, 1200], [65, 10.0, 1000, 1200]]) steps_templates_list.append([ 'Si111: Logarithmic Taiki Scan (15 points)', 'ba', 'logscale', [2, 6.0e-5, 1000, 60], [1, 10000, 1000, 60], [10, 25, 1000, 60]]) ''' steps_templates_list.append([ 'Si111: Kinetic Scan 4 points', 'time', 'logscale', [ 0, 6.0e-5, 1, 1200], [1, 5.0e-3, 180, 1200], [3, 1.5e-2, 180, 1200]]) ''' steps_templates_list.append([ '----------', 'time', 'logscale', [0, 0, 0, 0]]) steps_templates_list.append([ 'Si311: Logarithmic Overview Scan', 'time', 'logscale', [17, 2.0e-5, 1, 1200], [30, 23.0, 20, 1200]]) steps_templates_list.append([ 'Si311: Logarithmic Scan (few features, broadened peak, 80+29)', 'ba', 'logscale', [80, 2e-5, 1000, 1200], [29, 15.0, 1000, 1200]]) steps_templates_list.append([ 'Si311: Logarithmic Scan (few features, broadened peak, 40+33)', 'ba', 'logscale', [40, 2e-5, 1000, 1200], [33, 10.0, 1000, 1200]]) steps_templates_list.append([ 'Si311: Logarithmic Scan (few features, Taiki)', 'ba', 'logscale', [33, 2e-5, 1000, 1200], [25, 20.0, 1000, 1200]]) ret = sample_stage.check_declarations() if not ret[0] : open_warning(ret[1]) reload(sample_stage) SAMPLE_STAGES = sample_stage.StagePool() # # export path __EXPORT_PATH__ = 'V:/shared/KKB Logbook/Temp Plot Data Repository/' if not os.path.exists(__EXPORT_PATH__): os.makedirs(__EXPORT_PATH__) # # User Details user_name = Par('string', 'Christine', options=['Christine', 'Lela', 'Jitendra']) user_name.title = 'Name' 
# ---- User Details (continued): email selector ----
user_email = Par('string', 'cre@ansto.gov.au', options=['cre@ansto.gov.au', 'liliana.decampo@ansto.gov.au', 'jtm@ansto.gov.au'])
user_email.title = 'EMail'
g0 = Group('User Details')
g0.numColumns = 2
g0.add(user_name, user_email)

# # Sample Details
# Choosing 'Empty Cell'/'Empty Beam' disables the thickness field (see command string).
sample_name = Par('string', 'UNKNOWN', options=['Empty Cell', 'Empty Beam'], command="sample_thickness.enabled = sample_name.value not in ['Empty Cell', 'Empty Beam']")
sample_name.title = 'Name'
sample_description = Par('string', 'UNKNOWN')
sample_description.title = 'Description'
sample_thickness = Par('string', '1', options=['0.01', '0.1', '1.0', '10.0'])
sample_thickness.title = 'Thickness (mm)'
g0 = Group('Sample Details')
g0.numColumns = 2
g0.add(sample_name, sample_thickness, sample_description)
# Group('Sample Details').add(sample_name, sample_description, sample_thickness)

# # Crystal
# The crystal label is derived from the monochromator angle m2om read from SICS:
# m2om > 90 deg is reported as Si111, otherwise Si311.
crystal_name = Par('string', 'UNKNOWN')
crystal_name.title = 'Name'
crystal_name.enabled = False
try:
    m2om = sics.getValue('/instrument/crystal/m2om').getFloatData()
    if m2om > 90:
        crystal_name.value = 'Si111 (4.74 Angstroms)'
    else:
        crystal_name.value = 'Si311 (2.37 Angstroms)'
except:
    # best effort: leave 'UNKNOWN' if the SICS value cannot be read
    pass
g0 = Group('Crystal Info')
g0.numColumns = 2
g0.add(crystal_name)
# CRYSTAL END #############################################

# SLIT 1 #######################################################################

def updateOffset(gapBox, offsetBox):
    # An offset only makes sense for a numeric gap; disable the offset box
    # for the 'fully opened' / 'fully closed' choices.
    offsetBox.enabled = 'fully' not in gapBox.value

def getSlitGapAndOffset(aPath, a0, bPath, b0):
    """Read two opposing slit-blade positions from SICS and return
    (gap, offset) relative to the zero positions a0/b0.

    Returns (nan, nan) when either value cannot be read from SICS.
    """
    try:
        a = sics.getValue(aPath).getFloatData()
        b = sics.getValue(bPath).getFloatData()
        gap = (a - a0 - (b - b0)) / 1.0
        offset = (a - a0 + (b - b0)) / 2.0
        return (gap, offset)
    except:
        return (float('nan'), float('nan'))

# Slit-1 left/right zero positions depend on the selected crystal;
# up/down zeros are crystal-independent.
crystal = str(crystal_name.value)
if 'Si111' in crystal:
    ss1r0 = 28.35
    ss1l0 = 27.75
elif 'Si311' in crystal:
    ss1r0 = -9.16
    ss1l0 = -9.76
else:
    ss1r0 = float('nan')
    ss1l0 = float('nan')
ss1u0 = -8.04
ss1d0 = -7.30
(ss1vg, ss1vo) = getSlitGapAndOffset('/instrument/slits/ss1u', ss1u0, '/instrument/slits/ss1d', ss1d0)
(ss1hg, ss1ho) = getSlitGapAndOffset('/instrument/slits/ss1r', ss1r0, '/instrument/slits/ss1l', ss1l0)

pss_ss1vg = Par('string', '%.1f' % ss1vg, options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'], command='updateOffset(pss_ss1vg, pss_ss1vo)')
pss_ss1vg.title = 'Vertical Gap (mm)'
# pss_ss1vg.colspan = 50
pss_ss1vo = Par('float', ss1vo)
pss_ss1vo.title = 'Vertical Offset (mm)'
# pss_ss1vo.colspan = 50
pss_ss1hg = Par('string', '%.1f' % ss1hg, options=['fully closed', '5', '10', '15', '20', '25', '30', '40', '50', 'fully opened'], command='updateOffset(pss_ss1hg, pss_ss1ho)')
pss_ss1hg.title = 'Horizontal Gap (mm)'
# pss_ss1hg.colspan = 50
pss_ss1ho = Par('float', ss1ho)
pss_ss1ho.title = 'Horizontal Offset (mm)'
# pss_ss1ho.colspan = 50
g0 = Group('Sample Slit Settings')
g0.numColumns = 2
g0.add(pss_ss1vg, pss_ss1vo, pss_ss1hg, pss_ss1ho)
# SLIT 1 END #######################################################################

# SAMPLE ENVIRONMENT BLOCK #########################################################
# Up to three optional sample-environment controllers; each row has an enable
# checkbox, a device name, a target value and a wait time.  The name/value/wait
# widgets start disabled and are toggled by toggle_se() below.
gse = Group('Sample Environment')
gse.numColumns = 10
se_enabled1 = Par('bool', False, command = 'toggle_se(1)')
se_enabled1.title = 'Controller 1'
se_enabled1.colspan = 1
se_ctr1 = Par('string', '', options = [])
se_ctr1.title = 'name'
se_ctr1.colspan = 2
se_ctr1.enabled = False
se_pos1 = Par('float', 0.)
se_pos1.title = 'Values'
se_pos1.colspan = 6
se_pos1.enabled = False
se_wait1 = Par('int', 0)
se_wait1.title = 'Wait'
se_wait1.colspan = 1
se_wait1.enabled = False
se_enabled2 = Par('bool', False, command = 'toggle_se(2)')
se_enabled2.title = 'Controller 2'
se_enabled2.colspan = 1
se_ctr2 = Par('string', '', options = [])
se_ctr2.title = 'name'
se_ctr2.colspan = 2
se_ctr2.enabled = False
se_pos2 = Par('float', 0.)
# Sample-environment controller 2 (continued): value and wait-time widgets.
se_pos2.title = 'Values'
se_pos2.colspan = 6
se_pos2.enabled = False
se_wait2 = Par('int', 0)
se_wait2.title = 'Wait'
se_wait2.colspan = 1
se_wait2.enabled = False

# Sample-environment controller 3.
se_enabled3 = Par('bool', False, command = 'toggle_se(3)')
se_enabled3.title = 'Controller 3'
se_enabled3.colspan = 1
se_ctr3 = Par('string', '', options = [])
se_ctr3.title = 'name'
se_ctr3.colspan = 2
se_ctr3.enabled = False
se_pos3 = Par('float', 0.)
se_pos3.title = 'Values'
se_pos3.colspan = 6
se_pos3.enabled = False
se_wait3 = Par('int', 0)
se_wait3.title = 'Wait'
se_wait3.colspan = 1
se_wait3.enabled = False
gse.add(se_enabled1, se_ctr1, se_pos1, se_wait1,
        se_enabled2, se_ctr2, se_pos2, se_wait2,
        se_enabled3, se_ctr3, se_pos3, se_wait3,)

# Any drivable SICS device may be chosen as a sample-environment controller.
devices = sicsext.getDrivables()
se_ctr1.options = devices
se_ctr2.options = devices
se_ctr3.options = devices

def toggle_se(id):
    """Enable or disable the name/value/wait widgets of sample-environment
    controller *id* (1-3) according to that controller's enable checkbox.

    Raises Exception for any other index.
    """
    id = int(id)
    if id == 1:
        flag = se_enabled1.value
        se_ctr1.enabled = flag
        se_pos1.enabled = flag
        se_wait1.enabled = flag
    elif id == 2:
        flag = se_enabled2.value
        se_ctr2.enabled = flag
        se_pos2.enabled = flag
        se_wait2.enabled = flag
    elif id == 3:
        flag = se_enabled3.value
        se_ctr3.enabled = flag
        se_pos3.enabled = flag
        se_wait3.enabled = flag
    else:
        # Fix: the original raised a plain string ("raise 'illegal index...'"),
        # which is a TypeError at raise time on Python/Jython 2.6+ and would
        # hide the intended message.  Raise a real exception instead.
        raise Exception('illegal index for sample environment')
# SAMPLE ENVIRONMENT BLOCK END #####################################################

## Scan parameters ##########################################################################################################
scan_variable = Par('string', 'm2om [deg]', options=[
    #'pmom [deg]', 'pmchi [deg]',
    'm1om [deg]', 'm1chi [deg]', 'm1x [mm]',
    'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]',
    'mdet [mm]',
    'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]',
    'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]',
    'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]',
    'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]'],
    command="scan_variable_plot.value = scan_variable.value")
scan_variable.title = 'Scan Variable'
scan_variable.colspan = 25
scan_reference = Par('float', '0.0')
scan_reference.title = 'Zero Angle'
scan_reference.colspan = 25
# Preset the zero angle from the crystal-specific reference table.
for key in reference_templates_dict.keys():
    if key in crystal_name.value:
        scan_reference.value = reference_templates_dict[key]
scan_mode = Par('string', 'ba', options=['ba', 'time'], command='setScanMode()')
scan_mode.title = 'Acquisition Mode'
scan_mode.colspan = 25
scan_min_time = Par('int', '5')
scan_min_time.title = 'Min Time (sec)'
scan_min_time.colspan = 25
empty_label = Par('label', '')
empty_label.colspan = 25
scan_sample_stage = Par('string', '', command = 'sample_stage_changed()')
scan_sample_stage.colspan = 25
scan_sample_stage.title = 'Sample Stage'
scan_sample_stage.options = SAMPLE_STAGES.get_stage_names()
current_stage = SAMPLE_STAGES.get_stage_in_service()
if current_stage is not None:
    scan_sample_stage.value = current_stage.get_name()
scan_sample_position = Par('string', 'fixed')
scan_sample_position.title = 'Sample Position'
scan_sample_position.colspan = 25
scan_sample_position.options = ['fixed', '----------']
if current_stage is not None:
    scan_sample_position.options += current_stage.get_sample_indexes()
logscale_position = Par('bool', False, command='setStepTitles()')
logscale_position.title = 'Logarithmic Steps'
logscale_position.colspan = 25
negative_steps = Par('bool', False)
negative_steps.title = 'Negative Steps'
negative_steps.colspan = 25
steps_label = Par('label', 'Please choose scan template or adjust steps manually: ')
steps_label.colspan = 200
steps_templates = Par('string', '', options=[item[0] for item in steps_templates_list], command='setTemplate()')
steps_templates.title = 'Scan Template'
steps_templates.colspan = 100
early_exit_enabled = Par('bool', True, command = "set_early_exit_enabled()")
early_exit_enabled.title = "Enable Early Exit"
early_exit_enabled.colspan = 25
background_frames = Par('int', 3)
background_frames.title = 'Background Frames'
background_frames.colspan = 25
background_threshold = Par('float', 0.26)
background_threshold.title = 'Background Threshold'
background_threshold.colspan = 25
# steps_space = Par('space', '')
# steps_space.colspan = 10
g0 = Group('Scan Parameters')
g0.numColumns = 100 # 9
g0.add(scan_variable, scan_mode, scan_reference, early_exit_enabled, \
    logscale_position, scan_min_time, scan_sample_stage, background_frames, \
    negative_steps, empty_label, scan_sample_position, background_threshold, \
    steps_label, steps_templates)

def sample_stage_changed():
    """Refresh the sample-position choices for the newly selected stage."""
    stage = SAMPLE_STAGES.get_stage_by_name(str(scan_sample_stage.value))
    # scan_sample_position.value = 'fixed'
    if not stage is None:
        scan_sample_position.options = ['fixed', '----------'] + stage.get_sample_indexes()
    else:
        scan_sample_position.options = ['fixed', '----------']

def set_early_exit_enabled():
    """Grey out the early-exit tuning fields unless early exit is enabled."""
    if early_exit_enabled.value:
        background_frames.enabled = True
        background_threshold.enabled = True
    else:
        background_frames.enabled = False
        background_threshold.enabled = False

# Four rows of step widgets (enabled, number of points, step size, preset,
# max time); manual edits clear the current template selection via command.
stepInfo = []
for i in xrange(4):
    steps_e = Par('bool', True, command='setEnabled(%i)' % i)
    steps_e.title = '(%i)' % (i + 1)
    steps_e.colspan = 10
    steps_m = Par('int', 0, command='clearScanTemplateSelection()')
    steps_m.title = 'Number of points'
    steps_m.colspan = 20
    steps_s = Par('float', 0, command='clearScanTemplateSelection()')
    steps_s.title = 'Step Size [deg]'
    steps_s.colspan = 20
    steps_p = Par('int', 0, command='clearScanTemplateSelection()')
    steps_p.title = 'Mode Preset'
    steps_p.colspan = 25
    steps_t = Par('int', 1200, command='clearScanTemplateSelection()')
    steps_t.title = 'Max Time'
    steps_t.colspan = 25
    stepInfo.append({'enabled': steps_e, 'dataPoints':steps_m, 'stepSize':steps_s, 'preset':steps_p, 'maxTime':steps_t})
    g0.add(steps_e, steps_m, steps_s, steps_p, steps_t)

def clearScanTemplateSelection():
    # A manual edit of any step field invalidates the template selection.
    steps_templates.value = None

btnPlotSteps = Act('btnPlotSteps_clicked()', 'Plot Measurement Steps') # 'compare measurement steps with previous scan')
btnPlotSteps.colspan = 50
cnfg_save_btn = Act('saveConfiguration()', 'Save Single Scan Parameters')
cnfg_save_btn.colspan = 50
btnTimeEstimation = Act('runTimeEstimation()', 'Time Estimation with selected Data Set')
btnTimeEstimation.colspan = 50
txtTimeEstimation = Par('int', '0')
txtTimeEstimation.title = 'Time Estimation (min)'
txtTimeEstimation.enabled = False
txtTimeEstimation.colspan = 50
g0.add(btnPlotSteps, cnfg_save_btn, btnTimeEstimation, txtTimeEstimation)

def runTimeEstimation():
    """Estimate the total scan time and write it (minutes) to txtTimeEstimation.

    In 'time' mode the presets are times and are simply summed.  Otherwise the
    count rate at each planned angle is interpolated (via sample()) from the
    single selected data set, converted to a time per point, and clamped
    between the minimum time and the per-step maximum.  25 seconds of motor
    movement per point is added in both modes.
    """
    if str(scan_mode.value) == 'time':
        scan = getScan()
        times = scan['presets']
        txtTimeEstimation.value = int((sum(times) + len(times) * 25) / 60.0) # 25 seconds for each move
        return
    fns = []
    for sds in __DATASOURCE__.getSelectedDatasets():
        fns.append(sds.getLocation())
    if len(fns) != 1:
        print 'select one dataset'
        return
    ds = openDataset(fns[0])
    # strip the unit suffix, e.g. 'm2om [deg]' -> 'm2om'
    scanVariable = str(scan_variable.value)
    scanVariable = scanVariable[:scanVariable.find(' ')]
    scanVariable = ds[scanVariable]
    # sort scan positions (and, below, the counts) into ascending order
    info = sorted(enumerate(scanVariable), key=lambda item:item[1])
    scanVariable = [item[1] for item in info]
    shape = ds.shape
    if shape[0] <= 1:
        print 'Must have at least 2 scan positions'
        return
    n = shape[0]
    # tubes: sum the selected main-detector tubes into one count array
    data = zeros(n)
    tids = []
    if combine_tube0.value: tids.append(0)
    if combine_tube1.value: tids.append(1)
    if combine_tube2.value: tids.append(2)
    if combine_tube3.value: tids.append(3)
    if combine_tube4.value: tids.append(4)
    if combine_tube6.value: tids.append(6)
    for tid in tids:
        if ds.hmm.ndim == 4:
            data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm
        else:
            data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy
    # normalise counts to count rate
    if data.size == 1:
        data[0] = data[0] * 1.0 / ds.time
    else:
        data[:] = data[:] * 1.0 / ds.time
    data[:] = [data[item[0]] for item in info] # sorting
    # angle and count rate
    a0 = [float(angle) for angle in scanVariable]
    r0 = [float(rate) for rate in data[:]]
    # angle, counts, max time and min time
    model = ConfigurationModel()
    scan = model.scan
    tMin = model.min_time
    a1 = scan['angles']
    c1 = scan['presets']
    t1 = scan['maxTimes']
    total = 0.0
    for i in xrange(len(a1)):
        try:
            rate = sample(a0, r0, a1[i])
            time = c1[i] / rate
            if time < tMin:
                total += tMin
            elif time > t1[i]:
                total += t1[i]
            else:
                total += time
            #print ("angle: " + str(a1[i])
            #    + " expected counts: " + str(c1[i])
            #    + " rate:" + str(rate)
            #    + " time:" + str(time)
            #    + " total:" + str(total))
        except ValueError as e:
            # angle outside the measured range: assume the worst case
            if e.message == "OutOfRange":
                total += t1[i] # add max time
            else:
                raise
    total += int(len(a1) * 25) # 25 seconds for each move
    txtTimeEstimation.value = int(total / 60.0)

def sample(x0, y0, x1):
    """Piecewise-linear interpolation of y at x1 from samples (x0, y0).

    Assumes x0 is in ascending order (callers sort beforehand).  Raises
    ValueError("OutOfRange") when x1 lies outside [min(x0), max(x0)];
    inconsistent input raises a plain Exception.
    """
    from __builtin__ import max, min
    if len(x0) != len(y0):
        raise Exception("len(x0) != len(y0)")
    x0_min = min(x0)
    x0_max = max(x0)
    if len(x0) < 2:
        raise Exception("len(x0) < 2")
    if x0_min >= x0_max:
        raise Exception("x0_min >= x0_max")
    if x1 < x0_min:
        raise ValueError("OutOfRange")
    if x0_max < x1:
        raise ValueError("OutOfRange")
    i0 = 0
    i1 = 1
    x0i0 = x0[i0]
    y0i0 = y0[i0]
    x0i1 = x0[i1]
    y0i1 = y0[i1]
    # in case first x values are equal
    while x0i0 == x0i1:
        i1 += 1
        x0i1 = x0[i1]
        y0i1 = y0[i1]
    # not iterable
    while x0i1 < x1:
        x0i0 = x0i1
        y0i0 = y0i1
        i1 += 1
        x0i1 = x0[i1]
        y0i1 = y0[i1]
    return y0i0 + (x1 - x0i0) * (y0i1 - y0i0) / (x0i1 - x0i0)
## Scan parameters END #########################################################################

## RUN ##############################################
cnfg_load_btn = Act('loadConfigurations()', 'Load Multiple Scan Parameters')
cnfg_lookup = dict()  # displayed file name -> full path of a loaded .kkb file
cnfg_options = Par('string', '', options=[''], command="applyConfiguration()")
cnfg_options.title = 'Read'
start_scan = Act('runSingleScan()', '############# Run Single Scan #############')
cnfg_run_btn = Act('runConfigurations()', '############# Run Multiple Scans #############')
g0 = Group('Execute Scans')
g0.numColumns = 1
g0.add(start_scan, cnfg_load_btn, cnfg_options, cnfg_run_btn)
## Save/Load Configuration END############################################################################

def saveConfiguration():
    """Pickle the current ConfigurationModel to a user-chosen .kkb file.

    Wire format: the string 'KKB', then alternating attribute-name /
    attribute-value records for every public, non-method model attribute
    (iterated via dir(), i.e. in sorted attribute order).
    """
    file = open_file_dialog(type=SAVE_TYPE, ext=['*.kkb'])
    try:
        fh = open(file, 'w')
    except:
        print 'not saved'
        return
    try:
        p = Pickler(fh)
        # header
        p.dump('KKB')
        # content
        model = ConfigurationModel()
        for att in dir(model):
            att_value = getattr(model, att)
            if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                p.dump(att)
                p.dump(att_value)
        print 'saved'
    finally:
        fh.close()

def loadConfigurations():
    """Let the user pick several .kkb files, validate each against the
    saveConfiguration() format, and publish the valid ones in
    cnfg_lookup/cnfg_options.  Invalid files are reported and skipped."""
    fileList = open_file_dialog(type=MULTI_TYPE, ext=['*.kkb'])
    if not fileList:
        return
    finalDict = dict()
    finalNames = []
    for path in fileList:
        fh = open(path, 'r')
        try:
            p = Unpickler(fh)
            if p.load() != 'KKB':
                print 'ERROR:', os.path.basename(path)
            else:
                model = ConfigurationModel()
                # set defaults
                model.negative = False # old models may not have this attribute
                for att in dir(model):
                    att_value = getattr(model, att)
                    if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                        if p.load() != att:
                            print 'FORMAT ERROR:', os.path.basename(path)
                            break
                        setattr(model, att, p.load())
                else:
                    # for/else: only reached when no FORMAT ERROR broke the loop
                    name = os.path.basename(path)
                    finalDict[name] = path
                    finalNames.append(name)
        finally:
            fh.close()
    cnfg_lookup.clear()
    cnfg_lookup.update(finalDict)
    cnfg_options.options = finalNames
    cnfg_options.value = finalNames[0] if finalNames else ''
    # time.sleep(0.5)

def applyConfiguration():
    """Unpickle the .kkb file selected in cnfg_options and apply it to the GUI."""
    file = str(cnfg_options.value)
    if file is None or file == 'None' or file.strip() == '':
        return
    fh = open(cnfg_lookup[file], 'r')
    try:
        p = Unpickler(fh)
        if p.load() != 'KKB':
            print 'ERROR:', file
        else:
            model = ConfigurationModel()
            for att in dir(model):
                att_value = getattr(model, att)
                if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                    if p.load() != att:
                        print 'FORMAT ERROR:', file
                        break
                    setattr(model, att, p.load())
            else:
                # print 'read:', file
                model.apply()
    finally:
        fh.close()

def runConfigurations():
    """Run every loaded configuration in sequence: apply it to the GUI, then
    re-read the model from its file and start the scan with that model."""
    checkInstrumentReady()
    for file in cnfg_options.options:
        fh = open(cnfg_lookup[file], 'r')
        try:
            # command is detached while the value is set -- presumably to stop
            # the value-change callback firing applyConfiguration() twice;
            # it is restored in the finally block below
            cnfg_options.command = ''
            cnfg_options.value = file
            applyConfiguration()
            p = Unpickler(fh)
            if p.load() != 'KKB':
                print 'ERROR:', file
            else:
                model = ConfigurationModel()
                for att in dir(model):
                    att_value = getattr(model, att)
                    if (att.find('_') != 0) and ('instancemethod' not in str(type(att_value))):
                        if p.load() != att:
                            print 'FORMAT ERROR:', file
                            break
                        setattr(model, att, p.load())
                else:
                    print 'run:', file
                    startScan(model)
        finally:
            cnfg_options.command = 'applyConfiguration()'
            fh.close()

# # Plot
tubes_label = Par('label', 'Main Detector:')
tubes_label.colspan = 1
combine_tube0 = Par('bool', True)
combine_tube0.title = ' Tube 0'
combine_tube0.colspan = 1
combine_tube1 = Par('bool', True)
combine_tube1.title = ' Tube 1'
combine_tube1.colspan = 1
combine_tube2 = Par('bool', True)
combine_tube2.title = ' Tube 2'
combine_tube2.colspan = 1
combine_tube3 = Par('bool', True)
combine_tube3.title = ' Tube 3'
combine_tube3.colspan = 1
combine_tube4 = Par('bool', True)
combine_tube4.title = ' Tube 4'
combine_tube4.colspan = 1
combine_tube6 = Par('bool', False)
combine_tube6.title = ' Tube 6'
combine_tube6.colspan = 1
combine_mode = Par('string', 'combined', options=['individual', 'combined'])
combine_mode.title = ' Mode'
combine_mode.colspan = 1
trans_tube_label = Par('label', 'Trans Detector: ')
trans_tube_label.colspan = 2
check_tube9 = Par('bool', True)
check_tube9.title = ' Tube 9: Si (311)'
check_tube9.colspan = 2
check_tube10 = Par('bool', False)
check_tube10.title = ' Tube 10: Si (111)'
check_tube10.colspan = 2
# steps_space = Par('space', '')
# steps_space.colspan = 12
scan_variable_plot = Par('string', 'm2om [deg]', options=[
    'pmom [deg]', 'pmchi [deg]',
    'm1om [deg]', 'm1chi [deg]', 'm1x [mm]',
    'm2om [deg]', 'm2chi [deg]', 'm2x [mm]', 'm2y [mm]',
    'mdet [mm]',
    'ss1u [mm]', 'ss1d [mm]', 'ss1l [mm]', 'ss1r [mm]',
    'ss2u [mm]', 'ss2d [mm]', 'ss2l [mm]', 'ss2r [mm]',
    'ss1vg [mm]', 'ss1vo [mm]', 'ss1hg [mm]', 'ss1ho [mm]',
    'ss2vg [mm]', 'ss2vo [mm]', 'ss2hg [mm]', 'ss2ho [mm]'])
scan_variable_plot.title = 'Scan Variable'
scan_variable_plot.colspan = 1
scan_variable_sorting = Par('bool', True)
scan_variable_sorting.title = 'Sorting'
scan_variable_sorting.colspan = 1 btnPlot = Act('btnPlot_clicked()', 'Plot Selected Data Set') btnPlot.colspan = 8 g0 = Group('Plotting') g0.numColumns = 7 g0.add(tubes_label, combine_tube0, combine_tube1, combine_tube2, combine_tube3, combine_tube4, combine_tube6, combine_mode, trans_tube_label, check_tube9, check_tube10, scan_variable_plot, scan_variable_sorting, btnPlot) # export to csv # btnExport = Act('export_clicked()', 'Export to CSV') ################################# SLIT 2 ########################################################## ss2u0 = 0 # 2.00 ss2d0 = 0 # 1.40 ss2l0 = 0 # 5 0.50 ss2r0 = 0 # -2 -1.00 (ss2vg, ss2vo) = getSlitGapAndOffset('/instrument/slits/ss2u', ss2u0, '/instrument/slits/ss2d', ss2d0) (ss2hg, ss2ho) = getSlitGapAndOffset('/instrument/slits/ss2r', ss2r0, '/instrument/slits/ss2l', ss2l0) pss_ss2vg = Par('string', '%.1f' % ss2vg, options=pss_ss1vg.options, command='updateOffset(pss_ss2vg, pss_ss2vo)') pss_ss2vg.title = 'Vertical Opening (mm)' pss_ss2vo = Par('float', ss2vo) pss_ss2vo.title = 'Vertical Offset (mm)' pss_ss2hg = Par('string', '%.1f' % ss2hg, options=pss_ss1hg.options, command='updateOffset(pss_ss2hg, pss_ss2ho)') pss_ss2hg.title = 'Horizontal Opening (mm)' pss_ss2ho = Par('float', ss2ho) pss_ss2ho.title = 'Horizontal Offset (mm)' g0 = Group('Post-Sample Slit') g0.numColumns = 2 g0.add(pss_ss2vg, pss_ss2vo, pss_ss2hg, pss_ss2ho) ################################# SLIT 2 END ########################################################## ################################# CURVE FITTING START ########################################################## g_fit = Group('Fitting') g_fit.numColumns = 2 #data_name = Par('string', 'total_counts', \ # options = ['total_counts', 'bm1_counts', 'bm2_counts']) #normalise = Par('bool', True) #axis_name = Par('string', '') #axis_name.enabled = True #auto_fit = Par('bool', False) #fit_min = Par('float', 'NaN') #fit_min.title = 'min x' #fit_max = Par('float', 'NaN') #fit_max.title = 'max x' peak_pos = 
Par('float', 'NaN') peak_pos.title = 'fitting peak position' FWHM = Par('float', 'NaN') FWHM.title = 'fitting FWHM' fact = Act('fit_curve()', 'Fit Again') fact.colspan = 2 #offset_done = Par('bool', False) #act3 = Act('offset_s2()', 'Set Device Zero Offset') g_fit.add(peak_pos, FWHM, fact) def fit_curve(): global Plot1 ds = Plot1.ds if len(ds) == 0: log('Error: no curve to fit in Plot1.\n') return for d in ds: if d.title == 'fitting': Plot1.remove_dataset(d) d0 = ds[0] fitting = Fitting(GAUSSIAN_FITTING) try: fitting.set_histogram(d0) fitting.fitter.setResolutionMultiple(50) val = peak_pos.value if val == val: fitting.set_param('mean', val) val = FWHM.value if val == val: fitting.set_param('sigma', math.fabs(val / 2.35482)) res = fitting.fit() res.var[:] = 0 res.title = 'fitting' Plot1.add_dataset(res) Plot1.pv.getPlot().setCurveMarkerVisible(Plot1.__get_NXseries__(res), False) mean = fitting.params['mean'] mean_err = fitting.errors['mean'] FWHM.value = 2.35482 * math.fabs(fitting.params['sigma']) FWHM_err = 5.54518 * math.fabs(fitting.errors['sigma']) log('POS_OF_PEAK=' + str(mean) + ' +/- ' + str(mean_err)) log('FWHM=' + str(FWHM.value) + ' +/- ' + str(FWHM_err)) log('Chi2 = ' + str(fitting.fitter.getQuality())) peak_pos.value = fitting.mean # print fitting.params except: # traceback.print_exc(file = sys.stdout) log('can not fit\n') ################################# CURVE FITTING END ########################################################## def waitUntilSicsIs(status, dt=0.2): controller = sics.getSicsController() timeout = 5 while True: sics.handleInterrupt() count = 0 while not controller.getServerStatus().equals(status) and count < timeout: time.sleep(dt) count += dt if controller.getServerStatus().equals(status): break else: controller.refreshServerStatus() sics.handleInterrupt() def setStepTitles(): if logscale_position.value: for stepInfoItem in stepInfo[1:]: stepInfoItem['stepSize'].title = "Step Factor [%]" else: for stepInfoItem in stepInfo[1:]: 
stepInfoItem['stepSize'].title = "Step Size [deg]" __UI__.updateUI() def setTemplate(): try: matches = [item for item in steps_templates_list if item[0] == steps_templates.value] if len(matches) != 1: steps_templates.value = None return template = matches[0] # ignore '----' if template[0][0] == '-': steps_templates.value = None return scan_mode.value = template[1] if template[2] == 'logscale': logscale_position.value = True elif template[2] == 'linear': logscale_position.value = False setStepTitles() # by default templates measure in positive direction negative_steps.value = False setScanMode() headers = 3 for i in xrange(len(template) - headers): templateItem = template[i + headers] stepInfoItem = stepInfo[i] stepInfoItem['enabled' ].value = True stepInfoItem['dataPoints'].enabled = True stepInfoItem['dataPoints'].value = templateItem[0] stepInfoItem['stepSize' ].enabled = True stepInfoItem['stepSize' ].value = templateItem[1] stepInfoItem['preset' ].enabled = True stepInfoItem['preset' ].value = templateItem[2] stepInfoItem['maxTime' ].enabled = scan_min_time.enabled stepInfoItem['maxTime' ].value = templateItem[3] for i in xrange(len(template) - headers, len(stepInfo)): stepInfoItem = stepInfo[i] stepInfoItem['enabled' ].value = False stepInfoItem['dataPoints'].enabled = False stepInfoItem['stepSize' ].enabled = False stepInfoItem['preset' ].enabled = False stepInfoItem['maxTime' ].enabled = False except: pass def setScanMode(): if scan_mode.value == 'time': scan_min_time.enabled = False for stepInfoItem in stepInfo: stepInfoItem['maxTime'].enabled = False else: scan_min_time.enabled = True for stepInfoItem in stepInfo: stepInfoItem['maxTime'].enabled = stepInfoItem['enabled'].value def setEnabled(index): steps_templates.value = None stepItem = stepInfo[index] value = stepItem['enabled'].value stepItem['dataPoints'].enabled = value stepItem['stepSize' ].enabled = value stepItem['preset' ].enabled = value stepItem['maxTime' ].enabled = value and 
scan_min_time.enabled setTemplate() def getScan(): scan = { 'angles': [], 'presets': [], 'maxTimes': [], 'groups': [] } first = True angle_ref = scan_reference.value angle = angle_ref logscale = False # first data points are always on a linear scale negative = bool(negative_steps.value) for stepInfoItem in stepInfo: if stepInfoItem['enabled'].value: dataPoints = stepInfoItem['dataPoints'].value stepSize = stepInfoItem['stepSize' ].value preset = stepInfoItem['preset' ].value maxTime = stepInfoItem['maxTime' ].value if (dataPoints > 0) and (stepSize <= 0.0): raise Exception('step sizes have to be positive') for i in xrange(dataPoints): if first and (i == 0): angle -= ((dataPoints - 1) / 2.0) * stepSize; elif logscale: # for logscale stepSize is a stepFactor angle = angle_ref + (angle - angle_ref) * (1.0 + 0.01 * stepSize) else: angle += stepSize #print angle scan['angles' ].append(angle) scan['presets' ].append(preset) scan['maxTimes'].append(maxTime) if i == 0: scan['groups'].append(angle) first = False logscale = bool(logscale_position.value) if negative: # negate angles with reference to zero angle scan['angles'] = [angle_ref - (angle - angle_ref) for angle in scan['angles']] return scan def wait_for_idle(): c_time = time.time() while not sics.getSicsController().getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): time.sleep(0.1) if time.time() - c_time > 5: serverStatus = sics.get_status() c_time = time.time() def checkInstrumentReady(): ''' check instrument ready ''' all_ready = False is_ready = False is_shielded = False msg = None try: is_ready = sics.getValue('/instrument/status/ready').getStringData() == 'TRUE' is_shielded = sics.getValue('/instrument/GreenPolyShield/greenpolyshield').getStringData().lower() == 'in' if not is_ready: if not is_shielded: msg = 'The instrument is not ready and the green polyshield is not applied. ' \ + 'Please get the '\ + 'instrument ready and apply the polyshield. Then click on "Yes" to continue. 
\n'\ + 'Do you want to continue?' else: msg = 'The instrument is not ready according to the SIS status. ' \ + 'Please get the '\ + 'instrument ready. Then click on "Yes" to continue. \n'\ + 'Do you want to continue?' else: if not is_shielded: msg = 'The green polyshield is not applied. ' \ + 'Please apply the polyshield. Then click on "Yes" to continue. \n'\ + 'Do you want to continue?' all_ready = is_ready and is_shielded except: pass if not all_ready: if not msg: msg = 'The instrument is not ready according to the SIS status. ' \ + 'Please get the '\ + 'instrument ready. Then click on "Yes" to continue. \n'\ + 'Do you want to continue?' is_confirmed = open_question(msg) if not is_confirmed: slog('Instrument is not ready. Quit the scan.') return else: try: is_ready = sics.getValue('/instrument/status/ready').getStringData() == 'TRUE' is_shielded = sics.getValue('/instrument/GreenPolyShield/greenpolyshield').getStringData().lower() == 'in' except: pass if not is_ready: slog('scan continued without instrument ready') if not is_shielded: slog('scan continued without green polysheild') def runSingleScan(): checkInstrumentReady() startScan(ConfigurationModel()) def startScan(configModel): ''' setup ''' scanVariable = configModel.scanVariable crystal = configModel.crystal mode = configModel.mode MainDeadTime = 1.08E-6 TransDeadTime = 1.08E-6 if 'Si111' in crystal: empLevel = 0.3 bkgLevel = 0.21 dOmega = 2.3E-6 gDQv = 0.0586 gDQh = 0 wavelength = 4.74 TransmissionTube = 10 TransBackground = 0 # counts per second elif 'Si311' in crystal: empLevel = 0.34 bkgLevel = 0.21 dOmega = 4.6E-7 gDQv = 0.117 gDQh = 0 wavelength = 2.37 TransmissionTube = 9 TransBackground = 0 # counts per second else: print 'selected crystal is invalid' return ''' angles ''' scan = configModel.scan scan_angleMin = builtin_min(scan['angles']) scan_angleMax = builtin_max(scan['angles']) if ('m1om' in scanVariable) or ('m2om' in scanVariable): tolerance = 6 approved = False if 'Si111' in crystal: if 
(180 - tolerance <= scan_angleMin) and (scan_angleMax <= 180 + tolerance): approved = True elif 'Si311' in crystal: if (0 - tolerance <= scan_angleMin) and (scan_angleMax <= 0 + tolerance): approved = True if not approved: print 'angle out of range' return ''' execution ''' sics.execute('hset user/name ' + configModel.user_name) sics.execute('hset user/email ' + configModel.user_email) sics.execute('hset sample/name ' + configModel.sample_name) sics.execute('hset sample/description ' + configModel.sample_description) sics.execute('hset sample/thickness %g' % configModel.sample_thickness) sics.execute('hset experiment/bkgLevel %g' % bkgLevel) sics.execute('hset experiment/empLevel %g' % empLevel) sics.execute('hset instrument/detector/MainDeadTime %g' % MainDeadTime) sics.execute('hset instrument/detector/TransDeadTime %g' % TransDeadTime) sics.execute('hset instrument/detector/TransBackground %g' % TransBackground) sics.execute('hset instrument/detector/TransmissionTube %i' % TransmissionTube) sics.execute('hset instrument/crystal/dOmega %g' % dOmega) sics.execute('hset instrument/crystal/gDQv %g' % gDQv) sics.execute('hset instrument/crystal/gDQh %g' % gDQh) sics.execute('hset instrument/crystal/wavelength %g' % wavelength) sics.execute('hset instrument/crystal/scan_variable ' + scanVariable); sicsController = sics.getSicsController() # slits def getSlitValues(gap, offset, a0, b0, aOpen, bOpen): if gap == 'fully opened': return (aOpen, bOpen) if gap == 'fully closed': gap = -5.0 offset = 0.0 a = a0 + 0.5 * float(gap) + float(offset) b = b0 - 0.5 * float(gap) + float(offset) return (a, b) ss1vg = configModel.ss1vg ss1vo = configModel.ss1vo ss1hg = configModel.ss1hg ss1ho = configModel.ss1ho ss2vg = configModel.ss2vg ss2vo = configModel.ss2vo ss2hg = configModel.ss2hg ss2ho = configModel.ss2ho (ss1u, ss1d) = getSlitValues(ss1vg, ss1vo, ss1u0, ss1d0, 35.8, -38.8) (ss1r, ss1l) = getSlitValues(ss1hg, ss1ho, ss1r0, ss1l0, 57.0, -58.0) (ss2u, ss2d) = getSlitValues(ss2vg, 
ss2vo, ss2u0, ss2d0, 37.0, -39.5) (ss2r, ss2l) = getSlitValues(ss2hg, ss2ho, ss2r0, ss2l0, 35.0, -35.0) # apply slits run = {} run['ss1u'] = ss1u run['ss1d'] = ss1d run['ss1r'] = ss1r run['ss1l'] = ss1l run['ss2u'] = ss2u run['ss2d'] = ss2d run['ss2r'] = ss2r run['ss2l'] = ss2l # sics.multiDrive(run) dc = 'drive' for key in run: dc += ' ' + key + ' ' + str(run[key]) sics.execute(dc) time.sleep(5) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) ''' sics.execute('run ss1u %.2f' % ss1u) sics.execute('run ss1d %.2f' % ss1d) sics.execute('run ss1r %.2f' % ss1r) sics.execute('run ss1l %.2f' % ss1l) sics.execute('run ss2u %.2f' % ss2u) sics.execute('run ss2d %.2f' % ss2d) sics.execute('run ss2r %.2f' % ss2r) sics.execute('run ss2l %.2f' % ss2l) ''' # drive sample environment devices slog('check sample envirment setup') multiDev = {} se_wait = 0 if configModel.se_enabled1: slog('sample controller 1 is enabled') multiDev[configModel.se_ctr1] = configModel.se_pos1 if configModel.se_wait1 > se_wait: se_wait = configModel.se_wait1 if configModel.se_enabled2: slog('sample controller 2 is enabled') multiDev[configModel.se_ctr2] = configModel.se_pos2 if configModel.se_wait2 > se_wait: se_wait = configModel.se_wait2 if configModel.se_enabled3: slog('sample controller 3 is enabled') multiDev[configModel.se_ctr3] = configModel.se_pos3 if configModel.se_wait3 > se_wait: se_wait = configModel.se_wait3 if len(multiDev) > 0: slog('drive sample environment ' + str(multiDev)) sics.multiDrive(multiDev) if se_wait > 0: slog('wait for ' + str(se_wait) + ' seconds') time.sleep(se_wait) # load sample positions sample_stage_name = configModel.sample_stage sample_positions = str(configModel.sample_position) if (len(sample_positions) == 0) or (sample_positions == 'fixed'): samz_list = [None] else: samz_list = [] stage = SAMPLE_STAGES.get_stage_by_name(sample_stage_name) if stage is None: raise 'Invalid stage name ' + str(sample_stage_name) samz_value = stage.get_samz(sample_positions) 
samz_list.append(samz_value) print samz_list for samz in samz_list: sics.execute('histmem stop') time.sleep(3) if mode == 'ba': sics.execute('histmem mode unlimited') sics.execute('histmem ba enable') else: sics.execute('histmem mode time') sics.execute('histmem ba disable') if samz is not None: print 'run samz %.2f' % samz sics.execute('run samz %.2f' % samz) # sics.execute('prun samz 2' % samz) !!! time.sleep(1) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) sics.execute('newfile HISTOGRAM_XYT') # sics.execute('autosave 60') # 60 seconds time.sleep(1) # start/stop hmm if mode == 'count_roi': sics.execute('histmem preset 1') time.sleep(1) sics.execute('histmem start') time.sleep(5) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) sics.execute('histmem stop') print 'frames:', len(scan['angles']) count_rate_history = [] for frame_index in xrange(len(scan['angles'])): angle = scan['angles' ][frame_index] preset = scan['presets' ][frame_index] maxTime = scan['maxTimes'][frame_index] print 'drive %s %.6f' % (scanVariable, angle) # sics.drive(scanVariable, float(angle)) sics.execute('drive %s %.6f' % (scanVariable, angle)) time.sleep(10) waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) print 'drive done' time.sleep(1) if mode == 'ba': sics.execute('histmem ba roi roi') sics.execute('histmem ba monitor %i' % 1) sics.execute('histmem ba mintime %i' % configModel.min_time) sics.execute('histmem ba maxtime %i' % maxTime) sics.execute('histmem ba maxdetcount %i' % preset) sics.execute('histmem ba maxbmcount -1') sics.execute('histmem ba undermintime ba_maxdetcount') print 'histmem start' sics.execute('histmem start block') time0 = time.time() while sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): if time.time() - time0 > 15.0: print 'WARNING: HM may not have started counting. Gumtree will save anyway.' 
break else: time.sleep(0.1) time0 = time.time() waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) print 'time counted (estimate):', float(time.time() - time0) else: print 'histmem start' while True: if mode == 'count_roi': sics.execute('histmem preset %i' % maxTime) else: sics.execute('histmem preset %i' % preset) time.sleep(5) sics.execute('histmem start') time.sleep(5) if mode == 'count_roi': print 'count_roi' time.sleep(configModel.min_time) count_roi = 0 while not sicsController.getServerStatus().equals(ServerStatus.EAGER_TO_EXECUTE): try: count_roi = int(sicsext.runCommand('hmm configure num_events_filled_to_count_roi')) # print count_roi if count_roi > preset: print count_roi print 'reached desired count_roi' sics.execute('histmem pause') time.sleep(1) break except: pass time.sleep(0.5) break else: waitUntilSicsIs(ServerStatus.EAGER_TO_EXECUTE) valid = False for i in xrange(10): time.sleep(1) detector_time = sics.getValue('/instrument/detector/time').getFloatData() valid = (detector_time >= preset - 1) or (detector_time >= preset * 0.90) if valid: break print 'detector_time:', detector_time if valid: break else: print 'scan was invalid and needs to be repeated' # sics.execute('histmem stop') sics.execute('save %i' % frame_index) frame_index += 1 print 'histmem done' #check if in background if early_exit_enabled.value : try: roi_counts = float(sics.get_raw_value('hmm configure num_events_filled_to_count_roi')) roi_time = sics.getValue('/instrument/detector/time').getFloatData() roi_rate = roi_counts / roi_time print 'measured count rate:', roi_rate count_rate_history.append(roi_rate) bkg_frames = background_frames.value bkg_range = background_threshold.value if (len(count_rate_history) >= bkg_frames) and (builtin_max(count_rate_history[-bkg_frames:]) < bkg_range): print 'background reached' print 'scan completed (early exit)' break except: pass sics.execute('newfile clear') # sics.execute('autosave 0') # disable autosave # Get output filename filenameController = 
sicsController.findDeviceController('datafilename') savedFilename = filenameController.getValue().getStringData() print 'saved:', savedFilename sics.execute('histmem ba disable') # print 'fit the curve' # fit_curve() print 'done' print def btnPlotSteps_clicked(): scan = getScan() # print 'zero angle:' # print scan_reference.value print '' print 'scan variable range [%f, %f]' % (scan['angles'][0], scan['angles'][-1]) print '' #Plot1.clear() #Plot2.clear() scan_angleMin = builtin_min(scan['angles']) scan_angleMax = builtin_max(scan['angles']) if scan_angleMin == 0 and scan_angleMax == 0: print 'please select a scan template' return if scan_angleMin == scan_angleMax: print 'the min angle and max angle can not be the same' return dummy = zeros(2) dummy.axes[0] = [scan_angleMin, scan_angleMax] #print [scan_angleMin, scan_angleMax] if Plot1.ds != None: Plot1.clear_masks() Plot1.add_dataset(dummy) Plot1.title = 'Preview' Plot1.x_label = 'm2om' Plot1.y_label = 'counts per sec' # Plot1.x_range = [scan_angleMin,scan_angleMax] inclusive = True angles = scan['angles'] for i in xrange(1, len(angles)): xL = angles[i - 1] xH = angles[i ] Plot1.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in xrange(len(groups)): Plot1.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # convert to q PLOT 2 crystal = str(crystal_name.value) if 'Si111' in crystal: wavelength = 4.74 elif 'Si311' in crystal: wavelength = 2.37 else: wavelength = float('nan') q = convert2q(angles, scan_reference.value, wavelength) scan_angleMin = builtin_min(q) scan_angleMax = builtin_max(q) if isnan(scan_angleMin) or isnan(scan_angleMax): print 'please check the wavelength' return if scan_angleMin == scan_angleMax: print 'the min q and max q can not be the same' return dummy = zeros(2) dummy.axes[0] = [scan_angleMin, scan_angleMax] if Plot2.ds != None: Plot2.clear_masks() Plot2.add_dataset(dummy) Plot2.title = 'Preview' Plot2.x_label = 'q [1/A]' Plot2.y_label = 
'counts per sec' Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.x_range = [1e-6, q[-1]] for i in xrange(1, len(q)): xL = q[i - 1] xH = q[i ] Plot2.add_mask_1d(xL, xH, '', inclusive) inclusive = not inclusive groups = scan['groups'] for i in xrange(len(groups)): Plot2.add_mask_1d(groups[i], groups[i] + 1e-12, str(i + 1), True) # print "angles" # print angles # print q print '' print 'scan q-range [%f, %f]' % (q[0], q[-1]) print '' def openDataset(path): ds = df[str(path)] ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm') # ds.__iDictionary__.addEntry('hmm', 'entry1/data/hmm_xy') ds.__iDictionary__.addEntry('time', 'entry1/instrument/detector/time') ds.__iDictionary__.addEntry('m1om', 'entry1/instrument/crystal/m1om') ds.__iDictionary__.addEntry('m1chi', 'entry1/instrument/crystal/m1chi') ds.__iDictionary__.addEntry('m1x', 'entry1/instrument/crystal/m1x') ds.__iDictionary__.addEntry('m2om', 'entry1/instrument/crystal/m2om') ds.__iDictionary__.addEntry('m2chi', 'entry1/instrument/crystal/m2chi') ds.__iDictionary__.addEntry('m2x', 'entry1/instrument/crystal/m2x') ds.__iDictionary__.addEntry('m2y', 'entry1/instrument/crystal/m2y') ds.__iDictionary__.addEntry('mdet', 'entry1/instrument/crystal/mdet') ds.__iDictionary__.addEntry('pmom', 'entry1/instrument/crystal/pmom') ds.__iDictionary__.addEntry('pmchi', 'entry1/instrument/crystal/pmchi') ds.__iDictionary__.addEntry('ss1u', 'entry1/instrument/slits/ss1u') ds.__iDictionary__.addEntry('ss1d', 'entry1/instrument/slits/ss1d') ds.__iDictionary__.addEntry('ss1r', 'entry1/instrument/slits/ss1r') ds.__iDictionary__.addEntry('ss1l', 'entry1/instrument/slits/ss1l') ds.__iDictionary__.addEntry('ss2u', 'entry1/instrument/slits/ss2u') ds.__iDictionary__.addEntry('ss2d', 'entry1/instrument/slits/ss2d') ds.__iDictionary__.addEntry('ss2r', 'entry1/instrument/slits/ss2r') ds.__iDictionary__.addEntry('ss2l', 'entry1/instrument/slits/ss2l') ds.__iDictionary__.addEntry('ss1vo', 'entry1/instrument/slits/ss1vo') 
ds.__iDictionary__.addEntry('ss1vg', 'entry1/instrument/slits/ss1vg') ds.__iDictionary__.addEntry('ss1ho', 'entry1/instrument/slits/ss1ho') ds.__iDictionary__.addEntry('ss1hg', 'entry1/instrument/slits/ss1hg') ds.__iDictionary__.addEntry('ss2vo', 'entry1/instrument/slits/ss2vo') ds.__iDictionary__.addEntry('ss2vg', 'entry1/instrument/slits/ss2vg') ds.__iDictionary__.addEntry('ss2ho', 'entry1/instrument/slits/ss2ho') ds.__iDictionary__.addEntry('ss2hg', 'entry1/instrument/slits/ss2hg') ds.__iDictionary__.addEntry('samplename', 'entry1/sample/name') ds.__iDictionary__.addEntry('wavelength', 'entry1/instrument/crystal/wavelength') ds.__iDictionary__.addEntry('TimeStamp', 'entry1/time_stamp') return ds def btnPlot_clicked(): #Plot1.clear() #Plot2.clear() fns = [] for sds in __DATASOURCE__.getSelectedDatasets(): fns.append(sds.getLocation()) if len(fns) != 1: print 'select one dataset' return path = fns[0] basename = os.path.basename(str(path)) basename = basename[:basename.find('.nx.hdf')] ds = openDataset(path) scanVariable = str(scan_variable.value) scanVariable = scanVariable[:scanVariable.find(' ')] scanVariable = ds[scanVariable] samplename = str(ds.samplename) sorting = scan_variable_sorting.value if sorting: info = sorted(enumerate(scanVariable), key=lambda item:item[1]) scanVariable = [item[1] for item in info] shape = ds.shape if shape[0] <= 1: print 'Must have at least 2 scan positions' return n = shape[0] # tubes data = zeros(n) tids = [] if combine_tube0.value: tids.append(0) if combine_tube1.value: tids.append(1) if combine_tube2.value: tids.append(2) if combine_tube3.value: tids.append(3) if combine_tube4.value: tids.append(4) if combine_tube6.value: tids.append(6) Plot1.clear() if str(combine_mode.value) == 'individual': for tid in tids: if ds.hmm.ndim == 4: data[:] = ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] = ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: 
data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] # dataF = data.float_copy() # dataF.title = 'Tube %i' % tid # Plot1.add_dataset(dataF) Plot1.title = 'Count Rate (individual)' else: for tid in tids: if ds.hmm.ndim == 4: data[:] += ds.hmm[:, 0, :, tid].sum(0) # hmm else: data[:] += ds.hmm[:, :, tid].sum(0) # hmm_xy if data.size == 1: data[0] = data[0] * 1.0 / ds.time else: data[:] = data[:] * 1.0 / ds.time if sorting: data[:] = [data[item[0]] for item in info] # sorting data.var[:] = 0 # total_counts / (ds.time * ds.time) axis0 = data.axes[0] axis0[:] = scanVariable[:] data.title = 'Tubes ' + str(tids) Plot1.set_dataset(data) Plot1.set_mouse_follower_precision(6, 2, 2) Plot1.title = basename + ' (combined): ' + samplename # Plot1.title = Plot1.title + ' ' + basename if Plot1.ds is not None: Plot1.x_label = str(scan_variable_plot.value) Plot1.y_label = 'counts per sec' Plot2.clear() time.sleep(0.3) ds0 = Plot1.ds[0] # # don't understand how this works xMax = 0 yMax = 0 for i in xrange(len(ds0)): if yMax < ds0[i]: xMax = ds0.axes[0][i] yMax = ds0[i] peakangle = xMax q = convert2q(scanVariable, peakangle, ds.wavelength) data = Dataset(data, axes=[q[:]]) # data.axes[0] = q[:] Plot2.set_dataset(data) Plot2.set_mouse_follower_precision(6, 2, 2) Plot2.x_label = 'q [1/A]' Plot2.y_label = 'counts per sec' # Plot1.title = 'Main Detector ' + basename + ': ' + samplename # Plot2.title = 'Sample: ' + samplename + '; ' + sampledescription Plot2.title = basename + ' (combined): ' + samplename Plot2.set_log_x_on(True) Plot2.set_log_y_on(True) Plot2.set_marker_on(True) # plotXMax = Par('float', q[-1]) # Plot2.x_range = [1e-6, plotXMax.value] if q[-1] > 1e-6 : Plot2.x_range = [1e-6, q[-1]] fit_curve() def convert2q(angles, reference, wavelength): if wavelength is list: wavelength = wavelength[0] wavelength = float(wavelength) deg2rad = 3.14159265359 / 180 f = 4 * 3.14159265359 / 
wavelength if bool(negative_steps.value): f *= -1.0 q = [(f * sin(deg2rad * (angle - reference) / 2)) for angle in angles] return q def __run_script__(fns): # Use the provided resources, please don't remove. global Plot1 global Plot2 global Plot3 print 'please press "Run Single Scan" or "Run Multiple Scans"' btnPlot_clicked() def __dispose__(): global Plot1 global Plot2 global Plot3 Plot1.clear() Plot2.clear() Plot3.clear() # # model class ConfigurationModel: def __init__(self): self.scanVariable = str(scan_variable.value) self.scanVariable = self.scanVariable[:self.scanVariable.find(' ')] self.crystal = str(crystal_name.value) self.mode = str(scan_mode.value) self.scan = getScan() self.scan_reference = scan_reference.value self.logscale = bool(logscale_position.value) self.negative = bool(negative_steps.value) self.stepInfo = [] for step in stepInfo: d = dict() for key in step.keys(): d[key] = step[key].value self.stepInfo.append(d); self.user_name = str(user_name.value) self.user_email = str(user_email.value) self.sample_name = str(sample_name.value) self.sample_description = str(sample_description.value) self.sample_thickness = float(sample_thickness.value) # vertical/horizontal pre-slit self.ss1vg = float(pss_ss1vg.value) self.ss1vo = float(pss_ss1vo.value) self.ss1hg = float(pss_ss1hg.value) self.ss1ho = float(pss_ss1ho.value) # vertical/horizontal post-slit self.ss2vg = float(pss_ss2vg.value) self.ss2vo = float(pss_ss2vo.value) self.ss2hg = float(pss_ss2hg.value) self.ss2ho = float(pss_ss2ho.value) self.se_enabled1 = bool(se_enabled1.value) self.se_ctr1 = str(se_ctr1.value) self.se_pos1 = float(se_pos1.value) self.se_wait1 = int(se_wait1.value) self.se_enabled2 = bool(se_enabled2.value) self.se_ctr2 = str(se_ctr2.value) self.se_pos2 = float(se_pos2.value) self.se_wait2 = int(se_wait2.value) self.se_enabled3 = bool(se_enabled3.value) self.se_ctr3 = str(se_ctr3.value) self.se_pos3 = float(se_pos3.value) self.se_wait3 = int(se_wait3.value) # load sample 
positions self.sample_stage = str(scan_sample_stage.value) self.sample_position = str(scan_sample_position.value) self.min_time = int(scan_min_time.value) # load early exit self.early_exit_enabled = bool(early_exit_enabled.value) self.bkg_frames = int(background_frames.value) self.bkg_threshold = float(background_threshold.value) def apply(self): for option in scan_variable.options: if self.scanVariable == option[:option.find(' ')]: scan_variable.value = option crystal_name.value = self.crystal scan_mode.value = self.mode logscale_position.value = self.logscale negative_steps.value = self.negative scan_reference.value = self.scan_reference i = 0 for step in self.stepInfo: for key in step.keys(): stepInfo[i][key].value = step[key] setEnabled(i) i += 1 setScanMode() user_name.value = self.user_name user_email.value = self.user_email sample_name.value = self.sample_name sample_description.value = self.sample_description sample_thickness.value = self.sample_thickness # vertical/horizontal pre-slit pss_ss1vg.value = self.ss1vg pss_ss1vo.value = self.ss1vo pss_ss1hg.value = self.ss1hg pss_ss1ho.value = self.ss1ho # vertical/horizontal post-slit pss_ss2vg.value = self.ss2vg pss_ss2vo.value = self.ss2vo pss_ss2hg.value = self.ss2hg pss_ss2ho.value = self.ss2ho se_enabled1.value = self.se_enabled1 se_ctr1.value = self.se_ctr1 se_pos1.value = self.se_pos1 se_wait1.value = self.se_wait1 toggle_se(1) se_enabled2.value = self.se_enabled2 se_ctr2.value = self.se_ctr2 se_pos2.value = self.se_pos2 se_wait2.value = self.se_wait2 toggle_se(2) se_enabled3.value = self.se_enabled3 se_ctr3.value = self.se_ctr3 se_pos3.value = self.se_pos3 se_wait3.value = self.se_wait3 toggle_se(3) # load sample positions scan_sample_position.value = self.sample_position scan_sample_stage.value = self.sample_stage scan_min_time.value = self.min_time # load early exit early_exit_enabled.value = self.early_exit_enabled background_frames.value = self.bkg_frames background_threshold.value = 
self.bkg_threshold if early_exit_enabled.value : background_frames.enabled = True background_threshold.enabled = True else: background_frames.enabled = False background_threshold.enabled = False
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro

Author  :   windpro
E-mail  :   windprog@gmail.com
Date    :   14/12/26
Desc    :   Interface calling conventions (abstract base classes) for the
            HTTP caching layer.
"""
from abc import ABCMeta, abstractmethod


class BaseSpeed(object):
    """Interface for a rate/speed counter."""
    __metaclass__ = ABCMeta

    def add_one(self):
        pass


class BaseCheckCallback(object):
    """Callback interface that inspects an HTTP request/response pair."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def __call__(self, method, url, req_query_string, req_headers, req_data,
                 status_code, res_headers, res_data):
        # Decide based on the outcome of the HTTP request.
        pass


class BaseUpdate(object):
    """Cache-update policy interface.

    Args:
        expired: cache expiry time, in seconds.
        is_sync: behaviour on expiry; True waits for the fresh data to
            complete before returning.
        save_check_callback: decides whether the response should be cached.
        retry_limit: number of download retries (default 1).
        retry_check_callback: decides whether the download should be retried.

    Raises:
        ValueError: if a supplied callback is not a BaseCheckCallback
            instance.
    """
    __metaclass__ = ABCMeta

    def __init__(self, expired, is_sync, save_check_callback=None,
                 retry_limit=1, retry_check_callback=None):
        # Cache expiry time, in seconds.
        self.expired = expired
        # Behaviour on expiry: True blocks until the latest data is ready.
        self.is_sync = is_sync
        # Whether the response should be stored in the cache.
        self.save_check_callback = save_check_callback
        # Whether the download should be retried.
        self.retry_check_callback = retry_check_callback
        # Retry count.
        # FIX: was hard-coded to 1, silently ignoring the retry_limit
        # argument passed by the caller.
        self.retry_limit = retry_limit
        for callback in [save_check_callback, retry_check_callback]:
            if callback and not isinstance(callback, BaseCheckCallback):
                # FIX: instances generally have no __name__ attribute, so the
                # original `callback.__name__` raised AttributeError instead
                # of the intended ValueError; report the class name instead.
                raise ValueError(
                    '%s must be BaseCheckCallback subclass instance'
                    % type(callback).__name__)

    @abstractmethod
    def backend_call(self, method, url, req_query_string, req_headers,
                     req_data):
        # Run the refresh in the background.
        pass

    @abstractmethod
    def check_sync(self):
        # Whether execution should block.
        pass

    @abstractmethod
    def is_expired_incache(self, method, url, req_query_string, req_headers,
                           req_data, **kwargs):
        # Returns a tuple: (is_expired, is_in_cache).
        pass


class BaseRoute(object):
    """URL-routing interface."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def match(self, url):
        # Check whether the url satisfies this route's condition.
        pass

    @abstractmethod
    def dumps(self):
        # Serialize this route to a string.
        pass


class BaseHttpCache(object):
    """HTTP cache storage interface."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def find(self, method, url, req_query_string, req_headers, req_data):
        # Look up a cached response.
        pass

    @abstractmethod
    def get_update_time(self, method, url, req_query_string, req_headers,
                        req_data):
        # Time the cache entry was last updated.
        pass

    @abstractmethod
    def save(self, method, url, req_query_string, req_headers, req_data,
             status_code, res_headers, res_data):
        # Store a response in the cache.
        pass

    @abstractmethod
    def delete(self, method, url, req_query_string, req_headers, req_data):
        # Remove a cache entry.
        pass


# Commit message: 删除多余接口 ("remove redundant interface" — drops
# BaseRoute.dumps). The post-commit version of the module follows; its
# definitions shadow the ones above. The same retry_limit/__name__ fixes
# are applied here as well.

#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro

Author  :   windpro
E-mail  :   windprog@gmail.com
Date    :   14/12/26
Desc    :   Interface calling conventions (abstract base classes) for the
            HTTP caching layer.
"""
from abc import ABCMeta, abstractmethod


class BaseSpeed(object):
    """Interface for a rate/speed counter."""
    __metaclass__ = ABCMeta

    def add_one(self):
        pass


class BaseCheckCallback(object):
    """Callback interface that inspects an HTTP request/response pair."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def __call__(self, method, url, req_query_string, req_headers, req_data,
                 status_code, res_headers, res_data):
        # Decide based on the outcome of the HTTP request.
        pass


class BaseUpdate(object):
    """Cache-update policy interface (see the pre-commit version above)."""
    __metaclass__ = ABCMeta

    def __init__(self, expired, is_sync, save_check_callback=None,
                 retry_limit=1, retry_check_callback=None):
        # Cache expiry time, in seconds.
        self.expired = expired
        # Behaviour on expiry: True blocks until the latest data is ready.
        self.is_sync = is_sync
        # Whether the response should be stored in the cache.
        self.save_check_callback = save_check_callback
        # Whether the download should be retried.
        self.retry_check_callback = retry_check_callback
        # Retry count. FIX: honour the retry_limit argument.
        self.retry_limit = retry_limit
        for callback in [save_check_callback, retry_check_callback]:
            if callback and not isinstance(callback, BaseCheckCallback):
                # FIX: use the class name; instances lack __name__.
                raise ValueError(
                    '%s must be BaseCheckCallback subclass instance'
                    % type(callback).__name__)

    @abstractmethod
    def backend_call(self, method, url, req_query_string, req_headers,
                     req_data):
        # Run the refresh in the background.
        pass

    @abstractmethod
    def check_sync(self):
        # Whether execution should block.
        pass

    @abstractmethod
    def is_expired_incache(self, method, url, req_query_string, req_headers,
                           req_data, **kwargs):
        # Returns a tuple: (is_expired, is_in_cache).
        pass


class BaseRoute(object):
    """URL-routing interface (dumps() removed by this commit)."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def match(self, url):
        # Check whether the url satisfies this route's condition.
        pass


class BaseHttpCache(object):
    """HTTP cache storage interface."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def find(self, method, url, req_query_string, req_headers, req_data):
        # Look up a cached response.
        pass

    @abstractmethod
    def get_update_time(self, method, url, req_query_string, req_headers,
                        req_data):
        # Time the cache entry was last updated.
        pass

    @abstractmethod
    def save(self, method, url, req_query_string, req_headers, req_data,
             status_code, res_headers, res_data):
        # Store a response in the cache.
        pass

    @abstractmethod
    def delete(self, method, url, req_query_string, req_headers, req_data):
        # Remove a cache entry.
        pass
# ---------------------------------------------------------------------------
# "Before" version of the commit pair stored in this record.
# ---------------------------------------------------------------------------
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class EtsfIo(Package):
    """ETSF_IO is a library implementing the Nanoquanta/ETSF file format specifications.

    ETSF_IO enables an architecture-independent exchange of crystallographic
    data, electronic wavefunctions, densities and potentials, as well as
    spectroscopic data. It is meant to be used by quantum-physical and
    quantum-chemical applications relying upon Density Functional Theory
    (DFT).
    """

    homepage = "http://www.etsf.eu/resources/software/libraries_and_tools"
    url = "https://launchpad.net/etsf-io/1.0/1.0.4/+download/etsf_io-1.0.4.tar.gz"

    version('1.0.4', sha256='3140c2cde17f578a0e6b63acb27a5f6e9352257a1371a17b9c15c3d0ef078fa4')

    # NOTE(review): '+mpi' is referenced in the when= clause below but no
    # variant('mpi') is declared in this version — that is exactly what the
    # commit below fixes.
    depends_on("netcdf-fortran")
    depends_on("hdf5+mpi~cxx", when='+mpi')  # required for NetCDF-4 support

    def install(self, spec, prefix):
        # Standard autotools build: configure with NetCDF/HDF5 locations,
        # then make / make check / make install.
        options = ['--prefix=%s' % prefix]
        oapp = options.append

        # Specify installation directory for Fortran module files
        # Default is [INCLUDEDIR/FC_TYPE]
        oapp("--with-moduledir=%s" % prefix.include)

        # Netcdf4/HDF
        hdf_libs = "-L%s -lhdf5_hl -lhdf5" % spec["hdf5"].prefix.lib
        options.extend([
            "--with-netcdf-incs=-I%s" % spec["netcdf-fortran"].prefix.include,
            "--with-netcdf-libs=-L%s -lnetcdff -lnetcdf %s" % (
                spec["netcdf-fortran"].prefix.lib, hdf_libs),
        ])

        configure(*options)
        make()
        make("check")
        make("install")

etsf-io: added missing `mpi` variant (#23083)

The variant was mentioned in a depends_on directive, but never declared

# ---------------------------------------------------------------------------
# "After" version: identical except that the previously missing
# variant('mpi') declaration is added before the depends_on directives.
# ---------------------------------------------------------------------------
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class EtsfIo(Package):
    """ETSF_IO is a library implementing the Nanoquanta/ETSF file format specifications.

    ETSF_IO enables an architecture-independent exchange of crystallographic
    data, electronic wavefunctions, densities and potentials, as well as
    spectroscopic data. It is meant to be used by quantum-physical and
    quantum-chemical applications relying upon Density Functional Theory
    (DFT).
    """

    homepage = "http://www.etsf.eu/resources/software/libraries_and_tools"
    url = "https://launchpad.net/etsf-io/1.0/1.0.4/+download/etsf_io-1.0.4.tar.gz"

    version('1.0.4', sha256='3140c2cde17f578a0e6b63acb27a5f6e9352257a1371a17b9c15c3d0ef078fa4')

    # The mpi variant is now declared, so the '+mpi' condition below is valid.
    variant('mpi', default=True, description='Add MPI support')

    depends_on("netcdf-fortran")
    depends_on("hdf5+mpi~cxx", when='+mpi')  # required for NetCDF-4 support

    def install(self, spec, prefix):
        # Standard autotools build: configure with NetCDF/HDF5 locations,
        # then make / make check / make install.
        options = ['--prefix=%s' % prefix]
        oapp = options.append

        # Specify installation directory for Fortran module files
        # Default is [INCLUDEDIR/FC_TYPE]
        oapp("--with-moduledir=%s" % prefix.include)

        # Netcdf4/HDF
        hdf_libs = "-L%s -lhdf5_hl -lhdf5" % spec["hdf5"].prefix.lib
        options.extend([
            "--with-netcdf-incs=-I%s" % spec["netcdf-fortran"].prefix.include,
            "--with-netcdf-libs=-L%s -lnetcdff -lnetcdf %s" % (
                spec["netcdf-fortran"].prefix.lib, hdf_libs),
        ])

        configure(*options)
        make()
        make("check")
        make("install")
# ---------------------------------------------------------------------------
# "Before" version of the commit pair stored in this record.
# ---------------------------------------------------------------------------
import os
import pytz
import numpy as np
import desisurvey.NTS
import desisurvey.svstats
import desiutil.log
from desisurvey.scripts import collect_etc
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import EarthLocation
from astropy import units as u


def run_plan(obsplan=None):
    """Simulate one night of tile selection and print the plan, one line
    per selected tile, with local time, LST, conditions and tile info."""
    utc = pytz.timezone('utc')
    tz = pytz.timezone('US/Arizona')
    kpno = EarthLocation.of_site('kpno')
    nts = desisurvey.NTS.NTS(obsplan=obsplan)
    # Start at bright dusk and step through the night until bright dawn.
    t0 = nts.scheduler.night_ephem['brightdusk']
    nts_dir, _ = os.path.split(nts.obsplan)
    # NOTE(review): the directory name is embedded in the ETC stats file
    # name — presumably nts_dir is a YYYYMMDD night string; confirm.
    etcfn = os.path.join(nts_dir, 'etc-stats-{}.fits'.format(nts_dir))
    exps = fits.getdata(etcfn, 'EXPS')
    nincond = collect_etc.number_in_conditions(exps)
    donecond = desisurvey.svstats.donefrac_in_conditions(nincond)
    # Quiet the logger so only the plan table is printed.
    desiutil.log.get_logger().setLevel(desiutil.log.WARNING)
    previoustiles = []
    # NOTE(review): header says 'b/g/d' but the values printed below are
    # ordered dark/gray/bright — this is the typo the commit below fixes.
    print('local lst cond tile ra dec program fac tot split '
          'b/g/d')
    while t0 < nts.scheduler.night_ephem['brightdawn']:
        expdict = dict(mjd=t0, previoustiles=previoustiles)
        res = nts.next_tile(exposure=expdict, speculative=True)
        if not res['foundtile']:
            print('no tiles!')
            t0 += 60
            continue
        previoustiles.append(res['fiberassign'])
        lst = Time(t0, format='mjd', location=kpno).sidereal_time('apparent')
        lst = lst.to(u.deg).value
        tt = Time(t0, format='mjd').to_datetime(timezone=utc)
        nsofar = (donecond[donecond['TILEID'] == res['fiberassign']])
        ind = np.flatnonzero(
            nts.scheduler.tiles.tileID == res['fiberassign'])[0]
        ra = nts.scheduler.tiles.tileRA[ind]
        dec = nts.scheduler.tiles.tileDEC[ind]
        if len(nsofar) > 0:
            # Nights observed so far in dark / gray / bright conditions.
            nsofar = [nsofar['NNIGHT_DARK'], nsofar['NNIGHT_GRAY'],
                      nsofar['NNIGHT_BRIGHT']]
        else:
            nsofar = [0, 0, 0]
        print('%s %5.1f %6s %d %5.1f %5.1f %10s %3.1f %4d %6s %d/%d/%d' % (
            tt.astimezone(tz).strftime('%H:%M'), lst, res['conditions'],
            res['fiberassign'], ra, dec, res['program'],
            res['exposure_factor'], res['esttime'].astype('i4'),
            ('%dx%d' % (res['count'], res['exptime'])), *nsofar))
        # Advance by total exposure time plus 180 s overhead per exposure,
        # converted from seconds to days (t0 is an MJD).
        t0 += (res['exptime']+180)*res['count']/60/60/24


def parse(options=None):
    """Parse command-line options; returns the argparse namespace."""
    import argparse
    parser = argparse.ArgumentParser(
        description='run an example night plan',
        epilog='EXAMPLE: %(prog)s [YYYYMMDD/config.yaml]')
    parser.add_argument('obsplan', nargs='?', default=None, type=str,
                        help='obsplan to use; default YYYYMMDD/config.yaml')
    if options is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(options)
    return args


def main(args):
    """Entry point: run the plan for the parsed obsplan argument."""
    run_plan(obsplan=args.obsplan)

Fix typo in run_plan output.

# ---------------------------------------------------------------------------
# "After" version: identical except the column header now reads 'd/g/b',
# matching the dark/gray/bright order of the printed values.
# ---------------------------------------------------------------------------
import os
import pytz
import numpy as np
import desisurvey.NTS
import desisurvey.svstats
import desiutil.log
from desisurvey.scripts import collect_etc
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import EarthLocation
from astropy import units as u


def run_plan(obsplan=None):
    """Simulate one night of tile selection and print the plan, one line
    per selected tile, with local time, LST, conditions and tile info."""
    utc = pytz.timezone('utc')
    tz = pytz.timezone('US/Arizona')
    kpno = EarthLocation.of_site('kpno')
    nts = desisurvey.NTS.NTS(obsplan=obsplan)
    # Start at bright dusk and step through the night until bright dawn.
    t0 = nts.scheduler.night_ephem['brightdusk']
    nts_dir, _ = os.path.split(nts.obsplan)
    etcfn = os.path.join(nts_dir, 'etc-stats-{}.fits'.format(nts_dir))
    exps = fits.getdata(etcfn, 'EXPS')
    nincond = collect_etc.number_in_conditions(exps)
    donecond = desisurvey.svstats.donefrac_in_conditions(nincond)
    # Quiet the logger so only the plan table is printed.
    desiutil.log.get_logger().setLevel(desiutil.log.WARNING)
    previoustiles = []
    # Header order now matches the dark/gray/bright values printed below.
    print('local lst cond tile ra dec program fac tot split '
          'd/g/b')
    while t0 < nts.scheduler.night_ephem['brightdawn']:
        expdict = dict(mjd=t0, previoustiles=previoustiles)
        res = nts.next_tile(exposure=expdict, speculative=True)
        if not res['foundtile']:
            print('no tiles!')
            t0 += 60
            continue
        previoustiles.append(res['fiberassign'])
        lst = Time(t0, format='mjd', location=kpno).sidereal_time('apparent')
        lst = lst.to(u.deg).value
        tt = Time(t0, format='mjd').to_datetime(timezone=utc)
        nsofar = (donecond[donecond['TILEID'] == res['fiberassign']])
        ind = np.flatnonzero(
            nts.scheduler.tiles.tileID == res['fiberassign'])[0]
        ra = nts.scheduler.tiles.tileRA[ind]
        dec = nts.scheduler.tiles.tileDEC[ind]
        if len(nsofar) > 0:
            # Nights observed so far in dark / gray / bright conditions.
            nsofar = [nsofar['NNIGHT_DARK'], nsofar['NNIGHT_GRAY'],
                      nsofar['NNIGHT_BRIGHT']]
        else:
            nsofar = [0, 0, 0]
        print('%s %5.1f %6s %d %5.1f %5.1f %10s %3.1f %4d %6s %d/%d/%d' % (
            tt.astimezone(tz).strftime('%H:%M'), lst, res['conditions'],
            res['fiberassign'], ra, dec, res['program'],
            res['exposure_factor'], res['esttime'].astype('i4'),
            ('%dx%d' % (res['count'], res['exptime'])), *nsofar))
        # Advance by total exposure time plus 180 s overhead per exposure,
        # converted from seconds to days (t0 is an MJD).
        t0 += (res['exptime']+180)*res['count']/60/60/24


def parse(options=None):
    """Parse command-line options; returns the argparse namespace."""
    import argparse
    parser = argparse.ArgumentParser(
        description='run an example night plan',
        epilog='EXAMPLE: %(prog)s [YYYYMMDD/config.yaml]')
    parser.add_argument('obsplan', nargs='?', default=None, type=str,
                        help='obsplan to use; default YYYYMMDD/config.yaml')
    if options is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(options)
    return args


def main(args):
    """Entry point: run the plan for the parsed obsplan argument."""
    run_plan(obsplan=args.obsplan)
#!/usr/bin/python """ This module is the central part of MaxiNet and is intended to be the only part of MaxiNet which needs to be used by the user or third-party applications """ import re import sys import logging from functools import partial import time import subprocess import random import atexit from mininet.node import RemoteController, UserSwitch import Pyro4 from MaxiNet.Frontend.tools import Tools import tools from client import Frontend, log_and_reraise_remote_exception from partitioner import Partitioner from MaxiNet.Frontend.cli import CLI # the following block is to support deprecation warnings. this is really not # solved nicely and should probably be somewhere else import warnings import functools logger = logging.getLogger(__name__) def deprecated(func): '''This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.''' @functools.wraps(func) def new_func(*args, **kwargs): logger.warn("Call to deprecated function {}.".format(func.__name__)) warnings.warn_explicit( "Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, filename=func.func_code.co_filename, lineno=func.func_code.co_firstlineno + 1 ) return func(*args, **kwargs) return new_func def run_cmd(cmd): """ run cmd on frontend machine """ return subprocess.check_output(cmd,shell=False) def run_cmd_shell(cmd): """ run cmd on frontend machine with the shell """ return subprocess.check_output(cmd,shell=True) class Worker: """ represents a worker machine in an running experiment """ def __init__(self,frontend,wid,switch=UserSwitch): self.creator = frontend.getObjectProxy(wid+".mnCreator") self.cmd = frontend.getObjectProxy(wid+".cmd") self.config = frontend.getObjectProxy("config") if(not self.config.runWith1500MTU()): self._fix_mtus() self.switch=switch self.x11tunnels=[] self.wid=int(wid[6:]) @log_and_reraise_remote_exception def hn(self): """ return hostname of worker machine """ return 
self.cmd.get_hostname() def set_switch(self,switch): """ set default switch class """ self.switch=switch @log_and_reraise_remote_exception def configLinkStatus(self, src, dst, status): """ wrapper for configLinkStatus method on remote mininet""" self.creator.configLinkStatus(src,dst,status) @log_and_reraise_remote_exception def ip(self): """ return public ip adress of worker machine """ return self.config.getIP(self.hn()) @log_and_reraise_remote_exception def start(self,topo,tunnels, controller=None): """ start mininet instance on worker machine emulating the in topo specified topology. if controller is not specified mininet will start an own controller for this net """ if controller: self.creator.create_mininet(topo=topo, tunnels=tunnels, controller = controller, switch=self.switch) else: self.creator.create_mininet(topo=topo, tunnels=tunnels, switch=self.switch) @log_and_reraise_remote_exception def daemonize(self,cmd): """ run command in background and terminate when MaxiNet is shut down """ self.cmd.daemonize(cmd) @log_and_reraise_remote_exception def tunnelX11(self,node): """ create X11 tunnel on worker to make x-forwarding work """ if(not node in self.x11tunnels): try: display = subprocess.check_output("ssh -Y "+self.hn()+ " env | grep DISPLAY",shell=True)[8:] self.creator.tunnelX11(node,display) self.x11tunnels.append(node) except subprocess.CalledProcessError: return False return True @log_and_reraise_remote_exception def run_cmd_on_host(self,host,cmd): """ run cmd in context of host and return output, where host is the name of an host in the mininet instance running on the worker machine """ return self.creator.runCmdOnHost(host,cmd) @log_and_reraise_remote_exception def run_cmd(self,cmd): """ run cmd on worker machine and return output """ return self.cmd.check_output(cmd) @log_and_reraise_remote_exception def run_script(self,cmd): """ run cmd on worker machine from the Worker/bin directory return output """ return self.cmd.script_check_output(cmd) 
@log_and_reraise_remote_exception def rpc(self, host, cmd, *params1, **params2): """ internal function to do rpc calls """ return self.creator.rpc(host, cmd, *params1, **params2) @log_and_reraise_remote_exception def rattr(self, host,name): """ internal function to get attributes of objects """ return self.creator.attr(host, name) @log_and_reraise_remote_exception def _fix_mtus(self): if self.ip() is None : logger.warn("no ip configured - can not fix MTU ") return 0 intf = self.run_cmd("ip addr show to "+self.ip()+"/24 | head -n1 | cut -d' ' -f2 | tr -d :").strip() if intf == "" : logger.warn("could not find eth device - can not fix MTU") return 0 mtu = int(self.run_cmd("ip li show dev "+intf+" | head -n1 | cut -d ' ' -f5")) if(mtu<1600): self.run_cmd("ip li se dev "+intf+" mtu 1600") @log_and_reraise_remote_exception def stop(self): """ stop mininet instance on this worker """ return self.creator.stop() def get_file(self,src,dst): """ transfer file specified by src on worker to dst on frontend. uses scp command to transfer file """ cmd_get = ["scp",self.hn()+":\""+src+"\"",dst] subprocess.call(cmd_get) def put_file(self,src,dst): """ transfer file specified by src on frontend to dst on worker. uses scp command to transfer file """ cmd_put = ["scp",src,self.hn()+":"+dst] subprocess.call(cmd_put) @log_and_reraise_remote_exception def addHost(self,name, cls=None, **params): """ add host at runtime. you probably want to use Experiment.addHost """ return self.creator.addHost(name,cls,**params) @log_and_reraise_remote_exception def addSwitch(self,name, cls=None,**params): """ add switch at runtime. you probably want to use Experiment.addSwitch """ return self.creator.addSwitch(name,cls,**params) @log_and_reraise_remote_exception def addController(self,name="c0", controller=None, **params): """ add controller at runtime. 
you probably want to use Experiment.addController """ return self.creator.addHost(name,controller,**params) @log_and_reraise_remote_exception def addTunnel(self,name, switch, port, cls, **params): """ add tunnel at runtime. you probably want to use Experiment.addLink """ return self.creator.addTunnel(name, switch, port, cls,**params) @log_and_reraise_remote_exception def addLink(self, node1, node2, port1 = None, port2 = None, cls = None, **params): """ add link at runtime. you probably want to use Experiment.addLink """ return self.creator.addLink(node1, node2, port1, port2, cls, **params) class TunHelper: """ internal class to manage tunnel interface names """ def __init__(self): self.tunnr = 0 self.keynr = 0 def get_tun_nr(self): self.tunnr = self.tunnr +1 return self.tunnr -1 def get_key_nr(self): self.keynr = self.keynr+1 return self.keynr -1 def get_last_tun_nr(self): return self.tunnr - 1 def get_last_key_nr(self): return self.keynr - 1 class Cluster: """ manage a set of workers via this class. to create several different topologys do not destroy/recreate this class but define several Experiment instances running sequential """ def __init__(self,*hosts): """ create Cluster object. Starting with MaxiNet 0.2 the hosts parameter is optional. 
If None is given all configured hosts will be used """ self.running=False self.logger = logging.getLogger(__name__) self.tunhelper = TunHelper() self.config = tools.Config("","",register = False) logging.basicConfig(level=self.config.getLoggingLevel()) if hosts: self.hosts = hosts else: self.hosts = self.config.getHosts() self.worker=[] if(self.hosts[0]!=subprocess.check_output(["hostname"]).strip()): rhost = self.config.getIP(self.hosts[0]) else: if(len(self.hosts)>1): rhost = self.config.getIP(self.hosts[1]) else: rhost = self.config.getIP(self.hosts[0]) if sys.platform == "darwin": route = subprocess.check_output(["route", "-vn", "get", rhost]).splitlines()[-1] m = re.search(r' (\d+\.\d+\.\d+\.\d+)$', route) else: route = subprocess.check_output("ip route get "+rhost+" | head -n1", shell=True).strip() m = re.search(r'src (\d+\.\d+\.\d+\.\d+)', route) if m is not None: self.localIP = m.group(1) else: self.localIP="127.0.0.1" self.nsport=9090 self.frontend = Frontend(self.localIP, self.nsport) self.config = tools.Config(self.localIP,self.nsport) atexit.register(run_cmd, self.getWorkerMangerCMD("--stop")) atexit.register(self._stop) def getWorkerMangerCMD(self,cmd): cmdline = [self.config.getWorkerScript("worker_manager.py"), "--ns", self.localIP+":" + str(self.nsport), cmd] if self.config.debugPyroOnWorker(): cmdline.append("--debugPyro") if self.config.keepScreenOpenOnError(): cmdline.append("--keepScreenOpenOnError") cmdline.extend(self.hosts) return cmdline def is_running(self): return self.running def get_worker_shares(self): """ returns list of workload shares per worker """ shares=None if(self.running): shares=[] for w in self.worker: shares.append(self.config.getShare(w.hn())) wsum=0 for w in shares: wsum+=w shares = map(lambda x: x/float(wsum),shares) return shares def check_reachability(self,ip): """ check whether ip is reachable for a packet with MTU > 1500 """ cmd = ["ping","-c 2","-s 1520",ip] if(subprocess.call(cmd,stdout=open("/dev/null"),) == 1): 
return False else: return True def start(self): """ start MaxiNet on assigned worker machines and establish communication. Returns True in case of successful startup. """ self.logger.info("starting worker processes") cmd = self.getWorkerMangerCMD("--start") if self.frontend.hmac_key(): cmd.extend(["--hmac",self.frontend.hmac_key()]) self.logger.debug(run_cmd(cmd)) timeout = 10 for i in range(0,len(self.hosts)): self.logger.info("waiting for Worker "+str(i+1)+" to register on nameserver...") started=False end = time.time()+timeout while(not started): try: self.frontend.lookup("worker"+str(i+1)+".mnCreator") started=True except Pyro4.errors.NamingError: if(time.time()>end): raise RuntimeError("Timed out waiting for worker "+str(i+1)+".mnCreator to register.") time.sleep(0.1) started=False end=time.time()+timeout while(not started): try: self.frontend.lookup("worker"+str(i+1)+".cmd") started=True except Pyro4.errors.NamingError: time.sleep(0.1) if(time.time()>end): raise RuntimeError("Timed out waiting for worker "+str(i+1)+".cmd to register.") self.worker.append(Worker(self.frontend,"worker"+str(i+1))) if(not self.config.runWith1500MTU()): for host in self.hosts: if(not self.check_reachability(self.config.getIP(host))): self.logger.error("Host "+host+" is not reachable with an MTU > 1500.") raise RuntimeError("Host "+host+" is not reachable with an MTU > 1500.") for worker in self.worker: worker.run_script("load_tunneling.sh") self.logger.info("worker processes started") self.running = True return True def _stop(self): if(self.running): self.logger.info("removing tunnels...") self.remove_all_tunnels() #self.logger.info("shutting cluster down...") self.logger.info("removing nameserver entries...") for i in range(0,self.num_workers()): self.frontend.remove("worker"+str(i)+".mnCreator") self.frontend.remove("worker"+str(i)+".cmd") self.logger.info("shutting down frontend...") self.logger.info("Goodbye.") self.running=False def stop(self): """ stop cluster and shut it 
down """ self._stop() def num_workers(self): """ return number of worker nodes in this cluster """ return len(self.workers()) def workers(self): """ return list of worker instances for this cluster, ordered by worker id """ if(self.is_running()): return self.worker else: return [] def get_worker(self, wid): """ return worker with id wid """ return self.workers()[wid] def create_tunnel(self,w1,w2): """ create gre tunnel connecting worker machine w1 and w2 and return name of created network interface """ tid = self.tunhelper.get_tun_nr() tkey = self.tunhelper.get_key_nr() intf = "mn_tun"+str(tid) ip1=w1.ip() ip2=w2.ip() self.logger.debug("invoking tunnel create commands on "+ip1+" and "+ip2) w1.run_script("create_tunnel.sh "+ip1+" "+ip2+" "+intf+" "+str(tkey)) w2.run_script("create_tunnel.sh "+ip2+" "+ip1+" "+intf+" "+str(tkey)) self.logger.debug("done") return intf def remove_all_tunnels(self): """ shut down all tunnels created in this cluster """ for worker in self.workers(): worker.run_script("delete_tunnels.sh") self.tunhelper = TunHelper() class Experiment: """ use this class to specify experiment. Experiments are created for one-time-usage and have to be stopped in the end. 
One cluster instance can run several experiments in sequence """ def __init__(self,cluster, topology,controller=None, is_partitioned=False, switch=UserSwitch): self.cluster = cluster self.logger = logging.getLogger(__name__) self.topology=None self.config = tools.Config(register = False) self.starttime = time.localtime() self.printed_log_info = False self.isMonitoring = False if is_partitioned: self.topology = topology else: self.origtopology = topology self.node_to_workerid = {} self.node_to_wrapper = {} self.nodes = [] self.hosts = [] self.tunnellookup = {} self.switches = [] self.switch=switch if controller: contr = controller else: contr = self.config.getController() if contr.find(":")>=0: (host, port) = contr.split(":") else: host = contr port = "6633" self.controller = partial(RemoteController, ip=host, port=int(port)) def configLinkStatus(self, src, dst, status): """Change status of src <-> dst links. src: node name dst: node name status: string {up, down}""" ws = self.get_worker(src) wd = self.get_worker(dst) if(ws==wd): # src and dst are on same worker. let mininet handle this ws.configLinkStatus(src,dst,status) else: src=self.get(src) dst=self.get(dst) intf = self.tunnellookup[(src.name,dst.name)] src.cmd("ifconfig "+intf+" "+status) dst.cmd("ifconfig "+intf+" "+status) @deprecated def find_worker(self,node): """ return worker which emulates the specified node. Replaced by get_worker """ return self.get_worker(node) def get_worker(self,node): """ return worker which emulates the specified node. 
""" if(isinstance(node,NodeWrapper)): return node.worker return self.cluster.get_worker(self.node_to_workerid[node]) def get_log_folder(self): """ returns folder to which log files will be saved """ return "/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/" def terminate_logging(self): for worker in self.cluster.workers(): worker.run_cmd("killall getRxTx.sh getMemoryUsage.sh") self.isMonitoring = False def log_cpu(self): """ log cpu useage of workers and place log files in /tmp/maxinet_logs/ """ for worker in self.cluster.workers(): self.log_cpu_of_worker(worker) def log_cpu_of_worker(self,worker): """ log cpu usage of worker and place log file in /tmp/maxinet_logs/ """ subprocess.call(["mkdir","-p","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/"]) atexit.register(worker.get_file,"/tmp/maxinet_cpu_"+str(worker.wid)+"_("+worker.hn()+").log","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/") worker.daemonize("LANG=en_EN.UTF-8 mpstat 1 | while read l; do echo -n \"`date +%s` \" ; echo \"$l \" ; done > \"/tmp/maxinet_cpu_"+str(worker.wid)+"_("+worker.hn()+").log\"") atexit.register(self._print_log_info) def log_free_memory(self): """ log memory usage of workers and place log files in /tmp/maxinet_logs Format is: timestamp,FreeMemory,Buffers,Cached """ subprocess.call(["mkdir","-p","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/"]) for worker in self.cluster.workers(): atexit.register(worker.get_file,"/tmp/maxinet_mem_"+str(worker.wid)+"_("+worker.hn()+").log","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/") memmon = worker.config.getWorkerScript("getMemoryUsage.sh") worker.daemonize(memmon + " > \"/tmp/maxinet_mem_"+str(worker.wid)+"_("+worker.hn()+").log\"") atexit.register(self._print_log_info) def log_interfaces_of_node(self,node): """ logs statistics of interfaces of node and places them in /tmp/maxinet_logs Format is: timestamp,received bytes,sent bytes,received packets,sent packets """ 
subprocess.call(["mkdir","-p","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/"]) node = self.get(node) worker = self.get_worker(node) for intf in node.intfNames(): self.log_interface(worker,intf) def log_interface(self,worker,intf): """ logs statistics of interface of worker and places them in /tmp/maxinet_logs Format is: timestamp,received bytes,sent bytes,received packets,sent packets """ atexit.register(worker.get_file,"/tmp/maxinet_intf_"+intf+"_"+str(worker.wid)+"_("+worker.hn()+").log","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/") ethmon = worker.config.getWorkerScript("getRxTx.sh") worker.daemonize(ethmon + " "+intf+" > \"/tmp/maxinet_intf_"+intf+"_"+str(worker.wid)+"_("+worker.hn()+").log\"") atexit.register(self._print_log_info) def monitor(self): """ logs statistics of worker interfaces and memory usage and places them in /tmp/maxinet_logs """ self.isMonitoring = True atexit.register(self._print_monitor_info) self.log_free_memory() self.log_cpu() for worker in self.cluster.workers(): intf = worker.run_cmd("ip addr show to "+worker.ip()+"/24 | head -n1 | cut -d' ' -f2 | tr -d :").strip() if(intf == ""): self.logger.warn("could not find main interface for "+worker.hn()+". no logging possible.") else: self.log_interface(worker,intf) def _print_log_info(self): if(not self.printed_log_info): self.printed_log_info=True self.logger.info("Log files will be placed in /tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/. You might want to save them somewhere else.") def _print_monitor_info(self): self.logger.info("You monitored this experiment. To generate a graph from your logs call \"/usr/local/share/MaxiNet/maxinet_plot.py /tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/ plot.png\" ") def CLI(self,plocals,pglobals): """ open interactive command line interface """ CLI(self,plocals,pglobals) def addNode(self, name,**params): """ add node at runtime. 
use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ wid = random.randint(0,self.cluster.num_workers()-1) if "pos" in params.keys(): wid = self.node_to_workerid[params["pos"]] del params["pos"] if "wid" in params.keys(): wid = int(params["wid"])-1 del params["wid"] worker = self.cluster.get_worker(wid) self.node_to_workerid[name]=wid self.node_to_wrapper[name]=NodeWrapper(name,self.get_worker(name)) self.nodes.append(self.node_to_wrapper[name]) def addHost(self, name, cls = None, **params): """ add host at runtime. use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ self.addNode(name,**params) self.get_worker(name).addHost(name,cls, **params) self.hosts.append(self.get(name)) return self.get(name) def addSwitch(self, name, cls = None, **params): """ add switch at runtime use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ self.addNode(name, **params) self.get_worker(name).addSwitch(name,cls, **params) self.switches.append(self.get(name)) return self.get(name) def addController(self, name="c0",controller = None, **params): """ add controller at runtime use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ self.addNode(name, **params) self.get_worker(name).addController(name, controller, **params) return self.get(name) def name(self, node): """ return name assigned to specified network node. """ if(isinstance(node,NodeWrapper)): return node.nn return node def addLink(self, node1, node2, port1 = None, port2 = None, cls = None, autoconf=False, **params): """ add links at runtime. will create tunnels between workers if necessary, but can not create tunnels between hosts and switches. 
(links will work fine) autoconf parameter handles attach() and config calls on switches and hosts """ w1 = self.get_worker(node1) w2 = self.get_worker(node2) node1 = self.get(node1) node2 = self.get(node2) if(w1==w2): self.logger.debug("no tunneling needed") l=w1.addLink(self.name(node1),self.name(node2), port1, port2, cls, **params) else: self.logger.debug("tunneling needed") if(not ((node1 in self.switches) and (node2 in self.switches))): self.logger.error("We cannot create tunnels between switches and hosts. Sorry.") raise RuntimeError("Can't create tunnel between switch and host") intf = self.cluster.create_tunnel(w1,w2) w1.addTunnel(intf,self.name(node1), port1, cls, **params) w2.addTunnel(intf,self.name(node2), port2, cls, **params) l=((self.name(node1),intf),(self.name(node2),intf)) if(autoconf): if(node1 in self.switches): node1.attach(l[0][1]) else: node1.configDefault() if(node2 in self.switches): node2.attach(l[1][1]) else: node2.configDefault() if(self.config.runWith1500MTU()): self.setMTU(node1,1450) self.setMTU(node2,1450) def _find_topo_of_node(self,node, topos): for topo in topos: if node in topo.g.nodes(): return topo def get_node(self, nodename): """ return node that is specified by nodename :rtype : NodeWrapper """ if(self.node_to_wrapper.has_key(nodename)): return self.node_to_wrapper[nodename] else: return None def get(self,nodename): """ alias for get_node """ return self.get_node(nodename) def setup(self): """ start cluster if not yet started, assign topology parts to workers and start workers """ if(not self.cluster.is_running()): self.cluster.start() if(not self.cluster.is_running()): raise RuntimeError("Cluster won't start") self.logger.info("Clustering topology...") if(not self.topology): parti = Partitioner() parti.loadtopo(self.origtopology) self.topology = parti.partition(self.cluster.num_workers(),self.cluster.get_worker_shares()) # assigning shares to workers requires that the workers are already startet. 
elsewise we don't have a way to determine the workerid of the worker. topologies are assigned to workers in ascending workerid order self.logger.debug("Tunnels: "+str(self.topology.getTunnels())) subtopos = self.topology.getTopos() if(len(subtopos) > self.cluster.num_workers()): raise RuntimeError("Cluster does not have enough workers for given topology") for subtopo in subtopos: for node in subtopo.nodes(): self.node_to_workerid[node]=subtopos.index(subtopo) self.nodes.append(NodeWrapper(node, self.get_worker(node))) self.node_to_wrapper[node]=self.nodes[-1] if (not subtopo.isSwitch(node)): self.hosts.append(self.nodes[-1]) else: self.switches.append(self.nodes[-1]) self.logger.debug("Nodemapping: %s",self.node_to_workerid) tunnels = [[] for x in range(len(subtopos))] for tunnel in self.topology.getTunnels(): w1 = self.get_worker(tunnel[0]) w2 = self.get_worker(tunnel[1]) intf = self.cluster.create_tunnel(w1,w2) self.tunnellookup[(tunnel[0],tunnel[1])]=intf self.tunnellookup[(tunnel[1],tunnel[0])]=intf for i in range(0,2): tunnels[self.node_to_workerid[tunnel[i]]].append([intf, tunnel[i], tunnel[2]]) # Assumes that workerid = subtopoid for topo in subtopos: self.cluster.workers()[subtopos.index(topo)].set_switch(self.switch) if(self.controller): self.cluster.workers()[subtopos.index(topo)].start(topo=topo, tunnels=tunnels[subtopos.index(topo)], controller=self.controller) else: self.cluster.workers()[subtopos.index(topo)].start(topo=topo, tunnels=tunnels[subtopos.index(topo)]) if (self.config.runWith1500MTU()): for topo in subtopos: for host in topo.nodes(): self.setMTU(host,1450) def setMTU(self,host,mtu): for intf in self.get(host).intfNames(): self.get(host).cmd("ifconfig "+intf+" mtu "+str(mtu)) @deprecated def run_cmd_on_host(self,host, cmd): """ run cmd on emulated host specified by host name and return output This function is deprecated and will be removed in a future version of MaxiNet. 
Use experiment.get(node).cmd() instead """ return self.get_worker(host).run_cmd_on_host(host,cmd) def stop(self): """ stop experiment and shut down emulation on workers """ if self.isMonitoring: self.terminate_logging() for worker in self.cluster.workers(): worker.stop() self.cluster.remove_all_tunnels() class NodeWrapper: """ wrapper that allows most commands that can be used in mininet to be used in MaxiNet """ # this feels like doing rpc via rpc... def __init__(self,nodename,worker): self.nn = nodename self.worker = worker def _call(self,cmd, *params1, **params2): return self.worker.rpc(self.nn, cmd, *params1, **params2) def _get(self,name): return self.worker.rattr(self.nn, name) def __getattr__(self,name): def method(*params1,**params2): return self._call(name,*params1,**params2) # the following commands SHOULD work. no guarantee given if name in [ "cleanup", "read", "readline", "write", "terminate", "stop", "waitReadable", "sendCmd", "sendInt", "monitor", "waitOutput", "cmd", "cmdPrint", "pexec", "newPort", "addIntf", "defaultIntf", "intf", "connectionsTo", "deleteIntfs", "setARP", "setIP", "IP", "MAC", "intfIsUp", "config", "configDefault", "intfNames", "cgroupSet", "cgroupGet", "cgroupDel", "chrt", "rtInfo", "cfsInfo", "setCPUFrac", "setCPUs", "defaultDpid", "defaultIntf", "connected", "setup", "dpctl", "start", "stop", "attach", "detach", "controllerUUIDs", "checkListening"]: return method elif name in [ "name","inNamespace","params","nameToIntf","waiting"]: return self._get(name) else: raise AttributeError(name) def __repr__(self): return "NodeWrapper ("+self.nn+" at "+str(self.worker)+")" Fix MaxiNet setup with small MTU #!/usr/bin/python """ This module is the central part of MaxiNet and is intended to be the only part of MaxiNet which needs to be used by the user or third-party applications """ import re import sys import logging from functools import partial import time import subprocess import random import atexit from mininet.node import 
RemoteController, UserSwitch import Pyro4 from MaxiNet.Frontend.tools import Tools import tools from client import Frontend, log_and_reraise_remote_exception from partitioner import Partitioner from MaxiNet.Frontend.cli import CLI # the following block is to support deprecation warnings. this is really not # solved nicely and should probably be somewhere else import warnings import functools logger = logging.getLogger(__name__) def deprecated(func): '''This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.''' @functools.wraps(func) def new_func(*args, **kwargs): logger.warn("Call to deprecated function {}.".format(func.__name__)) warnings.warn_explicit( "Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, filename=func.func_code.co_filename, lineno=func.func_code.co_firstlineno + 1 ) return func(*args, **kwargs) return new_func def run_cmd(cmd): """ run cmd on frontend machine """ return subprocess.check_output(cmd,shell=False) def run_cmd_shell(cmd): """ run cmd on frontend machine with the shell """ return subprocess.check_output(cmd,shell=True) class Worker: """ represents a worker machine in an running experiment """ def __init__(self,frontend,wid,switch=UserSwitch): self.creator = frontend.getObjectProxy(wid+".mnCreator") self.cmd = frontend.getObjectProxy(wid+".cmd") self.config = frontend.getObjectProxy("config") if(not self.config.runWith1500MTU()): self._fix_mtus() self.switch=switch self.x11tunnels=[] self.wid=int(wid[6:]) @log_and_reraise_remote_exception def hn(self): """ return hostname of worker machine """ return self.cmd.get_hostname() def set_switch(self,switch): """ set default switch class """ self.switch=switch @log_and_reraise_remote_exception def configLinkStatus(self, src, dst, status): """ wrapper for configLinkStatus method on remote mininet""" self.creator.configLinkStatus(src,dst,status) @log_and_reraise_remote_exception 
def ip(self): """ return public ip adress of worker machine """ return self.config.getIP(self.hn()) @log_and_reraise_remote_exception def start(self,topo,tunnels, controller=None): """ start mininet instance on worker machine emulating the in topo specified topology. if controller is not specified mininet will start an own controller for this net """ if controller: self.creator.create_mininet(topo=topo, tunnels=tunnels, controller = controller, switch=self.switch) else: self.creator.create_mininet(topo=topo, tunnels=tunnels, switch=self.switch) @log_and_reraise_remote_exception def daemonize(self,cmd): """ run command in background and terminate when MaxiNet is shut down """ self.cmd.daemonize(cmd) @log_and_reraise_remote_exception def tunnelX11(self,node): """ create X11 tunnel on worker to make x-forwarding work """ if(not node in self.x11tunnels): try: display = subprocess.check_output("ssh -Y "+self.hn()+ " env | grep DISPLAY",shell=True)[8:] self.creator.tunnelX11(node,display) self.x11tunnels.append(node) except subprocess.CalledProcessError: return False return True @log_and_reraise_remote_exception def run_cmd_on_host(self,host,cmd): """ run cmd in context of host and return output, where host is the name of an host in the mininet instance running on the worker machine """ return self.creator.runCmdOnHost(host,cmd) @log_and_reraise_remote_exception def run_cmd(self,cmd): """ run cmd on worker machine and return output """ return self.cmd.check_output(cmd) @log_and_reraise_remote_exception def run_script(self,cmd): """ run cmd on worker machine from the Worker/bin directory return output """ return self.cmd.script_check_output(cmd) @log_and_reraise_remote_exception def rpc(self, host, cmd, *params1, **params2): """ internal function to do rpc calls """ return self.creator.rpc(host, cmd, *params1, **params2) @log_and_reraise_remote_exception def rattr(self, host,name): """ internal function to get attributes of objects """ return self.creator.attr(host, name) 
@log_and_reraise_remote_exception def _fix_mtus(self): if self.ip() is None : logger.warn("no ip configured - can not fix MTU ") return 0 intf = self.run_cmd("ip addr show to "+self.ip()+"/24 | head -n1 | cut -d' ' -f2 | tr -d :").strip() if intf == "" : logger.warn("could not find eth device - can not fix MTU") return 0 mtu = int(self.run_cmd("ip li show dev "+intf+" | head -n1 | cut -d ' ' -f5")) if(mtu<1600): self.run_cmd("ip li se dev "+intf+" mtu 1600") @log_and_reraise_remote_exception def stop(self): """ stop mininet instance on this worker """ return self.creator.stop() def get_file(self,src,dst): """ transfer file specified by src on worker to dst on frontend. uses scp command to transfer file """ cmd_get = ["scp",self.hn()+":\""+src+"\"",dst] subprocess.call(cmd_get) def put_file(self,src,dst): """ transfer file specified by src on frontend to dst on worker. uses scp command to transfer file """ cmd_put = ["scp",src,self.hn()+":"+dst] subprocess.call(cmd_put) @log_and_reraise_remote_exception def addHost(self,name, cls=None, **params): """ add host at runtime. you probably want to use Experiment.addHost """ return self.creator.addHost(name,cls,**params) @log_and_reraise_remote_exception def addSwitch(self,name, cls=None,**params): """ add switch at runtime. you probably want to use Experiment.addSwitch """ return self.creator.addSwitch(name,cls,**params) @log_and_reraise_remote_exception def addController(self,name="c0", controller=None, **params): """ add controller at runtime. you probably want to use Experiment.addController """ return self.creator.addHost(name,controller,**params) @log_and_reraise_remote_exception def addTunnel(self,name, switch, port, cls, **params): """ add tunnel at runtime. you probably want to use Experiment.addLink """ return self.creator.addTunnel(name, switch, port, cls,**params) @log_and_reraise_remote_exception def addLink(self, node1, node2, port1 = None, port2 = None, cls = None, **params): """ add link at runtime. 
you probably want to use Experiment.addLink """ return self.creator.addLink(node1, node2, port1, port2, cls, **params) class TunHelper: """ internal class to manage tunnel interface names """ def __init__(self): self.tunnr = 0 self.keynr = 0 def get_tun_nr(self): self.tunnr = self.tunnr +1 return self.tunnr -1 def get_key_nr(self): self.keynr = self.keynr+1 return self.keynr -1 def get_last_tun_nr(self): return self.tunnr - 1 def get_last_key_nr(self): return self.keynr - 1 class Cluster: """ manage a set of workers via this class. to create several different topologys do not destroy/recreate this class but define several Experiment instances running sequential """ def __init__(self,*hosts): """ create Cluster object. Starting with MaxiNet 0.2 the hosts parameter is optional. If None is given all configured hosts will be used """ self.running=False self.logger = logging.getLogger(__name__) self.tunhelper = TunHelper() self.config = tools.Config("","",register = False) logging.basicConfig(level=self.config.getLoggingLevel()) if hosts: self.hosts = hosts else: self.hosts = self.config.getHosts() self.worker=[] if(self.hosts[0]!=subprocess.check_output(["hostname"]).strip()): rhost = self.config.getIP(self.hosts[0]) else: if(len(self.hosts)>1): rhost = self.config.getIP(self.hosts[1]) else: rhost = self.config.getIP(self.hosts[0]) if sys.platform == "darwin": route = subprocess.check_output(["route", "-vn", "get", rhost]).splitlines()[-1] m = re.search(r' (\d+\.\d+\.\d+\.\d+)$', route) else: route = subprocess.check_output("ip route get "+rhost+" | head -n1", shell=True).strip() m = re.search(r'src (\d+\.\d+\.\d+\.\d+)', route) if m is not None: self.localIP = m.group(1) else: self.localIP="127.0.0.1" self.nsport=9090 self.frontend = Frontend(self.localIP, self.nsport) self.config = tools.Config(self.localIP,self.nsport) atexit.register(run_cmd, self.getWorkerMangerCMD("--stop")) atexit.register(self._stop) def getWorkerMangerCMD(self,cmd): cmdline = 
[self.config.getWorkerScript("worker_manager.py"), "--ns", self.localIP+":" + str(self.nsport), cmd] if self.config.debugPyroOnWorker(): cmdline.append("--debugPyro") if self.config.keepScreenOpenOnError(): cmdline.append("--keepScreenOpenOnError") cmdline.extend(self.hosts) return cmdline def is_running(self): return self.running def get_worker_shares(self): """ returns list of workload shares per worker """ shares=None if(self.running): shares=[] for w in self.worker: shares.append(self.config.getShare(w.hn())) wsum=0 for w in shares: wsum+=w shares = map(lambda x: x/float(wsum),shares) return shares def check_reachability(self,ip): """ check whether ip is reachable for a packet with MTU > 1500 """ cmd = ["ping","-c 2","-s 1520",ip] if(subprocess.call(cmd,stdout=open("/dev/null"),) == 1): return False else: return True def start(self): """ start MaxiNet on assigned worker machines and establish communication. Returns True in case of successful startup. """ self.logger.info("starting worker processes") cmd = self.getWorkerMangerCMD("--start") if self.frontend.hmac_key(): cmd.extend(["--hmac",self.frontend.hmac_key()]) self.logger.debug(run_cmd(cmd)) timeout = 10 for i in range(0,len(self.hosts)): self.logger.info("waiting for Worker "+str(i+1)+" to register on nameserver...") started=False end = time.time()+timeout while(not started): try: self.frontend.lookup("worker"+str(i+1)+".mnCreator") started=True except Pyro4.errors.NamingError: if(time.time()>end): raise RuntimeError("Timed out waiting for worker "+str(i+1)+".mnCreator to register.") time.sleep(0.1) started=False end=time.time()+timeout while(not started): try: self.frontend.lookup("worker"+str(i+1)+".cmd") started=True except Pyro4.errors.NamingError: time.sleep(0.1) if(time.time()>end): raise RuntimeError("Timed out waiting for worker "+str(i+1)+".cmd to register.") self.worker.append(Worker(self.frontend,"worker"+str(i+1))) if(not self.config.runWith1500MTU()): for host in self.hosts: if(not 
self.check_reachability(self.config.getIP(host))): self.logger.error("Host "+host+" is not reachable with an MTU > 1500.") raise RuntimeError("Host "+host+" is not reachable with an MTU > 1500.") for worker in self.worker: worker.run_script("load_tunneling.sh") self.logger.info("worker processes started") self.running = True return True def _stop(self): if(self.running): self.logger.info("removing tunnels...") self.remove_all_tunnels() #self.logger.info("shutting cluster down...") self.logger.info("removing nameserver entries...") for i in range(0,self.num_workers()): self.frontend.remove("worker"+str(i)+".mnCreator") self.frontend.remove("worker"+str(i)+".cmd") self.logger.info("shutting down frontend...") self.logger.info("Goodbye.") self.running=False def stop(self): """ stop cluster and shut it down """ self._stop() def num_workers(self): """ return number of worker nodes in this cluster """ return len(self.workers()) def workers(self): """ return list of worker instances for this cluster, ordered by worker id """ if(self.is_running()): return self.worker else: return [] def get_worker(self, wid): """ return worker with id wid """ return self.workers()[wid] def create_tunnel(self,w1,w2): """ create gre tunnel connecting worker machine w1 and w2 and return name of created network interface """ tid = self.tunhelper.get_tun_nr() tkey = self.tunhelper.get_key_nr() intf = "mn_tun"+str(tid) ip1=w1.ip() ip2=w2.ip() self.logger.debug("invoking tunnel create commands on "+ip1+" and "+ip2) w1.run_script("create_tunnel.sh "+ip1+" "+ip2+" "+intf+" "+str(tkey)) w2.run_script("create_tunnel.sh "+ip2+" "+ip1+" "+intf+" "+str(tkey)) self.logger.debug("done") return intf def remove_all_tunnels(self): """ shut down all tunnels created in this cluster """ for worker in self.workers(): worker.run_script("delete_tunnels.sh") self.tunhelper = TunHelper() class Experiment: """ use this class to specify experiment. 
Experiments are created for one-time-usage and have to be stopped in the end. One cluster instance can run several experiments in sequence """ def __init__(self,cluster, topology,controller=None, is_partitioned=False, switch=UserSwitch): self.cluster = cluster self.logger = logging.getLogger(__name__) self.topology=None self.config = tools.Config(register = False) self.starttime = time.localtime() self.printed_log_info = False self.isMonitoring = False if is_partitioned: self.topology = topology else: self.origtopology = topology self.node_to_workerid = {} self.node_to_wrapper = {} self.nodes = [] self.hosts = [] self.tunnellookup = {} self.switches = [] self.switch=switch if controller: contr = controller else: contr = self.config.getController() if contr.find(":")>=0: (host, port) = contr.split(":") else: host = contr port = "6633" self.controller = partial(RemoteController, ip=host, port=int(port)) def configLinkStatus(self, src, dst, status): """Change status of src <-> dst links. src: node name dst: node name status: string {up, down}""" ws = self.get_worker(src) wd = self.get_worker(dst) if(ws==wd): # src and dst are on same worker. let mininet handle this ws.configLinkStatus(src,dst,status) else: src=self.get(src) dst=self.get(dst) intf = self.tunnellookup[(src.name,dst.name)] src.cmd("ifconfig "+intf+" "+status) dst.cmd("ifconfig "+intf+" "+status) @deprecated def find_worker(self,node): """ return worker which emulates the specified node. Replaced by get_worker """ return self.get_worker(node) def get_worker(self,node): """ return worker which emulates the specified node. 
""" if(isinstance(node,NodeWrapper)): return node.worker return self.cluster.get_worker(self.node_to_workerid[node]) def get_log_folder(self): """ returns folder to which log files will be saved """ return "/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/" def terminate_logging(self): for worker in self.cluster.workers(): worker.run_cmd("killall getRxTx.sh getMemoryUsage.sh") self.isMonitoring = False def log_cpu(self): """ log cpu useage of workers and place log files in /tmp/maxinet_logs/ """ for worker in self.cluster.workers(): self.log_cpu_of_worker(worker) def log_cpu_of_worker(self,worker): """ log cpu usage of worker and place log file in /tmp/maxinet_logs/ """ subprocess.call(["mkdir","-p","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/"]) atexit.register(worker.get_file,"/tmp/maxinet_cpu_"+str(worker.wid)+"_("+worker.hn()+").log","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/") worker.daemonize("LANG=en_EN.UTF-8 mpstat 1 | while read l; do echo -n \"`date +%s` \" ; echo \"$l \" ; done > \"/tmp/maxinet_cpu_"+str(worker.wid)+"_("+worker.hn()+").log\"") atexit.register(self._print_log_info) def log_free_memory(self): """ log memory usage of workers and place log files in /tmp/maxinet_logs Format is: timestamp,FreeMemory,Buffers,Cached """ subprocess.call(["mkdir","-p","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/"]) for worker in self.cluster.workers(): atexit.register(worker.get_file,"/tmp/maxinet_mem_"+str(worker.wid)+"_("+worker.hn()+").log","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/") memmon = worker.config.getWorkerScript("getMemoryUsage.sh") worker.daemonize(memmon + " > \"/tmp/maxinet_mem_"+str(worker.wid)+"_("+worker.hn()+").log\"") atexit.register(self._print_log_info) def log_interfaces_of_node(self,node): """ logs statistics of interfaces of node and places them in /tmp/maxinet_logs Format is: timestamp,received bytes,sent bytes,received packets,sent packets """ 
subprocess.call(["mkdir","-p","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/"]) node = self.get(node) worker = self.get_worker(node) for intf in node.intfNames(): self.log_interface(worker,intf) def log_interface(self,worker,intf): """ logs statistics of interface of worker and places them in /tmp/maxinet_logs Format is: timestamp,received bytes,sent bytes,received packets,sent packets """ atexit.register(worker.get_file,"/tmp/maxinet_intf_"+intf+"_"+str(worker.wid)+"_("+worker.hn()+").log","/tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/") ethmon = worker.config.getWorkerScript("getRxTx.sh") worker.daemonize(ethmon + " "+intf+" > \"/tmp/maxinet_intf_"+intf+"_"+str(worker.wid)+"_("+worker.hn()+").log\"") atexit.register(self._print_log_info) def monitor(self): """ logs statistics of worker interfaces and memory usage and places them in /tmp/maxinet_logs """ self.isMonitoring = True atexit.register(self._print_monitor_info) self.log_free_memory() self.log_cpu() for worker in self.cluster.workers(): intf = worker.run_cmd("ip addr show to "+worker.ip()+"/24 | head -n1 | cut -d' ' -f2 | tr -d :").strip() if(intf == ""): self.logger.warn("could not find main interface for "+worker.hn()+". no logging possible.") else: self.log_interface(worker,intf) def _print_log_info(self): if(not self.printed_log_info): self.printed_log_info=True self.logger.info("Log files will be placed in /tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/. You might want to save them somewhere else.") def _print_monitor_info(self): self.logger.info("You monitored this experiment. To generate a graph from your logs call \"/usr/local/share/MaxiNet/maxinet_plot.py /tmp/maxinet_logs/"+Tools.time_to_string(self.starttime)+"/ plot.png\" ") def CLI(self,plocals,pglobals): """ open interactive command line interface """ CLI(self,plocals,pglobals) def addNode(self, name,**params): """ add node at runtime. 
use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ wid = random.randint(0,self.cluster.num_workers()-1) if "pos" in params.keys(): wid = self.node_to_workerid[params["pos"]] del params["pos"] if "wid" in params.keys(): wid = int(params["wid"])-1 del params["wid"] worker = self.cluster.get_worker(wid) self.node_to_workerid[name]=wid self.node_to_wrapper[name]=NodeWrapper(name,self.get_worker(name)) self.nodes.append(self.node_to_wrapper[name]) def addHost(self, name, cls = None, **params): """ add host at runtime. use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ self.addNode(name,**params) self.get_worker(name).addHost(name,cls, **params) self.hosts.append(self.get(name)) return self.get(name) def addSwitch(self, name, cls = None, **params): """ add switch at runtime use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ self.addNode(name, **params) self.get_worker(name).addSwitch(name,cls, **params) self.switches.append(self.get(name)) return self.get(name) def addController(self, name="c0",controller = None, **params): """ add controller at runtime use parameter wid to specifiy worker id or pos to specify worker of existing node. otherwise random worker is chosen """ self.addNode(name, **params) self.get_worker(name).addController(name, controller, **params) return self.get(name) def name(self, node): """ return name assigned to specified network node. """ if(isinstance(node,NodeWrapper)): return node.nn return node def addLink(self, node1, node2, port1 = None, port2 = None, cls = None, autoconf=False, **params): """ add links at runtime. will create tunnels between workers if necessary, but can not create tunnels between hosts and switches. 
(links will work fine) autoconf parameter handles attach() and config calls on switches and hosts """ w1 = self.get_worker(node1) w2 = self.get_worker(node2) node1 = self.get(node1) node2 = self.get(node2) if(w1==w2): self.logger.debug("no tunneling needed") l=w1.addLink(self.name(node1),self.name(node2), port1, port2, cls, **params) else: self.logger.debug("tunneling needed") if(not ((node1 in self.switches) and (node2 in self.switches))): self.logger.error("We cannot create tunnels between switches and hosts. Sorry.") raise RuntimeError("Can't create tunnel between switch and host") intf = self.cluster.create_tunnel(w1,w2) w1.addTunnel(intf,self.name(node1), port1, cls, **params) w2.addTunnel(intf,self.name(node2), port2, cls, **params) l=((self.name(node1),intf),(self.name(node2),intf)) if(autoconf): if(node1 in self.switches): node1.attach(l[0][1]) else: node1.configDefault() if(node2 in self.switches): node2.attach(l[1][1]) else: node2.configDefault() if(self.config.runWith1500MTU()): self.setMTU(node1,1450) self.setMTU(node2,1450) def _find_topo_of_node(self,node, topos): for topo in topos: if node in topo.g.nodes(): return topo def get_node(self, nodename): """ return node that is specified by nodename :rtype : NodeWrapper """ if(self.node_to_wrapper.has_key(nodename)): return self.node_to_wrapper[nodename] else: return None def get(self,nodename): """ alias for get_node """ return self.get_node(nodename) def setup(self): """ start cluster if not yet started, assign topology parts to workers and start workers """ if(not self.cluster.is_running()): self.cluster.start() if(not self.cluster.is_running()): raise RuntimeError("Cluster won't start") self.logger.info("Clustering topology...") if(not self.topology): parti = Partitioner() parti.loadtopo(self.origtopology) self.topology = parti.partition(self.cluster.num_workers(),self.cluster.get_worker_shares()) # assigning shares to workers requires that the workers are already startet. 
elsewise we don't have a way to determine the workerid of the worker. topologies are assigned to workers in ascending workerid order self.logger.debug("Tunnels: "+str(self.topology.getTunnels())) subtopos = self.topology.getTopos() if(len(subtopos) > self.cluster.num_workers()): raise RuntimeError("Cluster does not have enough workers for given topology") for subtopo in subtopos: for node in subtopo.nodes(): self.node_to_workerid[node]=subtopos.index(subtopo) self.nodes.append(NodeWrapper(node, self.get_worker(node))) self.node_to_wrapper[node]=self.nodes[-1] if (not subtopo.isSwitch(node)): self.hosts.append(self.nodes[-1]) else: self.switches.append(self.nodes[-1]) self.logger.debug("Nodemapping: %s",self.node_to_workerid) tunnels = [[] for x in range(len(subtopos))] for tunnel in self.topology.getTunnels(): w1 = self.get_worker(tunnel[0]) w2 = self.get_worker(tunnel[1]) intf = self.cluster.create_tunnel(w1,w2) self.tunnellookup[(tunnel[0],tunnel[1])]=intf self.tunnellookup[(tunnel[1],tunnel[0])]=intf for i in range(0,2): tunnels[self.node_to_workerid[tunnel[i]]].append([intf, tunnel[i], tunnel[2]]) # Assumes that workerid = subtopoid for topo in subtopos: self.cluster.workers()[subtopos.index(topo)].set_switch(self.switch) if(self.controller): self.cluster.workers()[subtopos.index(topo)].start(topo=topo, tunnels=tunnels[subtopos.index(topo)], controller=self.controller) else: self.cluster.workers()[subtopos.index(topo)].start(topo=topo, tunnels=tunnels[subtopos.index(topo)]) if (self.config.runWith1500MTU()): for topo in subtopos: for host in topo.nodes(): self.setMTU(host,1450) def setMTU(self,host,mtu): for intf in host.intfNames(): host.cmd("ifconfig "+intf+" mtu "+str(mtu)) @deprecated def run_cmd_on_host(self,host, cmd): """ run cmd on emulated host specified by host name and return output This function is deprecated and will be removed in a future version of MaxiNet. 
Use experiment.get(node).cmd() instead """ return self.get_worker(host).run_cmd_on_host(host,cmd) def stop(self): """ stop experiment and shut down emulation on workers """ if self.isMonitoring: self.terminate_logging() for worker in self.cluster.workers(): worker.stop() self.cluster.remove_all_tunnels() class NodeWrapper: """ wrapper that allows most commands that can be used in mininet to be used in MaxiNet """ # this feels like doing rpc via rpc... def __init__(self,nodename,worker): self.nn = nodename self.worker = worker def _call(self,cmd, *params1, **params2): return self.worker.rpc(self.nn, cmd, *params1, **params2) def _get(self,name): return self.worker.rattr(self.nn, name) def __getattr__(self,name): def method(*params1,**params2): return self._call(name,*params1,**params2) # the following commands SHOULD work. no guarantee given if name in [ "cleanup", "read", "readline", "write", "terminate", "stop", "waitReadable", "sendCmd", "sendInt", "monitor", "waitOutput", "cmd", "cmdPrint", "pexec", "newPort", "addIntf", "defaultIntf", "intf", "connectionsTo", "deleteIntfs", "setARP", "setIP", "IP", "MAC", "intfIsUp", "config", "configDefault", "intfNames", "cgroupSet", "cgroupGet", "cgroupDel", "chrt", "rtInfo", "cfsInfo", "setCPUFrac", "setCPUs", "defaultDpid", "defaultIntf", "connected", "setup", "dpctl", "start", "stop", "attach", "detach", "controllerUUIDs", "checkListening"]: return method elif name in [ "name","inNamespace","params","nameToIntf","waiting"]: return self._get(name) else: raise AttributeError(name) def __repr__(self): return "NodeWrapper ("+self.nn+" at "+str(self.worker)+")"
""" Provides a JSON API for common components. """ # -*- coding: utf-8 -*- from __future__ import unicode_literals import json from django.http.response import HttpResponse from django.utils.decorators import method_decorator from django.urls import path from django.views.decorators.csrf import csrf_exempt from django.urls import include, re_path from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.exceptions import NotAcceptable, NotFound from django_filters.rest_framework import DjangoFilterBackend from rest_framework import filters, generics, permissions from rest_framework import serializers from django_q.tasks import async_task import common.models import common.serializers from InvenTree.helpers import inheritors from plugin.models import NotificationUserSetting from plugin.serializers import NotificationUserSettingSerializer class CsrfExemptMixin(object): """ Exempts the view from CSRF requirements. """ @method_decorator(csrf_exempt) def dispatch(self, *args, **kwargs): return super(CsrfExemptMixin, self).dispatch(*args, **kwargs) class WebhookView(CsrfExemptMixin, APIView): """ Endpoint for receiving webhooks. 
""" authentication_classes = [] permission_classes = [] model_class = common.models.WebhookEndpoint run_async = False def post(self, request, endpoint, *args, **kwargs): # get webhook definition self._get_webhook(endpoint, request, *args, **kwargs) # check headers headers = request.headers try: payload = json.loads(request.body) except json.decoder.JSONDecodeError as error: raise NotAcceptable(error.msg) # validate self.webhook.validate_token(payload, headers, request) # process data message = self.webhook.save_data(payload, headers, request) if self.run_async: async_task(self._process_payload, message.id) else: self._process_result( self.webhook.process_payload(message, payload, headers), message, ) data = self.webhook.get_return(payload, headers, request) return HttpResponse(data) def _process_payload(self, message_id): message = common.models.WebhookMessage.objects.get(message_id=message_id) self._process_result( self.webhook.process_payload(message, message.body, message.header), message, ) def _process_result(self, result, message): if result: message.worked_on = result message.save() else: message.delete() def _escalate_object(self, obj): classes = inheritors(obj.__class__) for cls in classes: mdl_name = cls._meta.model_name if hasattr(obj, mdl_name): return getattr(obj, mdl_name) return obj def _get_webhook(self, endpoint, request, *args, **kwargs): try: webhook = self.model_class.objects.get(endpoint_id=endpoint) self.webhook = self._escalate_object(webhook) self.webhook.init(request, *args, **kwargs) return self.webhook.process_webhook() except self.model_class.DoesNotExist: raise NotFound() class SettingsList(generics.ListAPIView): filter_backends = [ DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter, ] ordering_fields = [ 'pk', 'key', 'name', ] search_fields = [ 'key', ] class GlobalSettingsList(SettingsList): """ API endpoint for accessing a list of global settings objects """ queryset = common.models.InvenTreeSetting.objects.all() 
serializer_class = common.serializers.GlobalSettingsSerializer class GlobalSettingsPermissions(permissions.BasePermission): """ Special permission class to determine if the user is "staff" """ def has_permission(self, request, view): """ Check that the requesting user is 'admin' """ try: user = request.user return user.is_staff except AttributeError: return False class GlobalSettingsDetail(generics.RetrieveUpdateAPIView): """ Detail view for an individual "global setting" object. - User must have 'staff' status to view / edit """ queryset = common.models.InvenTreeSetting.objects.all() serializer_class = common.serializers.GlobalSettingsSerializer permission_classes = [ GlobalSettingsPermissions, ] class UserSettingsList(SettingsList): """ API endpoint for accessing a list of user settings objects """ queryset = common.models.InvenTreeUserSetting.objects.all() serializer_class = common.serializers.UserSettingsSerializer def filter_queryset(self, queryset): """ Only list settings which apply to the current user """ try: user = self.request.user except AttributeError: return common.models.InvenTreeUserSetting.objects.none() queryset = super().filter_queryset(queryset) queryset = queryset.filter(user=user) return queryset class UserSettingsPermissions(permissions.BasePermission): """ Special permission class to determine if the user can view / edit a particular setting """ def has_object_permission(self, request, view, obj): try: user = request.user except AttributeError: return False return user == obj.user class UserSettingsDetail(generics.RetrieveUpdateAPIView): """ Detail view for an individual "user setting" object - User can only view / edit settings their own settings objects """ queryset = common.models.InvenTreeUserSetting.objects.all() serializer_class = common.serializers.UserSettingsSerializer permission_classes = [ UserSettingsPermissions, ] class NotificationUserSettingsList(SettingsList): """ API endpoint for accessing a list of notification user 
settings objects """ queryset = NotificationUserSetting.objects.all() serializer_class = NotificationUserSettingSerializer def filter_queryset(self, queryset): """ Only list settings which apply to the current user """ try: user = self.request.user except AttributeError: return NotificationUserSetting.objects.none() queryset = super().filter_queryset(queryset) queryset = queryset.filter(user=user) return queryset class NotificationUserSettingsDetail(generics.RetrieveUpdateAPIView): """ Detail view for an individual "notification user setting" object - User can only view / edit settings their own settings objects """ queryset = NotificationUserSetting.objects.all() serializer_class = NotificationUserSettingSerializer permission_classes = [ UserSettingsPermissions, ] class NotificationList(generics.ListAPIView): queryset = common.models.NotificationMessage.objects.all() serializer_class = common.serializers.NotificationMessageSerializer filter_backends = [ DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter, ] ordering_fields = [ 'category', 'name', 'read', ] search_fields = [ 'name', 'message', ] filter_fields = [ 'category', 'read', ] def filter_queryset(self, queryset): """ Only list notifications which apply to the current user """ try: user = self.request.user except AttributeError: return common.models.NotificationMessage.objects.none() queryset = super().filter_queryset(queryset) queryset = queryset.filter(user=user) return queryset class NotificationDetail(generics.RetrieveUpdateDestroyAPIView): """ Detail view for an individual notification object - User can only view / delete their own notification objects """ queryset = common.models.NotificationMessage.objects.all() serializer_class = common.serializers.NotificationMessageSerializer permission_classes = [ UserSettingsPermissions, ] class NotificationReadEdit(generics.CreateAPIView): """ general API endpoint to manipulate read state of a notification """ queryset = 
common.models.NotificationMessage.objects.all() serializer_class = common.serializers.NotificationReadSerializer permission_classes = [ UserSettingsPermissions, ] def get_serializer_context(self): context = super().get_serializer_context() if self.request: context['instance'] = self.get_object() return context def perform_create(self, serializer): message = self.get_object() try: message.read = self.target message.save() except Exception as exc: raise serializers.ValidationError(detail=serializers.as_serializer_error(exc)) class NotificationRead(NotificationReadEdit): """ API endpoint to mark a notification as read. """ target = True class NotificationUnread(NotificationReadEdit): """ API endpoint to mark a notification as unread. """ target = False class NotificationReadAll(generics.RetrieveAPIView): """ API endpoint to mark all notifications as read. """ queryset = common.models.NotificationMessage.objects.all() permission_classes = [ UserSettingsPermissions, ] def get(self, request, *args, **kwargs): try: self.queryset.filter(user=request.user, read=False).update(read=True) return Response({'status': 'ok'}) except Exception as exc: raise serializers.ValidationError(detail=serializers.as_serializer_error(exc)) settings_api_urls = [ # User settings re_path(r'^user/', include([ # User Settings Detail re_path(r'^(?P<pk>\d+)/', UserSettingsDetail.as_view(), name='api-user-setting-detail'), # User Settings List re_path(r'^.*$', UserSettingsList.as_view(), name='api-user-setting-list'), ])), # Notification settings re_path(r'^notification/', include([ # Notification Settings Detail re_path(r'^(?P<pk>\d+)/', NotificationUserSettingsDetail.as_view(), name='api-notification-setting-detail'), # Notification Settings List re_path(r'^.*$', NotificationUserSettingsList.as_view(), name='api-notifcation-setting-list'), ])), # Global settings re_path(r'^global/', include([ # Global Settings Detail re_path(r'^(?P<pk>\d+)/', GlobalSettingsDetail.as_view(), 
name='api-global-setting-detail'), # Global Settings List re_path(r'^.*$', GlobalSettingsList.as_view(), name='api-global-setting-list'), ])), ] common_api_urls = [ # Webhooks path('webhook/<slug:endpoint>/', WebhookView.as_view(), name='api-webhook'), # Notifications re_path(r'^notifications/', include([ # Individual purchase order detail URLs re_path(r'^(?P<pk>\d+)/', include([ re_path(r'^read/', NotificationRead.as_view(), name='api-notifications-read'), re_path(r'^unread/', NotificationUnread.as_view(), name='api-notifications-unread'), re_path(r'.*$', NotificationDetail.as_view(), name='api-notifications-detail'), ])), # Read all re_path(r'^readall/', NotificationReadAll.as_view(), name='api-notifications-readall'), # Notification messages list re_path(r'^.*$', NotificationList.as_view(), name='api-notifications-list'), ])), ] only safety test - api is not callable as non-user """ Provides a JSON API for common components. """ # -*- coding: utf-8 -*- from __future__ import unicode_literals import json from django.http.response import HttpResponse from django.utils.decorators import method_decorator from django.urls import path from django.views.decorators.csrf import csrf_exempt from django.urls import include, re_path from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.exceptions import NotAcceptable, NotFound from django_filters.rest_framework import DjangoFilterBackend from rest_framework import filters, generics, permissions from rest_framework import serializers from django_q.tasks import async_task import common.models import common.serializers from InvenTree.helpers import inheritors from plugin.models import NotificationUserSetting from plugin.serializers import NotificationUserSettingSerializer class CsrfExemptMixin(object): """ Exempts the view from CSRF requirements. 
""" @method_decorator(csrf_exempt) def dispatch(self, *args, **kwargs): return super(CsrfExemptMixin, self).dispatch(*args, **kwargs) class WebhookView(CsrfExemptMixin, APIView): """ Endpoint for receiving webhooks. """ authentication_classes = [] permission_classes = [] model_class = common.models.WebhookEndpoint run_async = False def post(self, request, endpoint, *args, **kwargs): # get webhook definition self._get_webhook(endpoint, request, *args, **kwargs) # check headers headers = request.headers try: payload = json.loads(request.body) except json.decoder.JSONDecodeError as error: raise NotAcceptable(error.msg) # validate self.webhook.validate_token(payload, headers, request) # process data message = self.webhook.save_data(payload, headers, request) if self.run_async: async_task(self._process_payload, message.id) else: self._process_result( self.webhook.process_payload(message, payload, headers), message, ) data = self.webhook.get_return(payload, headers, request) return HttpResponse(data) def _process_payload(self, message_id): message = common.models.WebhookMessage.objects.get(message_id=message_id) self._process_result( self.webhook.process_payload(message, message.body, message.header), message, ) def _process_result(self, result, message): if result: message.worked_on = result message.save() else: message.delete() def _escalate_object(self, obj): classes = inheritors(obj.__class__) for cls in classes: mdl_name = cls._meta.model_name if hasattr(obj, mdl_name): return getattr(obj, mdl_name) return obj def _get_webhook(self, endpoint, request, *args, **kwargs): try: webhook = self.model_class.objects.get(endpoint_id=endpoint) self.webhook = self._escalate_object(webhook) self.webhook.init(request, *args, **kwargs) return self.webhook.process_webhook() except self.model_class.DoesNotExist: raise NotFound() class SettingsList(generics.ListAPIView): filter_backends = [ DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter, ] ordering_fields = [ 
'pk', 'key', 'name', ] search_fields = [ 'key', ] class GlobalSettingsList(SettingsList): """ API endpoint for accessing a list of global settings objects """ queryset = common.models.InvenTreeSetting.objects.all() serializer_class = common.serializers.GlobalSettingsSerializer class GlobalSettingsPermissions(permissions.BasePermission): """ Special permission class to determine if the user is "staff" """ def has_permission(self, request, view): """ Check that the requesting user is 'admin' """ try: user = request.user return user.is_staff except AttributeError: return False class GlobalSettingsDetail(generics.RetrieveUpdateAPIView): """ Detail view for an individual "global setting" object. - User must have 'staff' status to view / edit """ queryset = common.models.InvenTreeSetting.objects.all() serializer_class = common.serializers.GlobalSettingsSerializer permission_classes = [ GlobalSettingsPermissions, ] class UserSettingsList(SettingsList): """ API endpoint for accessing a list of user settings objects """ queryset = common.models.InvenTreeUserSetting.objects.all() serializer_class = common.serializers.UserSettingsSerializer def filter_queryset(self, queryset): """ Only list settings which apply to the current user """ try: user = self.request.user except AttributeError: return common.models.InvenTreeUserSetting.objects.none() queryset = super().filter_queryset(queryset) queryset = queryset.filter(user=user) return queryset class UserSettingsPermissions(permissions.BasePermission): """ Special permission class to determine if the user can view / edit a particular setting """ def has_object_permission(self, request, view, obj): try: user = request.user except AttributeError: # pragma: no cover return False return user == obj.user class UserSettingsDetail(generics.RetrieveUpdateAPIView): """ Detail view for an individual "user setting" object - User can only view / edit settings their own settings objects """ queryset = 
common.models.InvenTreeUserSetting.objects.all() serializer_class = common.serializers.UserSettingsSerializer permission_classes = [ UserSettingsPermissions, ] class NotificationUserSettingsList(SettingsList): """ API endpoint for accessing a list of notification user settings objects """ queryset = NotificationUserSetting.objects.all() serializer_class = NotificationUserSettingSerializer def filter_queryset(self, queryset): """ Only list settings which apply to the current user """ try: user = self.request.user except AttributeError: return NotificationUserSetting.objects.none() queryset = super().filter_queryset(queryset) queryset = queryset.filter(user=user) return queryset class NotificationUserSettingsDetail(generics.RetrieveUpdateAPIView): """ Detail view for an individual "notification user setting" object - User can only view / edit settings their own settings objects """ queryset = NotificationUserSetting.objects.all() serializer_class = NotificationUserSettingSerializer permission_classes = [ UserSettingsPermissions, ] class NotificationList(generics.ListAPIView): queryset = common.models.NotificationMessage.objects.all() serializer_class = common.serializers.NotificationMessageSerializer filter_backends = [ DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter, ] ordering_fields = [ 'category', 'name', 'read', ] search_fields = [ 'name', 'message', ] filter_fields = [ 'category', 'read', ] def filter_queryset(self, queryset): """ Only list notifications which apply to the current user """ try: user = self.request.user except AttributeError: return common.models.NotificationMessage.objects.none() queryset = super().filter_queryset(queryset) queryset = queryset.filter(user=user) return queryset class NotificationDetail(generics.RetrieveUpdateDestroyAPIView): """ Detail view for an individual notification object - User can only view / delete their own notification objects """ queryset = common.models.NotificationMessage.objects.all() 
serializer_class = common.serializers.NotificationMessageSerializer permission_classes = [ UserSettingsPermissions, ] class NotificationReadEdit(generics.CreateAPIView): """ general API endpoint to manipulate read state of a notification """ queryset = common.models.NotificationMessage.objects.all() serializer_class = common.serializers.NotificationReadSerializer permission_classes = [ UserSettingsPermissions, ] def get_serializer_context(self): context = super().get_serializer_context() if self.request: context['instance'] = self.get_object() return context def perform_create(self, serializer): message = self.get_object() try: message.read = self.target message.save() except Exception as exc: raise serializers.ValidationError(detail=serializers.as_serializer_error(exc)) class NotificationRead(NotificationReadEdit): """ API endpoint to mark a notification as read. """ target = True class NotificationUnread(NotificationReadEdit): """ API endpoint to mark a notification as unread. """ target = False class NotificationReadAll(generics.RetrieveAPIView): """ API endpoint to mark all notifications as read. 
""" queryset = common.models.NotificationMessage.objects.all() permission_classes = [ UserSettingsPermissions, ] def get(self, request, *args, **kwargs): try: self.queryset.filter(user=request.user, read=False).update(read=True) return Response({'status': 'ok'}) except Exception as exc: raise serializers.ValidationError(detail=serializers.as_serializer_error(exc)) settings_api_urls = [ # User settings re_path(r'^user/', include([ # User Settings Detail re_path(r'^(?P<pk>\d+)/', UserSettingsDetail.as_view(), name='api-user-setting-detail'), # User Settings List re_path(r'^.*$', UserSettingsList.as_view(), name='api-user-setting-list'), ])), # Notification settings re_path(r'^notification/', include([ # Notification Settings Detail re_path(r'^(?P<pk>\d+)/', NotificationUserSettingsDetail.as_view(), name='api-notification-setting-detail'), # Notification Settings List re_path(r'^.*$', NotificationUserSettingsList.as_view(), name='api-notifcation-setting-list'), ])), # Global settings re_path(r'^global/', include([ # Global Settings Detail re_path(r'^(?P<pk>\d+)/', GlobalSettingsDetail.as_view(), name='api-global-setting-detail'), # Global Settings List re_path(r'^.*$', GlobalSettingsList.as_view(), name='api-global-setting-list'), ])), ] common_api_urls = [ # Webhooks path('webhook/<slug:endpoint>/', WebhookView.as_view(), name='api-webhook'), # Notifications re_path(r'^notifications/', include([ # Individual purchase order detail URLs re_path(r'^(?P<pk>\d+)/', include([ re_path(r'^read/', NotificationRead.as_view(), name='api-notifications-read'), re_path(r'^unread/', NotificationUnread.as_view(), name='api-notifications-unread'), re_path(r'.*$', NotificationDetail.as_view(), name='api-notifications-detail'), ])), # Read all re_path(r'^readall/', NotificationReadAll.as_view(), name='api-notifications-readall'), # Notification messages list re_path(r'^.*$', NotificationList.as_view(), name='api-notifications-list'), ])), ]
############################################################################## # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Libtiff(AutotoolsPackage): """libtiff graphics format library""" homepage = "http://www.simplesystems.org/libtiff/" url = "http://download.osgeo.org/libtiff/tiff-4.0.3.tar.gz" version('4.0.6', 'd1d2e940dea0b5ad435f21f03d96dd72') version('4.0.3', '051c1068e6a0627f461948c365290410') depends_on('jpeg') depends_on('zlib') depends_on('xz') Add latest version of libtiff (#4067) ############################################################################## # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. 
# LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Libtiff(AutotoolsPackage): """libtiff graphics format library""" homepage = "http://www.simplesystems.org/libtiff/" url = "http://download.osgeo.org/libtiff/tiff-4.0.7.tar.gz" version('4.0.7', '77ae928d2c6b7fb46a21c3a29325157b') version('4.0.6', 'd1d2e940dea0b5ad435f21f03d96dd72') version('4.0.3', '051c1068e6a0627f461948c365290410') depends_on('jpeg') depends_on('zlib') depends_on('xz')
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import sys import os import numpy import numpy.random import random import itertools import time import math import copy import cPickle import struct from nupic.bindings.math import SM32, SM_01_32_32, count_gte, GetNTAReal from nupic.bindings.algorithms import Inhibition2, cpp_overlap, cpp_overlap_sbm from nupic.bindings.algorithms import adjustMasterValidPermanence from nupic.bindings.math import Random as NupicRandom from nupic.math.cross import cross from operator import itemgetter import nupic.research.fdrutilities as fdru realDType = GetNTAReal() gPylabInitialized = False # kDutyCycleFactor add dutyCycleAfterInh to overlap in Inhibition step to be a # tie breaker kDutyCycleFactor = 0.01 ################################################################################ def _extractCallingMethodArgs(): """ Returns args dictionary from the calling method """ import inspect import copy callingFrame = inspect.stack()[1][0] argNames, _, _, frameLocalVarDict = 
inspect.getargvalues(callingFrame) argNames.remove("self") args = copy.copy(frameLocalVarDict) for varName in frameLocalVarDict: if varName not in argNames: args.pop(varName) return args """ Class for spatial pooling based on fixed random distributed representation (FDR) """ class FDRCSpatial2(object): """ This version of FDRCSpatial inlcudes adaptive receptive fields, no-dupe rules and gradual boosting. It supports 1-D and 2-D topologies with cloning """ def __init__(self, inputShape = (32, 32), inputBorder = 8, inputDensity = 1.0, coincidencesShape = (48, 48), coincInputRadius = 16, coincInputPoolPct = 1.0, gaussianDist = False, commonDistributions = False, localAreaDensity = -1.0, numActivePerInhArea = 10.0, stimulusThreshold = 0, synPermInactiveDec = 0.01, synPermActiveInc = 0.1, synPermActiveSharedDec = 0.0, synPermOrphanDec = 0.0, synPermConnected = 0.10, minPctDutyCycleBeforeInh = 0.001, minPctDutyCycleAfterInh = 0.001, dutyCyclePeriod = 1000, maxFiringBoost = 10.0, maxSSFiringBoost = 2.0, maxSynPermBoost = 10.0, minDistance = 0.0, cloneMap = None, numCloneMasters = -1, seed = -1, spVerbosity = 0, printPeriodicStats = 0, testMode = False, globalInhibition = False, spReconstructionParam = "unweighted_mean", useHighTier = True, randomSP = False, ): """ Parameters: ---------------------------- inputShape: The dimensions of the input vector. Format is (height, width) e.g. (24, 72). If the input is from a sensor, it is interpreted as having a 2-D topology of 24 pixels high and 72 wide. inputBorder: The first column from an edge will be centered over an input which is 'inputBorder' inputs from the edge. inputDensity: The density of the input. This is only to aid in figuring out the initial number of connected synapses to place on each column. The lower the inputDensity, the more initial connections will be assigned to each column. coincidencesShape: The dimensions of column layout. Format is (height, width) e.g. 
(80,100) means a total of 80*100 = 800 are arranged in a 2-D topology with 80 rows and 100 columns. coincInputRadius: This defines the max radius of the receptive field of each column. This is used to limit memory requirements and processing time. It could be set large enough to encompass the entire input field and the SP would still work fine, but require more memory and processing time. This parameter defines a square area: a column will have a max square RF with sides of length 2 * coincInputRadius + 1. coincInputPoolPct What percent of the columns's receptive field is available for potential synapses. At initialization time, we will choose coincInputPoolPct * (2*coincInputRadius + 1)^2 potential synapses from the receptive field. gaussianDist: If true, the initial permanences assigned to each column will have a gaussian distribution to them, making the column favor inputs directly below it over inputs farther away. If false, the initial permanences will have a random distribution across the column's entire potential receptive field. commonDistributions: If set to True (the default, faster startup time), each column will be given the same initial permanence values. This is normally OK when you will be training, but if you will be sticking with the untrained network, you will want to set this to False (which makes startup take longer). localAreaDensity: The desired density of active columns within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected receptive fields of all columns). The inhibition logic will insure that at most N columns remain ON within a local inhibition area, where N = localAreaDensity * (total number of columns in inhibition area). numActivePerInhArea: An alternate way to control the density of the active columns. If numActivePerInhArea is specified then localAreaDensity must be -1, and vice versa. 
When using numActivePerInhArea, the inhibition logic will insure that at most 'numActivePerInhArea' columns remain ON within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected receptive fields of all columns). When using this method, as columns learn and grow their effective receptive fields, the inhibitionRadius will grow, and hence the net density of the active columns will *decrease*. This is in contrast to the localAreaDensity method, which keeps the density of active columns the same regardless of the size of their receptive fields. stimulusThreshold: This is a number specifying the minimum number of synapses that must be on in order for a columns to turn ON. The purpose of this is to prevent noise input from activating columns. synPermInactiveDec: How much an inactive synapse is decremented, specified as a percent of a fully grown synapse. synPermActiveInc: How much to increase the permanence of an active synapse, specified as a percent of a fully grown synapse. synPermActiveSharedDec: How much to decrease the permanence of an active synapse which is connected to another column that is active at the same time. Specified as a percent of a fully grown synapse. synPermOrphanDec: How much to decrease the permanence of an active synapse on a column which has high overlap with the input, but was inhibited (an "orphan" column). synPermConnected: The default connected threshold. Any synapse whose permanence value is above the connected threshold is a "connected synapse", meaning it can contribute to the cell's firing. Typical value is 0.10. Cells whose activity level before inhibition falls below minDutyCycleBeforeInh will have their own internal synPermConnectedCell threshold set below this default value. 
(This concept applies to both SP and TP and so 'cells' is correct here as opposed to 'columns') minPctDutyCycleBeforeInh: A number between 0 and 1.0, used to set a floor on how often a column should have at least stimulusThreshold active inputs. Periodically, each column looks at the duty cycle before inhibition of all other column within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleBeforeInh * max(other columns' duty cycles). On each iteration, any column whose duty cycle before inhibition falls below this computed value will get all of its permanence values boosted up by synPermActiveInc. Raising all permanences in response to a sub-par duty cycle before inhibition allows a cell to search for new inputs when either its previously learned inputs are no longer ever active, or when the vast majority of them have been "hijacked" by other columns due to the no-dupe rule. minPctDutyCycleAfterInh: A number between 0 and 1.0, used to set a floor on how often a column should turn ON after inhibition. Periodically, each column looks at the duty cycle after inhibition of all other columns within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleAfterInh * max(other columns' duty cycles). On each iteration, any column whose duty cycle after inhibition falls below this computed value will get its internal boost factor increased. dutyCyclePeriod: The period used to calculate duty cycles. Higher values make it take longer to respond to changes in boost or synPerConnectedCell. Shorter values make it more unstable and likely to oscillate. maxFiringBoost: The maximum firing level boost factor. Each column's raw firing strength gets multiplied by a boost factor before it gets considered for inhibition. The actual boost factor for a column is number between 1.0 and maxFiringBoost. 
A boost factor of 1.0 is used if the duty cycle is >= minDutyCycle, maxFiringBoost is used if the duty cycle is 0, and any duty cycle in between is linearly extrapolated from these 2 endpoints. maxSSFiringBoost: Once a column turns ON, it's boost will immediately fall down to maxSSFiringBoost if it is above it. This is accomplished by internally raising it's computed duty cycle accordingly. This prevents a cell which has had it's boost raised extremely high from turning ON for too many diverse inputs in a row within a short period of time. maxSynPermBoost: The maximum synPermActiveInc boost factor. Each column's synPermActiveInc gets multiplied by a boost factor to make the column more or less likely to form new connections. The actual boost factor used is a number between 1.0 and maxSynPermBoost. A boost factor of 1.0 is used if the duty cycle is >= minDutyCycle, maxSynPermBoost is used if the duty cycle is 0, and any duty cycle in between is linearly extrapolated from these 2 endpoints. minDistance: This parameter impacts how finely the input space is quantized. It is a value between 0 and 1.0. If set to 0, then every unique input presentation will generate a unique output representation, within the limits of the total number of columns available. Higher values will tend to group similar inputs together into the same output representation. Only column which overlap with the input less than 100*(1.0-minDistance) percent will have a possibility of losing the inhibition competition against a boosted, 'bored' cell. cloneMap: An array (numColumnsHigh, numColumnsWide) that contains the clone index to use for each column. numCloneMasters: The number of distinct clones in the map. This is just outputCloningWidth*outputCloningHeight. seed: Seed for our own pseudo-random number generator. 
spVerbosity: spVerbosity level: 0, 1, 2, or 3 printPeriodicStats: If > 0, then every 'printPeriodicStats' iterations, the SP will print to stdout some statistics related to learning, such as the average pct under and over-coverage, average number of active columns, etc. in the last 'showLearningStats' iterations. testMode: If True, run the SP in test mode. This runs both the C++ and python implementations on all internal functions that support both and insures that both produce the same result. globalInhibition: If true, enforce the localAreaDensity/numActivePerInhArea globally over the entire region, ignoring any dynamically calculated inhibitionRadius. In effect, this is the same as setting the inhibition radius to include the entire region. spReconstructionParam:Specifies which SP reconstruction optimization to be used. Each column's firing strength is weighted by the percent Overlap, permanence or duty Cycle if this parameter is set to 'pctOverlap', 'permanence', or 'dutycycle' respectively. If parameter is set to 'maximum_firingstrength', the maximum of the firing strengths (weighted by permanence) is used instead of the weighted sum. useHighTier: The "high tier" feature is to deal with sparse input spaces. If over (1-minDistance) percent of a column's connected synapses are active, it will automatically become one of the winning columns. If False, columns are activated based on their absolute overlap with the input. Also, boosting will be disabled to prevent pattern oscillation randomSP: If True, the SP will not update its permanences and will instead use it's initial configuration for all inferences. """ #-------------------------------------------------------------------------- # Save our __init__ args for debugging self._initArgsDict = _extractCallingMethodArgs() # Handle people instantiating us directly that don't pass in a cloneMap... 
# This creates a clone map without any cloning if cloneMap is None: cloneMap, numCloneMasters = fdru.makeCloneMap( columnsShape=coincidencesShape, outputCloningWidth=coincidencesShape[1], outputCloningHeight=coincidencesShape[0] ) self.numCloneMasters = numCloneMasters self._cloneMapFlat = cloneMap.reshape((-1,)) # Save creation parameters self.inputShape = (int(inputShape[0]), int(inputShape[1])) self.inputBorder = inputBorder self.inputDensity = inputDensity self.coincidencesShape = coincidencesShape self.coincInputRadius = coincInputRadius self.coincInputPoolPct = coincInputPoolPct self.gaussianDist = gaussianDist self.commonDistributions = commonDistributions self.localAreaDensity = localAreaDensity self.numActivePerInhArea = numActivePerInhArea self.stimulusThreshold = stimulusThreshold self.synPermInactiveDec = synPermInactiveDec self.synPermActiveInc = synPermActiveInc self.synPermActiveSharedDec = synPermActiveSharedDec self.synPermOrphanDec = synPermOrphanDec self.synPermConnected = synPermConnected self.minPctDutyCycleBeforeInh = minPctDutyCycleBeforeInh self.minPctDutyCycleAfterInh = minPctDutyCycleAfterInh self.dutyCyclePeriod = dutyCyclePeriod self.maxFiringBoost = maxFiringBoost self.maxSSFiringBoost = maxSSFiringBoost self.maxSynPermBoost = maxSynPermBoost self.minDistance = minDistance self.spVerbosity = spVerbosity self.printPeriodicStats = printPeriodicStats self.testMode = testMode self.globalInhibition = globalInhibition self.spReconstructionParam = spReconstructionParam self.useHighTier= useHighTier != 0 self.randomSP = randomSP != 0 if not self.useHighTier: self.minPctDutyCycleAfterInh = 0 self.fileCount = 0 self._runIter = 0 # Start at iteration #0 self._iterNum = 0 # Number of learning iterations self._inferenceIterNum = 0 # Number of inference iterations # Print creation parameters if spVerbosity >= 3: self.printParams() print "seed =", seed # Check for errors assert (self.numActivePerInhArea == -1 or self.localAreaDensity == -1) assert 
(self.inputShape[1] > 2*self.inputBorder) # 1D layouts have inputShape[0] == 1 if self.inputShape[0] > 1: assert (self.inputShape[0] > 2*self.inputBorder) # Calculate other member variables self._coincCount = int(self.coincidencesShape[0] * \ self.coincidencesShape[1]) self._inputCount = int(self.inputShape[0] * self.inputShape[1]) self._synPermMin = 0.0 self._synPermMax = 1.0 self._pylabInitialized = False # The rate at which we bump up all synapses in response to not passing # stimulusThreshold self._synPermBelowStimulusInc = self.synPermConnected / 10.0 self._hasTopology = True if self.inputShape[0] == 1: # 1-D layout self._coincRFShape = (1, (2*coincInputRadius + 1)) # If we only have 1 column of coincidences, then assume the user wants # each coincidence to cover the entire input if self.coincidencesShape[1] == 1: assert self.inputBorder >= (self.inputShape[1] - 1) // 2 assert coincInputRadius >= (self.inputShape[1] - 1) // 2 self._coincRFShape = (1, self.inputShape[1]) self._hasTopology = False else: # 2-D layout self._coincRFShape = ((2*coincInputRadius + 1), (2*coincInputRadius + 1)) # This gets set to True in finishLearning. Once set, we don't allow # learning anymore and delete all member variables needed only for # learning self._doneLearning = False # Init random seed self._seed(seed) # Hard-coded in the current case self.randomTieBreakingFraction = 0.5 # The permanence values used to initialize the master coincs are from # this initial permanence array # The initial permanence is gaussian shaped with mean at center and variance # carefully chosen to have connected synapses initialPermanence = self._initialPermanence() # masterPotentialM, masterPermanenceM and masterConnectedM are numpy arrays # of dimensions (coincCount, coincRfShape[0], coincRFShape[1]) # # masterPotentialM: Keeps track of the potential synapses of each # master. Potential synapses are marked as True # masterPermanenceM: Holds the permanence values of the potential synapses. 
# The values can range from 0.0 to 1.0 # masterConnectedM: Keeps track of the connected synapses of each # master. Connected synapses are the potential synapses # with permanence values greater than synPermConnected. self._masterPotentialM, self._masterPermanenceM = \ self._makeMasterCoincidences(self.numCloneMasters, self._coincRFShape, self.coincInputPoolPct, initialPermanence, self.random) # Update connected coincidences, the connected synapses have permanence # values greater than synPermConnected self._masterConnectedM = [] dense = numpy.zeros(self._coincRFShape) for i in xrange(self.numCloneMasters): self._masterConnectedM.append(SM_01_32_32(dense)) # coinc sizes are used in normalizing the raw overlaps self._masterConnectedCoincSizes = numpy.empty(self.numCloneMasters, 'uint32') # Make one mondo coincidence matrix for all cells at once. It has one row # per cell. The width of each row is the entire input width. There will be # ones in each row where that cell has connections. When we have cloning, # and we modify the connections for a clone master, we will update all # cells that share that clone master with the new connections. 
self._allConnectedM = SM_01_32_32(self._inputCount) self._allConnectedM.resize(self._coincCount, self._inputCount) # ========================================================================= # Initialize the dutyCycles and boost factors per clone master self._dutyCycleBeforeInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._minDutyCycleBeforeInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._dutyCycleAfterInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._minDutyCycleAfterInh = numpy.zeros(self.numCloneMasters, dtype=realDType) # TODO: We don't need to store _boostFactors, can be calculated from duty # cycle self._firingBoostFactors = numpy.ones(self.numCloneMasters, dtype=realDType) if self.useHighTier: self._firingBoostFactors *= maxFiringBoost # Selectively turn on/off C++ for various methods if self.testMode: self._computeOverlapsImp = "py" # "py or "cpp" or "test" self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test" else: self._computeOverlapsImp = "py" # "py or "cpp" or "test" self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test" # This is used to hold our learning stats (via getLearningStats()) self._learningStats = dict() # These will hold our random state, which we return from __getstate__ and # reseed our random number generators from in __setstate__ so that # a saved/restored SP produces the exact same behavior as one that # continues. 
This behavior allows us to write unit tests that verify that # the behavior of an SP does not change due to saving/loading from a # checkpoint self._randomState = None self._numpyRandomState = None self._nupicRandomState = None # ========================================================================= # Init ephemeral members # This also calculates the slices and global inhibitionRadius and allocates # the inhibitionObj self._initEphemerals() # ========================================================================= # If we have no cloning, make sure no column has potential or connected # synapses outside the input area if self.numCloneMasters == self._coincCount: validMask = numpy.zeros(self._coincRFShape, dtype=realDType) for masterNum in xrange(self._coincCount): coincSlice = self._coincSlices[masterNum] validMask.fill(0) validMask[coincSlice] = 1 self._masterPotentialM[masterNum].logicalAnd(SM_01_32_32(validMask)) self._masterPermanenceM[masterNum].elementMultiply(validMask) # Raise all permanences up until the number of connected is above # our desired target, self._raiseAllPermanences(masterNum, minConnections = self.stimulusThreshold / self.inputDensity) # ========================================================================= #self._cellMappings = [] #for cell in xrange(self._coincCount): # inputSlice = self._inputSlices[cell] # mapping = self._inputLayout[inputSlice] # self._cellMappings.append(mapping) # ========================================================================= # Calculate the number of connected synapses in each master coincidence now self._updateConnectedCoincidences() ############################################################################# def _getEphemeralMembers(self): """ List of our member variables that we don't need to be saved """ return [ '_inputLayout', '_cellsForMaster', '_columnCenters', #'_cellRFClipped', '_inputSlices', '_coincSlices', '_activeInput', '_permChanges', '_dupeInput', '_onCells', '_masterOnCells', 
  def _initEphemerals(self):
    """
    Initialize all ephemeral members after being restored to a pickled state.

    Allocates all scratch buffers, re-computes the numpy slices (which are
    not picklable) via _setSlices(), and rebinds the language-specific
    method pointers selected by _computeOverlapsImp /
    _updatePermanenceGivenInputImp.
    """
    # Used by functions which refers to inputs in absolute space
    # getLearnedCM, cm,....
    self._inputLayout = numpy.arange(self._inputCount,
                                     dtype=numpy.uint32).reshape(self.inputShape)

    # This array returns the list of cell indices that correspond to each
    # master (only needed when cloning is on)
    cloningOn = (self.numCloneMasters != self._coincCount)
    if cloningOn:
      self._cellsForMaster = []
      for masterNum in xrange(self.numCloneMasters):
        self._cellsForMaster.append(
            numpy.where(self._cloneMapFlat == masterNum)[0])
    else:
      self._cellsForMaster = None

    # TODO: slices are not required for the C++ helper functions
    # Figure out the slices of shaped input that each column sees...
    # Figure out the valid region of each column
    # The reason these slices are in initEphemerals is because numpy slices
    # can't be pickled
    self._setSlices()

    # This holds the output of the inhibition computation - which cells are
    # on after inhibition
    self._onCells = numpy.zeros(self._coincCount, dtype=realDType)
    self._masterOnCells = numpy.zeros(self.numCloneMasters, dtype=realDType)
    self._onCellIndices = numpy.zeros(self._coincCount, dtype='uint32')

    # The inhibition object gets allocated by _updateInhibitionObj() during
    # the first compute and re-allocated periodically during learning
    self._inhibitionObj = None
    self._rfRadiusAvg = 0   # Also calculated by _updateInhibitionObj
    self._rfRadiusMin = 0
    self._rfRadiusMax = 0

    # Used by the caller to optionally cache the dense output
    self._denseOutput = None

    # This holds the overlaps (in absolute number of connected synapses) of
    # each coinc with input
    self._overlaps = numpy.zeros(self._coincCount, dtype=realDType)

    # This holds the percent overlaps (number of active inputs / number of
    # connected synapses) of each coinc with input
    self._pctOverlaps = numpy.zeros(self._coincCount, dtype=realDType)

    # This is the value of the anomaly score for each column (after
    # inhibition)
    self._anomalyScores = numpy.zeros_like(self._overlaps)

    # This holds the overlaps before stimulus threshold - used for verbose
    # messages only
    self._overlapsBST = numpy.zeros(self._coincCount, dtype=realDType)

    # This holds the number of coincs connected to an input
    if not self._doneLearning:
      self._inputUse = numpy.zeros(self.inputShape, dtype=realDType)

    # These are boolean matrices, the same shape as the input
    if not self._doneLearning:
      self._activeInput = numpy.zeros(self.inputShape, dtype='bool')
      self._dupeInput = numpy.zeros(self.inputShape, dtype='bool')

    # This is used to hold self.synPermActiveInc where the input is on
    # and -self.synPermInctiveDec where the input is off
    if not self._doneLearning:
      self._permChanges = numpy.zeros(self.inputShape, dtype=realDType)

    # These are used to compute and hold the output from topDownCompute
    # self._topDownOut = numpy.zeros(self.inputShape, dtype=realDType)
    # self._topDownParentCounts = numpy.zeros(self.inputShape, dtype='int')

    # Fill in the updatePermanenceGivenInput method pointer, which depends on
    # chosen language
    if self._updatePermanenceGivenInputImp == "py":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputPy
    elif self._updatePermanenceGivenInputImp == "cpp":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputCPP
    elif self._updatePermanenceGivenInputImp == "test":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputTest
    else:
      assert (False)

    # Fill in the computeOverlaps method pointer, which depends on
    # chosen language
    if self._computeOverlapsImp == "py":
      self._computeOverlapsFP = self._computeOverlapsPy
    elif self._computeOverlapsImp == "cpp":
      self._computeOverlapsFP = self._computeOverlapsCPP
    elif self._computeOverlapsImp == "test":
      self._computeOverlapsFP = self._computeOverlapsTest
    else:
      assert (False)

    # ----------------------------------------------------------------------
    # These variables are used for keeping track of learning statistics (when
    # self.printPeriodicStats is used).
    self._periodicStatsCreate()
  def compute(self, flatInput, learn=False, infer=True, computeAnomaly=False):
    """ Compute with the current input vector.

    Parameters:
    ----------------------------
    flatInput:      the input vector: 1-D numpy array of length
                    self._inputCount, dtype realDType
    learn:          if True, adapt duty cycles, permanences and boost factors
                    based on this input
    infer:          whether to do inference or not
    computeAnomaly: if True, also fill in self._anomalyScores for this input
    retval:         self._onCells -- dense array of the cells that are active
                    after inhibition
    """
    # If we are using a random SP, ignore the learn parameter
    if self.randomSP:
      learn = False

    # If finishLearning has been called, don't allow learning anymore
    if learn and self._doneLearning:
      raise RuntimeError("Learning can not be performed once finishLearning"
                         " has been called.")

    assert (learn or infer)
    assert (flatInput.ndim == 1) and (flatInput.shape[0] == self._inputCount)
    assert (flatInput.dtype == realDType)
    input = flatInput.reshape(self.inputShape)

    # Make sure we've allocated the inhibition object lazily
    if self._inhibitionObj is None:
      self._updateInhibitionObj()

    # Reset first timer
    if self.printPeriodicStats > 0 and self._iterNum == 0:
      self._periodicStatsReset()

    # Using cloning?
    cloningOn = (self.numCloneMasters != self._coincCount)

    # If we have high verbosity, save the overlaps before stimulus threshold
    # so we can print them out at the end
    if self.spVerbosity >= 2:
      print "==============================================================="
      print "Iter:%d" % self._iterNum, "inferenceIter:%d" % \
          self._inferenceIterNum
      self._computeOverlapsFP(input, stimulusThreshold=0)
      self._overlapsBST[:] = self._overlaps
      # NOTE: connectedCountsOnEntry is only defined on this verbose path;
      # it is only read below under the same spVerbosity >= 2 guard.
      connectedCountsOnEntry = self._masterConnectedCoincSizes.copy()
      if self.spVerbosity >= 3:
        inputNZ = flatInput.nonzero()[0]
        print "active inputs: (%d)" % len(inputNZ), inputNZ

    # ----------------------------------------------------------------------
    # TODO: Port to C++, arguments may be different - t1YXArr,
    # coincInputRadius,...
    # Calculate the raw overlap of each cell
    # Overlaps less than stimulus threshold are set to zero in
    # _calculateOverlaps
    # This places the result into self._overlaps
    self._computeOverlapsFP(input, stimulusThreshold=self.stimulusThreshold)

    # Save the original overlap values, before boosting, for the purpose of
    # anomaly detection
    if computeAnomaly:
      self._anomalyScores[:] = self._overlaps[:]

    if learn:
      # --------------------------------------------------------------------
      # Update each cell's duty cycle before inhibition
      # Only cells with overlaps greater stimulus threshold are considered as
      # active. Stimulus threshold has already been applied
      # TODO: Port to C++?  Loops over all coincs
      # Only updating is carried out here, bump up happens later
      onCellIndices = numpy.where(self._overlaps > 0)
      if cloningOn:
        onMasterIndices = self._cloneMapFlat[onCellIndices]
        self._masterOnCells.fill(0)
        self._masterOnCells[onMasterIndices] = 1
        denseOn = self._masterOnCells
      else:
        self._onCells.fill(0)
        self._onCells[onCellIndices] = 1
        denseOn = self._onCells

      # dutyCyclePeriod = self._iterNum + 1 let _dutyCycleBeforeInh
      # and _dutyCycleAfterInh represent real firing percentage at the
      # beginning of learning. This will effect boosting and let unlearned
      # coincidences have high boostFactor at beginning.
      self.dutyCyclePeriod = min(self._iterNum + 1, 1000)
      self._dutyCycleBeforeInh = ((self.dutyCyclePeriod-1) \
          * self._dutyCycleBeforeInh + denseOn) / self.dutyCyclePeriod

    # ----------------------------------------------------------------------
    # Compute firing levels based on boost factor and raw overlap. Update
    # self._overlaps in place, replacing it with the boosted overlap. We also
    # computes percent overlap of each column and store that into
    # self._pctOverlaps
    # With cloning
    if cloningOn:
      self._pctOverlaps[:] = self._overlaps
      self._pctOverlaps /= self._masterConnectedCoincSizes[self._cloneMapFlat]
      boostFactors = self._firingBoostFactors[self._cloneMapFlat]
    else:
      self._pctOverlaps[:] = self._overlaps
      potentials = self._masterConnectedCoincSizes
      self._pctOverlaps /= numpy.maximum(1, potentials)
      boostFactors = self._firingBoostFactors

    # To process minDistance, we do the following:
    #  1.) All cells which do not overlap the input "highly" (less than
    #      minDistance), are considered to be in the "low tier" and get their
    #      overlap multiplied by their respective boost factor.
    #  2.) All other cells, which DO overlap the input highly, get a "high tier
    #      offset" added to their overlaps, and boost is not applied. The
    #      "high tier offset" is computed as the max of all the boosted
    #      overlaps from step #1. This insures that a cell in this high tier
    #      will never lose to a cell from the low tier.
    # if self.useHighTier \
    #      and len(numpy.where(self._dutyCycleAfterInh == 0)[0]) == 0:
    #   self.useHighTier = False
    if self.useHighTier:
      highTier = numpy.where(self._pctOverlaps >= (1.0 - self.minDistance))[0]
    else:
      highTier = []
    someInHighTier = len(highTier) > 0
    if someInHighTier:
      boostFactors = numpy.array(boostFactors)
      boostFactors[highTier] = 1.0

    # apply boostFactors only in learning phase not in inference phase.
    if learn:
      self._overlaps *= boostFactors
    if someInHighTier:
      highTierOffset = self._overlaps.max() + 1.0
      self._overlaps[highTier] += highTierOffset

    # Cache the dense output for debugging
    if self._denseOutput is not None:
      self._denseOutput = self._overlaps.copy()

    # ----------------------------------------------------------------------
    # Incorporate inhibition and see who is firing after inhibition.
    # We don't need this method to process stimulusThreshold because we
    # already processed it.
    # Also, we pass in a small 'addToWinners' amount which gets added to the
    # winning elements as we go along. This prevents us from choosing more than
    # topN winners per inhibition region when more than topN elements all have
    # the same max high score.
    learnedCellsOverlaps = numpy.array(self._overlaps)
    if infer and not learn:
      # Cells that have never learnt are not allowed to win during inhibition
      if not self.randomSP:
        learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = 0
      else:
        # Boost the unlearned cells so that the winning columns are picked
        # randomly from the set of unlearned columns.
        # NOTE(review): the exact nesting of the tie-breaker relative to the
        # useHighTier guard below was reconstructed from collapsed source --
        # confirm against the original file.
        if self.useHighTier:
          learnedCellsOverlaps[numpy.where(
              self._dutyCycleAfterInh == 0)[0]] = \
              learnedCellsOverlaps.max() + 1
          # Boost columns that are in highTier (ie. they match the input very
          # well)
          learnedCellsOverlaps[highTier] += learnedCellsOverlaps.max() + 1
        # Small random tiebreaker for columns with equal overlap
        tieBreaker = numpy.random.rand(
            *learnedCellsOverlaps.shape).astype(realDType)
        learnedCellsOverlaps += 0.1 * tieBreaker

    numOn = self._inhibitionObj.compute(
        learnedCellsOverlaps, self._onCellIndices,
        0.0,                                  # stimulusThreshold
        max(learnedCellsOverlaps)/1000.0,     # addToWinners
        )
    self._onCells.fill(0)
    if numOn > 0:
      onCellIndices = self._onCellIndices[0:numOn]
      self._onCells[onCellIndices] = 1
    else:
      onCellIndices = []

    # Compute the anomaly scores only for the winning columns
    if computeAnomaly:
      self._anomalyScores *= self._onCells
      self._anomalyScores *= self._dutyCycleAfterInh

    if self.spVerbosity >= 2:
      print "inhRadius", self._inhibitionObj.getInhibitionRadius()
      print "inhLocalAreaDensity", self._inhibitionObj.getLocalAreaDensity()
      print "numFiring", numOn

    # ----------------------------------------------------------------------
    # Capturing learning stats?  If so, capture the cell overlap statistics
    if self.printPeriodicStats > 0:
      activePctOverlaps = self._pctOverlaps[onCellIndices]
      self._stats['cellPctOverlapSums'] += activePctOverlaps.sum()
      if cloningOn:
        onMasterIndices = self._cloneMapFlat[onCellIndices]
      else:
        onMasterIndices = onCellIndices
      self._stats['cellOverlapSums'] += \
          (activePctOverlaps * \
           self._masterConnectedCoincSizes[onMasterIndices]).sum()

    # ----------------------------------------------------------------------
    # Compute which cells had very high overlap, but were still
    # inhibited. These we are calling our "orphan cells", because they are
    # representing an input which is already better represented by another
    # cell.
    if self.synPermOrphanDec > 0:
      orphanCellIndices = set(numpy.where(self._pctOverlaps >= 1.0)[0])
      orphanCellIndices.difference_update(onCellIndices)
    else:
      orphanCellIndices = []

    if learn:
      # --------------------------------------------------------------------
      # Update the number of coinc connections per input
      # During learning (adapting permanence values), we need to be able to
      # recognize dupe inputs - inputs that go two 2 or more active cells
      if self.synPermActiveSharedDec != 0:
        self._updateInputUse(onCellIndices)

      # --------------------------------------------------------------------
      # For the firing cells, update permanence values
      onMasterIndices = self._adaptSynapses(onCellIndices, orphanCellIndices,
                                            input)

      # --------------------------------------------------------------------
      # Increase the permanence values of columns which haven't passed
      # stimulus threshold of overlap with at least a minimum frequency
      self._bumpUpWeakCoincidences()

      # --------------------------------------------------------------------
      # Update each cell's after-inhibition duty cycle
      # TODO: As the on-cells are sparse after inhibition, we can have
      # a different updateDutyCycles function taking advantage of the sparsity
      if cloningOn:
        self._masterOnCells.fill(0)
        self._masterOnCells[onMasterIndices] = 1
        denseOn = self._masterOnCells
      else:
        denseOn = self._onCells
      self._dutyCycleAfterInh = ((self.dutyCyclePeriod - 1) \
          * self._dutyCycleAfterInh + denseOn) / self.dutyCyclePeriod

      # For the cell's that just fired with a very high boost, bring their
      # boost back down to 1.0. This prevents a cell that was trying very
      # hard to grab input, and was just successful, from grabbing every
      # other input that comes around in the near future.
      # for masterNum in onMasterIndices:
      #  if self._firingBoostFactors[masterNum] > self.maxSSFiringBoost:
      #    self._dutyCycleAfterInh[masterNum] = self._dutyCycleAfterInh.max()/5
      # Set the duty cycle to a safe margin over the boosting duty cycle but
      # avoid artificially boosting it to a reconstruction significant level.
      #    self._dutyCycleAfterInh[masterNum] = self.minPctDutyCycleAfterInh*10

      # --------------------------------------------------------------------
      # Update the boost factors based on firings rate after inhibition
      self._updateBoostFactors()

      # ======================================================================
      # Increment iteration number and perform our periodic tasks if it's time
      if ((self._iterNum + 1) % 50) == 0:
        self._updateInhibitionObj()
        self._updateMinDutyCycles(self._dutyCycleBeforeInh,
                                  self.minPctDutyCycleBeforeInh,
                                  self._minDutyCycleBeforeInh)
        self._updateMinDutyCycles(self._dutyCycleAfterInh,
                                  self.minPctDutyCycleAfterInh,
                                  self._minDutyCycleAfterInh)

    # Next iteration
    if learn:
      self._iterNum += 1
    if infer:
      self._inferenceIterNum += 1

    if learn:
      # ======================================================================
      # Capture and possibly print the periodic stats
      if self.printPeriodicStats > 0:
        self._periodicStatsComputeEnd(onCellIndices, flatInput.nonzero()[0])

    # Verbose print other stats
    if self.spVerbosity >= 2:
      cloning = (self.numCloneMasters != self._coincCount)
      print " #connected on entry: ", fdru.numpyStr(connectedCountsOnEntry,
                '%d ', includeIndices=True)
      print " #connected on exit:  ", fdru.numpyStr(
                self._masterConnectedCoincSizes, '%d ', includeIndices=True)
      if self.spVerbosity >= 3 or not cloning:
        print " overlaps:            ", fdru.numpyStr(self._overlapsBST, '%d ',
                includeIndices=True, includeZeros=False)
        print " firing levels:       ", fdru.numpyStr(self._overlaps, '%.4f ',
                includeIndices=True, includeZeros=False)
      print " on after inhibition: ", onCellIndices
      if not self._doneLearning:
        print " minDutyCycleBeforeInh:", fdru.numpyStr(
                self._minDutyCycleBeforeInh, '%.4f ', includeIndices=True)
        print " dutyCycleBeforeInh:   ", fdru.numpyStr(self._dutyCycleBeforeInh,
                '%.4f ', includeIndices=True)
        # NOTE(review): the next print applies '%' to a string with no format
        # specifier and will raise TypeError if this verbose path runs --
        # likely intended to be a comma or '%s'; left unchanged here.
        print " belowMinBeforeInh:    " % numpy.nonzero(
                self._dutyCycleBeforeInh \
                < self._minDutyCycleBeforeInh)[0]
        print " minDutyCycleAfterInh: ", fdru.numpyStr(
                self._minDutyCycleAfterInh, '%.4f ', includeIndices=True)
        print " dutyCycleAfterInh:    ", fdru.numpyStr(self._dutyCycleAfterInh,
                '%.4f ', includeIndices=True)
        # NOTE(review): same format-string issue as above.
        print " belowMinAfterInh:     " % numpy.nonzero(
                self._dutyCycleAfterInh \
                < self._minDutyCycleAfterInh)[0]
        print " firingBoosts:         ", fdru.numpyStr(self._firingBoostFactors,
                '%.4f ', includeIndices=True)
      print
    elif self.spVerbosity >= 1:
      print "SP: learn: ", learn
      print "SP: active outputs(%d): " % (len(onCellIndices)), onCellIndices

    self._runIter += 1

    # Return inference result
    return self._onCells
# if(topDownIn==None): # #Get highest overlaps # sortedOverlaps = numpy.sort(self._overlapsNoBoost) # #Threshold overlaps to .5 of the max overlap, this will cut out most columns that do not # #encode the input, but keep most of the columns that have been encoded with the same predicted field # halfOnThresh = numpy.where(self._overlapsNoBoost>sortedOverlaps[-1]*.5) # #keep at least the top 120 by overlap # overlapThresh = sortedOverlaps[-120] # pastOverlapThresh = numpy.where(self._overlapsNoBoost>=overlapThresh) # pastOverlapThresh = numpy.union1d(pastOverlapThresh[0], halfOnThresh[0]) # zippedOLplusDC = zip(pastOverlapThresh,self._dutyCycleAfterInh[pastOverlapThresh]) # zippedOLplusDC.sort(key=itemgetter(1), reverse=True) # selectedCols = zippedOLplusDC[0:min(40,len(zippedOLplusDC))] # pastThresh,dutyCycles = zip(*selectedCols) # topDownIn = numpy.zeros(self._overlaps.shape) # topDownIn[list(pastThresh)] = 1 # # Init topdown out. This is shaped to the input # topDownOut = self._topDownOut # self._topDownOut.fill(0) # self._topDownParentCounts.fill(0) # # ========================================================================= # # Get the contributions from each of the active cells to the inputs. # # We compute the average contribution to each input from each of the # # active cells that connects to it. 
# # If topDownIn is not flat, flatten it # if len(topDownIn.shape) > 1: # topDownIn = topDownIn.reshape(-1) # activeCells = topDownIn.nonzero()[0] # if self.spReconstructionParam == "dutycycle": # maxDutyCycle = max(1-self._dutyCycleAfterInh) # if self.spReconstructionParam == "pctoverlap": # maxPctOverlap = max(self._pctOverlaps) # # From each output, get the expected input that generated it # cloningOn = (self.numCloneMasters != self._coincCount) # for cell in activeCells: # if cloningOn: # masterNum = self._cloneMapFlat[cell] # else: # masterNum = cell # # Get the permanences for this master # activeInputs = self._masterConnectedM[masterNum].toDense() # # Add the connected inputs to the topDownOut # inputSlice = self._inputSlices[cell] # coincSlice = self._coincSlices[cell] # # Weight each connected input by the cell's firing strength # if self.spReconstructionParam == "unweighted_mean": # topDownOut[inputSlice] += topDownIn[cell] * activeInputs[coincSlice] # elif self.spReconstructionParam == "permanence": # maxPermanence = max(self._masterPermanenceM[cell].getRow(0)) # topDownOut[inputSlice] += topDownIn[cell] * activeInputs[coincSlice] * \ # (0.5 + self._masterPermanenceM[cell].getRow(0)/maxPermanence*0.5) # elif self.spReconstructionParam == "pctoverlap": # topDownOut[inputSlice] += topDownIn[cell] * activeInputs[coincSlice] * \ # (0.5 + self._pctOverlaps[cell]/maxPctOverlap*0.5) # elif self.spReconstructionParam == "dutycycle": # topDownOut[inputSlice] += topDownIn[cell] * activeInputs[coincSlice] * \ # (0.5 + (1-(self._dutyCycleAfterInh[cell]/maxDutyCycle))*0.5) # elif self.spReconstructionParam == "maximum_firingstrength": # maxPermanence = max(self._masterPermanenceM[cell].getRow(0)) # strength = topDownIn[cell]*activeInputs[coincSlice] * \ # (0.5 + self._masterPermanenceM[cell].getRow(0)/maxPermanence*0.5) # topDownOut[inputSlice] = numpy.maximum(topDownOut[inputSlice], # strength) # # Bump up the parent counts for these inputs # 
self._topDownParentCounts[inputSlice] += activeInputs[coincSlice] # # Old method of normalizing # # Divide each input's accumulated weight by it's number of parents # # numpy.clip(self._topDownParentCounts, 1.0, numpy.inf, # # self._topDownParentCounts) # # topDownOut /= self._topDownParentCounts # if "maximum_firingstrength" not in self.spReconstructionParam: # topDownInTotal = topDownIn.sum() # if topDownInTotal: # topDownOut /= topDownInTotal # return topDownOut.reshape(-1) # return topDownIn.copy() ############################################################################# def __getstate__(self): # Update our random states self._randomState = random.getstate() self._numpyRandomState = numpy.random.get_state() self._nupicRandomState = self.random.getState() state = self.__dict__.copy() # Delete ephemeral members that we don't want pickled for ephemeralMemberName in self._getEphemeralMembers(): if ephemeralMemberName in state: del state[ephemeralMemberName] return state ############################################################################# def __setstate__(self, state): self.__dict__.update(state) # ---------------------------------------------------------------------- # Support older checkpoints # These fields were added on 2010-10-05 and _iterNum was preserved if not hasattr(self, '_randomState'): self._randomState = random.getstate() self._numpyRandomState = numpy.random.get_state() self._nupicRandomState = self.random.getState() self._iterNum = 0 # ------------------------------------------------------------------------ # Init our random number generators random.setstate(self._randomState) numpy.random.set_state(self._numpyRandomState) self.random.setState(self._nupicRandomState) # Load things that couldn't be pickled... 
self._initEphemerals() ############################################################################ def getAnomalyScore(self): """ Get the aggregate anomaly score for this input pattern Returns: A single scalar value for the anomaly score """ numNonzero = len(numpy.nonzero(self._anomalyScores)[0]) return 1.0 / (numpy.sum(self._anomalyScores) + 1) ############################################################################# def getLearningStats(self): """ Return a dictionary containing a set of statistics related to learning. Here is a list of what is returned: 'activeCountAvg': The average number of active columns seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. If printPeriodicStats is not turned on (== 0), then this is -1 'underCoveragePct': The average under-coverage of the input as seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. If printPeriodicStats is not turned on (== 0), then this is -1 'overCoveragePct': The average over-coverage of the input as seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. If printPeriodicStats is not turned on (== 0), then this is -1 'numConnectionChangesAvg': The overall average number of connection changes made per active column per iteration, over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. This gives an indication as to how much learning is still occuring. If printPeriodicStats is not turned on (== 0), then this is -1 'numConnectionChangesMin': The minimum number of connection changes made to an active column per iteration, over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. This gives an indication as to how much learning is still occuring. 
  def getLearningStats(self):
    """ Return a dictionary containing a set of statistics related to learning.

    Here is a list of what is returned:

    'activeCountAvg':
          The average number of active columns seen over the last N training
          iterations, where N is set by the constructor parameter
          printPeriodicStats. If printPeriodicStats is not turned on (== 0),
          then this is -1

    'underCoveragePct':
          The average under-coverage of the input as seen over the last N
          training iterations, where N is set by the constructor parameter
          printPeriodicStats. If printPeriodicStats is not turned on (== 0),
          then this is -1

    'overCoveragePct':
          The average over-coverage of the input as seen over the last N
          training iterations, where N is set by the constructor parameter
          printPeriodicStats. If printPeriodicStats is not turned on (== 0),
          then this is -1

    'numConnectionChangesAvg':
          The overall average number of connection changes made per active
          column per iteration, over the last N training iterations, where N
          is set by the constructor parameter printPeriodicStats. This gives
          an indication as to how much learning is still occurring. If
          printPeriodicStats is not turned on (== 0), then this is -1

    'numConnectionChangesMin':
          The minimum number of connection changes made to an active column
          per iteration, over the last N training iterations, where N is set
          by the constructor parameter printPeriodicStats. This gives an
          indication as to how much learning is still occurring. If
          printPeriodicStats is not turned on (== 0), then this is -1

    'numConnectionChangesMax':
          The maximum number of connection changes made to an active column
          per iteration, over the last N training iterations, where N is set
          by the constructor parameter printPeriodicStats. This gives an
          indication as to how much learning is still occurring. If
          printPeriodicStats is not turned on (== 0), then this is -1

    'rfSize':
          The average receptive field size of the columns.

    'inhibitionRadius':
          The average inhibition radius of the columns.

    'targetDensityPct':
          The most recent target local area density used, as a percent
          (0 -> 100)

    'coincidenceSizeAvg':
          The average learned coincidence size

    'coincidenceSizeMin':
          The minimum learned coincidence size

    'coincidenceSizeMax':
          The maximum learned coincidence size

    'dcBeforeInhibitionAvg':
          The average of duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMin':
          The minimum duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMax':
          The maximum duty cycle before inhibition of all coincidences

    'dcAfterInhibitionAvg':
          The average of duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMin':
          The minimum duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMax':
          The maximum duty cycle after inhibition of all coincidences

    'firingBoostAvg':
          The average firing boost

    'firingBoostMin':
          The minimum firing boost

    'firingBoostMax':
          The maximum firing boost
    """
    # Fill in the stats that can be computed on the fly.  The transient stats
    # that depend on printPeriodicStats being on, have already been stored
    self._learningStats['rfRadiusAvg'] = self._rfRadiusAvg
    self._learningStats['rfRadiusMin'] = self._rfRadiusMin
    self._learningStats['rfRadiusMax'] = self._rfRadiusMax

    if self._inhibitionObj is not None:
      self._learningStats['inhibitionRadius'] = \
          self._inhibitionObj.getInhibitionRadius()
      self._learningStats['targetDensityPct'] = \
          100.0 * self._inhibitionObj.getLocalAreaDensity()
    else:
      print "Warning: No inhibitionObj found for getLearningStats"
      self._learningStats['inhibitionRadius'] = 0.0
      self._learningStats['targetDensityPct'] = 0.0

    self._learningStats['coincidenceSizeAvg'] = \
        self._masterConnectedCoincSizes.mean()
    self._learningStats['coincidenceSizeMin'] = \
        self._masterConnectedCoincSizes.min()
    self._learningStats['coincidenceSizeMax'] = \
        self._masterConnectedCoincSizes.max()

    # Duty-cycle (and boost) arrays are only valid while learning is still
    # possible.
    # NOTE(review): the grouping of the firingBoost entries under this guard
    # was reconstructed from collapsed source -- confirm against the original.
    if not self._doneLearning:
      self._learningStats['dcBeforeInhibitionAvg'] = \
          self._dutyCycleBeforeInh.mean()
      self._learningStats['dcBeforeInhibitionMin'] = \
          self._dutyCycleBeforeInh.min()
      self._learningStats['dcBeforeInhibitionMax'] = \
          self._dutyCycleBeforeInh.max()
      self._learningStats['dcAfterInhibitionAvg'] = \
          self._dutyCycleAfterInh.mean()
      self._learningStats['dcAfterInhibitionMin'] = \
          self._dutyCycleAfterInh.min()
      self._learningStats['dcAfterInhibitionMax'] = \
          self._dutyCycleAfterInh.max()
      self._learningStats['firingBoostAvg'] = \
          self._firingBoostFactors.mean()
      self._learningStats['firingBoostMin'] = \
          self._firingBoostFactors.min()
      self._learningStats['firingBoostMax'] = \
          self._firingBoostFactors.max()

    return self._learningStats
The transient stats # that depend on printPeriodicStats being on, have already been stored self._learningStats['rfRadiusAvg'] = self._rfRadiusAvg self._learningStats['rfRadiusMin'] = self._rfRadiusMin self._learningStats['rfRadiusMax'] = self._rfRadiusMax if self._inhibitionObj is not None: self._learningStats['inhibitionRadius'] = \ self._inhibitionObj.getInhibitionRadius() self._learningStats['targetDensityPct'] = \ 100.0 * self._inhibitionObj.getLocalAreaDensity() else: print "Warning: No inhibitionObj found for getLearningStats" self._learningStats['inhibitionRadius'] = 0.0 self._learningStats['targetDensityPct'] = 0.0 self._learningStats['coincidenceSizeAvg'] = \ self._masterConnectedCoincSizes.mean() self._learningStats['coincidenceSizeMin'] = \ self._masterConnectedCoincSizes.min() self._learningStats['coincidenceSizeMax'] = \ self._masterConnectedCoincSizes.max() if not self._doneLearning: self._learningStats['dcBeforeInhibitionAvg'] = \ self._dutyCycleBeforeInh.mean() self._learningStats['dcBeforeInhibitionMin'] = \ self._dutyCycleBeforeInh.min() self._learningStats['dcBeforeInhibitionMax'] = \ self._dutyCycleBeforeInh.max() self._learningStats['dcAfterInhibitionAvg'] = \ self._dutyCycleAfterInh.mean() self._learningStats['dcAfterInhibitionMin'] = \ self._dutyCycleAfterInh.min() self._learningStats['dcAfterInhibitionMax'] = \ self._dutyCycleAfterInh.max() self._learningStats['firingBoostAvg'] = \ self._firingBoostFactors.mean() self._learningStats['firingBoostMin'] = \ self._firingBoostFactors.min() self._learningStats['firingBoostMax'] = \ self._firingBoostFactors.max() return self._learningStats ############################################################################# def _seed(self, seed=-1): """ Initialize the random seed """ if seed != -1: self.random = NupicRandom(seed) random.seed(seed) numpy.random.seed(seed) else: self.random = NupicRandom() ############################################################################# def 
_initialPermanence(self): """ Create and return a 2D matrix filled with initial permanence values. The returned matrix will be of shape: (2*coincInputRadius + 1, 2*coincInputRadius + 1). The initial permanence values are set between 0 and 1.0, with enough chosen above synPermConnected to make it highly likely that a cell will pass stimulusThreshold, given the size of the potential RF, the input pool sampling percentage, and the expected density of the active inputs. If gaussianDist is True, the center of the matrix will contain the highest permanence values and lower values will be farther from the center. If gaussianDist is False, the highest permanence values will be evenly distributed throughout the potential RF. """ # Figure out the target number of connected synapses. We want about 2X # stimulusThreshold minOn = 2 * max(self.stimulusThreshold, 10) / self.coincInputPoolPct \ / self.inputDensity # ======================================================================== # Get the gaussian distribution, with max magnitude just slightly above # synPermConnected. Try to find a sigma that gives us about 2X # stimulusThreshold connected synapses after sub-sampling for # coincInputPoolPct. We will assume everything within +/- sigma will be # connected. This logic uses the fact that an x value of sigma generates a # magnitude of 0.6. if self.gaussianDist: # Only supported when we have 2D layouts if self._coincRFShape[0] != self._coincRFShape[1]: raise RuntimeError("Gaussian distibuted permanences are currently only" "supported for 2-D layouts") # The width and height of the center "blob" in inputs is the square root # of the area onAreaDim = numpy.sqrt(minOn) # Sigma is at the edge of the center blob sigma = onAreaDim/2 # Create the gaussian with a value of 1.0 at the center perms = self._gaussianMatrix(dim=max(self._coincRFShape), sigma=sigma) # The distance between the min and max values within the gaussian will # be given by 'grange'. 
In a gaussian, the value at sigma away from the # center is 0.6 * the value at the center. We want the values at sigma # to be synPermConnected maxValue = 1.0 / 0.6 * self.synPermConnected perms *= maxValue perms.shape = (-1,) # Now, let's clip off the low values to reduce the number of non-zeros # we have and reduce our memory requirements. We'll clip everything # farther away than 2 sigma to 0. The value of a gaussing at 2 sigma # is 0.135 * the value at the center perms[perms < (0.135 * maxValue)] = 0 # ======================================================================== # Evenly distribute the permanences through the RF else: # Create a random distribution from 0 to 1. perms = numpy.random.random(self._coincRFShape) perms = perms.astype(realDType) # Set the range of values to be between 0 and # synPermConnected+synPermInctiveDec. This ensures that a pattern # will always be learned in 1 iteration maxValue = min(1.0, self.synPermConnected + self.synPermInactiveDec) # What percentage do we want to be connected? connectPct = 0.50 # What value from the 0 to 1 distribution will map to synPermConnected? threshold = 1.0 - connectPct # Which will be the connected and unconnected synapses? connectedSyns = perms >= threshold unconnectedSyns = numpy.logical_not(connectedSyns) # Squeeze all values between threshold and 1.0 to be between # synPermConnected and synPermConnected + synPermActiveInc / 4 # This makes sure the firing coincidence perms matching input bit get # greater than synPermConnected and other unconnectedSyns get deconnected # in one firing learning iteration. 
      # Linearly remap the top connectPct of the uniform [0,1) draw onto
      # [synPermConnected, synPermConnected + dstRange/4] so those synapses
      # start out connected (and can be disconnected in one learning step).
      srcOffset = threshold
      srcRange = 1.0 - threshold
      dstOffset = self.synPermConnected
      dstRange = maxValue - self.synPermConnected
      perms[connectedSyns] = (perms[connectedSyns] - srcOffset)/srcRange \
                              * dstRange / 4.0 + dstOffset

      # Squeeze all values between 0 and threshold to be between 0 and
      # synPermConnected
      srcRange = threshold - 0.0
      dstRange = self.synPermConnected - 0.0
      perms[unconnectedSyns] = perms[unconnectedSyns]/srcRange \
                              * dstRange

      # Now, let's clip off the low values to reduce the number of non-zeros
      # we have and reduce our memory requirements. We'll clip everything
      # below synPermActiveInc/2 to 0
      perms[perms < (self.synPermActiveInc / 2.0)] = 0

      # Flatten to 1-D; callers treat the permanences as a flat vector
      perms.shape = (-1,)

    return perms

  #############################################################################
  def _gaussianMatrix(self, dim, sigma):
    """
    Create and return a 2D matrix filled with a gaussian distribution. The
    returned matrix will be of shape (dim, dim). The mean of the gaussian
    will be in the center of the matrix and have a value of 1.0.
    """
    # Radially symmetric gaussian; normalized so the value at distance 0
    # (the center) is 1.0
    gaussian = lambda x,sigma: numpy.exp(-(x**2)/(2*(sigma**2)))

    # Allocate the matrix
    m = numpy.empty((dim, dim), dtype=realDType)

    # Find the center
    center = (dim - 1) / 2.0

    # TODO: Simplify using numpy.meshgrid
    # Fill it in
    for y in xrange(dim):
      for x in xrange(dim):
        dist = numpy.sqrt((x-center)**2 + (y-center)**2)
        m[y,x] = gaussian(dist, sigma)

    return m

  #############################################################################
  def _makeMasterCoincidences(self, numCloneMasters, coincRFShape,
                              coincInputPoolPct, initialPermanence=None,
                              nupicRandom=None):
    """Make the master coincidence matrices and mater input histograms.

    # TODO: Update this example
    >>> FDRCSpatial._makeMasterCoincidences(1, 2, 0.33)
    (array([[[ True,  True, False, False, False],
             [False,  True, False, False,  True],
             [False,  True, False, False, False],
             [False, False, False,  True, False],
             [ True, False, False, False, False]]], dtype=bool),
     array([[[ 0.26982325,  0.19995725,  0.        ,  0.        ,  0.        ],
             [ 0.        ,  0.94128972,  0.        ,  0.        ,  0.36316112],
             [ 0.        ,  0.06312726,  0.        ,  0.        ,  0.        ],
             [ 0.        ,  0.        ,  0.        ,  0.29740077,  0.        ],
             [ 0.81071907,  0.        ,  0.        ,  0.        ,  0.        ]]], dtype=float32))
    """
    if nupicRandom is None:
      nupicRandom = NupicRandom(42)

    if initialPermanence is None:
      initialPermanence = self._initialPermanence()

    coincRfArea = (coincRFShape[0] * coincRFShape[1])
    # NOTE(review): coincInputPool is a float here; numpy.empty truncates it
    # to an int element count — confirm the intended rounding of pool size.
    coincInputPool = coincInputPoolPct * coincRfArea

    # We will generate a list of sparse matrices
    masterPotentialM = []
    masterPermanenceM = []

    toSample = numpy.arange(coincRfArea, dtype='uint32')
    toUse = numpy.empty(coincInputPool, dtype='uint32')
    #denseM = numpy.zeros(coincRfArea, dtype='uint32')
    denseM = numpy.zeros(coincRfArea, dtype=realDType)
    for i in xrange(numCloneMasters):
      # Randomly choose which input positions are in this master's pool
      nupicRandom.getUInt32Sample(toSample, toUse)

      # Put in 1's into the potential locations
      denseM.fill(0)
      denseM[toUse] = 1
      masterPotentialM.append(SM_01_32_32(denseM.reshape(coincRFShape)))

      # Put in the initial permanences
      denseM *= initialPermanence
      masterPermanenceM.append(SM32(denseM.reshape(coincRFShape)))

      # If we are not using common initial permanences, create another
      # unique one for the next cell
      if not self.commonDistributions:
        initialPermanence = self._initialPermanence()

    return masterPotentialM, masterPermanenceM

  #############################################################################
  def _updateConnectedCoincidences(self, masters=None):
    """Update 'connected' version of the given coincidence.

    Each 'connected' coincidence is effectively a binary matrix (AKA boolean)
    matrix that is the same size as the input histogram matrices. They have a
    1 wherever the inputHistogram is "above synPermConnected".
    """
    # If no masterNum given, update all of them
    if masters is None:
      masters = xrange(self.numCloneMasters)

    (nCellRows, nCellCols) = self._coincRFShape
    cloningOn = (self.numCloneMasters != self._coincCount)
    for masterNum in masters:
      # Where are we connected?
masterConnectedNZ = \ self._masterPermanenceM[masterNum].whereGreaterEqual(0, nCellRows, 0, nCellCols, self.synPermConnected) rowIdxs = masterConnectedNZ[:,0] colIdxs = masterConnectedNZ[:,1] self._masterConnectedM[masterNum].setAllNonZeros(nCellRows, nCellCols, rowIdxs, colIdxs) self._masterConnectedCoincSizes[masterNum] = len(rowIdxs) # Update the corresponding rows in the super, mondo connected matrix that # come from this master masterConnected = \ self._masterConnectedM[masterNum].toDense().astype('bool') # 0.2s if cloningOn: cells = self._cellsForMaster[masterNum] else: cells = [masterNum] for cell in cells: inputSlice = self._inputSlices[cell] coincSlice = self._coincSlices[cell] masterSubset = masterConnected[coincSlice] sparseCols = self._inputLayout[inputSlice][masterSubset] self._allConnectedM.replaceSparseRow(cell, sparseCols) # 4s. ############################################################################# def _setSlices(self): """Compute self._columnSlices and self._inputSlices self._inputSlices are used to index into the input (assuming it's been shaped to a 2D array) to get the receptive field of each column. There is one item in the list for each column. self._coincSlices are used to index into the coinc (assuming it's been shaped to a 2D array) to get the valid area of the column. There is one item in the list for each column. This function is called upon unpickling, since we can't pickle slices. """ self._columnCenters = numpy.array(self._computeCoincCenters(self.inputShape, self.coincidencesShape, self.inputBorder)) coincInputRadius = self.coincInputRadius (coincHeight, coincWidth) = self._coincRFShape inputShape = self.inputShape inputBorder = self.inputBorder # --------------------------------------------------------------------- # Compute the input slices for each cell. This is the slice of the entire # input which intersects with the cell's permanence matrix. 
    if self._hasTopology:
      # Clip each receptive field against the input boundaries
      self._inputSlices = [numpy.s_[max(0, cy-coincInputRadius): \
                               min(inputShape[0], cy+coincInputRadius + 1),
                               max(0, cx-coincInputRadius): \
                               min(inputShape[1], cx+coincInputRadius + 1)]
                           for (cy, cx) in self._columnCenters]
    else:
      # No topology: every cell sees the entire input
      self._inputSlices = [numpy.s_[0:inputShape[0], 0:inputShape[1]]
                           for (cy, cx) in self._columnCenters]

    # Flattened (start, stop, start, stop) form of the slices, consumed by
    # the C++ helpers
    self._inputSlices2 = numpy.zeros((4*len(self._inputSlices)),
                                     dtype="uint32")
    k = 0
    for i in range(len(self._inputSlices)):
      self._inputSlices2[k] = self._inputSlices[i][0].start
      self._inputSlices2[k + 1] = self._inputSlices[i][0].stop
      self._inputSlices2[k + 2] = self._inputSlices[i][1].start
      self._inputSlices2[k + 3] = self._inputSlices[i][1].stop
      k = k + 4

    # ---------------------------------------------------------------------
    # Compute the coinc slices for each cell. This is which portion of the
    # cell's permanence matrix intersects with the input.
    if self._hasTopology:
      if self.inputShape[0] > 1:
        self._coincSlices = [numpy.s_[max(0, coincInputRadius - cy): \
                                 min(coincHeight,
                                     coincInputRadius + inputShape[0] - cy),
                                 max(0, coincInputRadius-cx): \
                                 min(coincWidth,
                                     coincInputRadius + inputShape[1] - cx)]
                             for (cy, cx) in self._columnCenters]
      else:
        # 1-D input: only the x dimension needs clipping
        self._coincSlices = [numpy.s_[0:1, max(0, coincInputRadius-cx): \
                                 min(coincWidth,
                                     coincInputRadius + inputShape[1] - cx)]
                             for (cy, cx) in self._columnCenters]
    else:
      self._coincSlices = [numpy.s_[0:coincHeight, 0:coincWidth]
                           for (cy, cx) in self._columnCenters]

    # Flattened (start, stop, start, stop) form, consumed by the C++ helpers
    self._coincSlices2 = numpy.zeros((4*len(self._coincSlices)),
                                     dtype="uint32")
    k = 0
    for i in range(len(self._coincSlices)):
      self._coincSlices2[k] = self._coincSlices[i][0].start
      self._coincSlices2[k + 1] = self._coincSlices[i][0].stop
      self._coincSlices2[k + 2] = self._coincSlices[i][1].start
      self._coincSlices2[k + 3] = self._coincSlices[i][1].stop
      k = k + 4

  #############################################################################
  @staticmethod
  def _computeCoincCenters(inputShape, coincidencesShape, inputBorder):
    """Compute the centers of all coincidences, given parameters.

    This function is semi-public: tools may use it to generate good
    visualizations of what the FDRCSpatial node is doing.

    NOTE: It must be static or global function so that it can be called by
    the ColumnActivityTab inspector *before* the first compute (before the
    SP has been constructed).

    If the input shape is (7,20), shown below with * for each input
    ********************
    ********************
    ********************
    ********************
    ********************
    ********************
    ********************

    if inputBorder is 1, we distribute the coincidences evenly over the
    the area after removing the edges, @ shows the allowed input area below
    ********************
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    ********************

    each coincidence is centered at the closest @ and looks at a area with
    coincInputRadius below it

    This function call returns an iterator over the coincidence centers.
    Each element in iterator is a tuple: (y, x). The iterator returns
    elements in a fixed order.
    """
    # Determine Y centers
    if inputShape[0] > 1:
      # 2-D layout
      startHeight = inputBorder
      stopHeight = inputShape[0] - inputBorder
    else:
      startHeight = stopHeight = 0
    heightCenters = numpy.linspace(startHeight, stopHeight,
                                   coincidencesShape[0],
                                   endpoint=False).astype('int32')

    # Determine X centers
    startWidth = inputBorder
    stopWidth = inputShape[1] - inputBorder
    widthCenters = numpy.linspace(startWidth, stopWidth,
                                  coincidencesShape[1],
                                  endpoint=False).astype('int32')

    return list(cross(heightCenters, widthCenters))

  #############################################################################
  def _updateInhibitionObj(self):
    """ Calculate the average inhibitionRadius to use and update the
    inhibition object accordingly. This looks at the size of the average
    connected receptive field and uses that to determine the inhibition
    radius.
""" # ======================================================================== # Compute the inhibition radius. # If using global inhibition, just set it to include the entire region if self.globalInhibition: avgRadius = max(self.coincidencesShape) # Else, set it based on the average size of the connected synapses area in # each cell. else: totalDim = 0 # Get the dimensions of the connected receptive fields of each cell to # compute the average minDim = numpy.inf maxDim = 0 for masterNum in xrange(self.numCloneMasters): masterConnected = self._masterConnectedM[masterNum] nzs = masterConnected.getAllNonZeros() (rows, cols) = zip(*nzs) rows = numpy.array(rows) cols = numpy.array(cols) if len(rows) >= 2: height = rows.max() - rows.min() + 1 else: height = 1 if len(cols) >= 2: width = cols.max() - cols.min() + 1 else: width = 1 avgDim = (height + width) / 2.0 minDim = min(minDim, avgDim) maxDim = max(maxDim, avgDim) totalDim += avgDim # Get average width/height in input space avgDim = totalDim / self.numCloneMasters self._rfRadiusAvg = (avgDim - 1.0) / 2.0 self._rfRadiusMin = (minDim - 1.0) / 2.0 self._rfRadiusMax = (maxDim - 1.0) / 2.0 # How many columns in cell space does it correspond to? 
if self.inputShape[0] > 1: # 2-D layout coincsPerInputX = float(self.coincidencesShape[1]) \ / (self.inputShape[1] - 2*self.inputBorder) coincsPerInputY = float(self.coincidencesShape[0]) \ / (self.inputShape[0] - 2*self.inputBorder) else: coincsPerInputX = coincsPerInputY = \ float(self.coincidencesShape[1] * self.coincidencesShape[0]) \ / (self.inputShape[1] - 2*self.inputBorder) avgDim *= (coincsPerInputX + coincsPerInputY) / 2 avgRadius = (avgDim - 1.0) / 2.0 avgRadius = max(1.0, avgRadius) # Can't be greater than the overall width or height of the level maxDim = max(self.coincidencesShape) avgRadius = min(avgRadius, maxDim) avgRadius = int(round(avgRadius)) # ======================================================================== # Is there a need to re-instantiate the inhibition object? if self._inhibitionObj is None \ or self._inhibitionObj.getInhibitionRadius() != avgRadius: # What is our target density? if self.localAreaDensity > 0: localAreaDensity = self.localAreaDensity else: numCellsPerInhArea = (avgRadius * 2.0 + 1.0) ** 2 totalCells = self.coincidencesShape[0] * self.coincidencesShape[1] numCellsPerInhArea = min(numCellsPerInhArea, totalCells) localAreaDensity = float(self.numActivePerInhArea) / numCellsPerInhArea # Don't let it be greater than 0.50 localAreaDensity = min(localAreaDensity, 0.50) if self.spVerbosity >= 2: print "Updating inhibition object:" print " avg. rfRadius:", self._rfRadiusAvg print " avg. inhRadius:", avgRadius print " Setting density to:", localAreaDensity self._inhibitionObj = Inhibition2(self.coincidencesShape[0], # height self.coincidencesShape[1], # width avgRadius, # inhRadius localAreaDensity) # density ############################################################################# def _updateMinDutyCycles(self, actDutyCycles, minPctDutyCycle, minDutyCycles): """ Calculate and update the minimum acceptable duty cycle for each cell based on the duty cycles of the cells within its inhibition radius and the minPctDutyCycle. 
Parameters: ----------------------------------------------------------------------- actDutyCycles: The actual duty cycles of all cells minPctDutyCycle: Each cell's minimum duty cycle will be set to minPctDutyCycle times the duty cycle of the most active cell within its inhibition radius minDutyCycles: This array will be updated in place with the new minimum acceptable duty cycles """ # What is the inhibition radius? inhRadius = self._inhibitionObj.getInhibitionRadius() # Reshape the actDutyCycles to match the topology of the level cloningOn = (self.numCloneMasters != self._coincCount) if not cloningOn: actDutyCycles = actDutyCycles.reshape(self.coincidencesShape) minDutyCycles = minDutyCycles.reshape(self.coincidencesShape) # Special, faster handling when inhibition radius includes the entire # set of cells if cloningOn or inhRadius >= max(self.coincidencesShape): minDutyCycle = minPctDutyCycle * actDutyCycles.max() minDutyCycles.fill(minPctDutyCycle * actDutyCycles.max()) # Else, process each cell else: (numRows, numCols) = self.coincidencesShape for row in xrange(numRows): top = max(0, row - inhRadius) bottom = min(row + inhRadius + 1, numRows) for col in xrange(numCols): left = max(0, col - inhRadius) right = min(col + inhRadius + 1, numCols) maxDutyCycle = actDutyCycles[top:bottom, left:right].max() minDutyCycles[row, col] = maxDutyCycle * minPctDutyCycle if self.spVerbosity >= 2: print "Actual duty cycles:" print fdru.numpyStr(actDutyCycles, '%.4f') print "Recomputed min duty cycles, using inhRadius of", inhRadius print fdru.numpyStr(minDutyCycles, '%.4f') ############################################################################# def _computeOverlapsPy(self, inputShaped, stimulusThreshold): """ Computes overlaps for every column for the current input in place. The overlaps less than stimulus threshold are set to zero here. For columns with input RF going off the edge of input field, only regions within the input field are considered. 
This is equivalent to padding the input field with zeros. Parameters: ------------------------------------------------------------------------ inputShaped: input at the current time step, shaped to the input topology stimulusThreshold: stimulusThreshold to use Member variables used/updated: ------------------------------------------------------------------------ _inputSlices: Index into the input (assuming it's been shaped to a 2D array) to get the receptive field of each column. _coincSlices: Index into the coinc (assuming it's been shaped to a 2D array) to get the valid region of each column. _overlaps: Result is placed into this array which holds the overlaps of each column with the input """ flatInput = inputShaped.reshape(-1) self._allConnectedM.rightVecSumAtNZ_fast(flatInput, self._overlaps) # Apply stimulusThreshold # TODO: Is there a faster numpy operation for this? self._overlaps[self._overlaps < stimulusThreshold] = 0 self._overlapsNoBoost = self._overlaps.copy() ############################################################################# def _computeOverlapsCPP(self, inputShaped, stimulusThreshold): """ Same as _computeOverlapsPy, but using a C++ implementation. """ cpp_overlap(self._cloneMapFlat, self._inputSlices2, self._coincSlices2, inputShaped, self._masterConnectedM, stimulusThreshold, self._overlaps); ############################################################################# def _computeOverlapsTest(self, inputShaped, stimulusThreshold): """ Same as _computeOverlapsPy, but compares the python and C++ implementations. 
""" # Py version self._computeOverlapsPy(inputShaped, stimulusThreshold) overlaps2 = copy.deepcopy(self._overlaps) # C++ version self._computeOverlapsCPP(inputShaped, stimulusThreshold) if (abs(self._overlaps - overlaps2) > 1e-6).any(): print self._overlaps, overlaps2, abs(self._overlaps - overlaps2) import pdb; pdb.set_trace() sys.exit(0) ############################################################################# def _raiseAllPermanences(self, masterNum, minConnections=None, densePerm=None, densePotential=None): """ Raise all permanences of the given master. If minConnections is given, the permanences will be raised until at least minConnections of them are connected strength. If minConnections is left at None, all permanences will be raised by self._synPermBelowStimulusInc. After raising all permanences, we also "sparsify" the permanence matrix and set to 0 any permanences which are already very close to 0, this keeps the memory requirements of the sparse matrices used to store the permanences lower. Parameters: ---------------------------------------------------------------------------- masterNum: Which master to bump up minConnections: Desired number of connected synapses to have If None, then all permanences are simply bumped up by self._synPermBelowStimulusInc densePerm: The dense representation of the master's permanence matrix, if available. If not specified, we will create this from the stored sparse representation. Providing this will avoid some compute overhead. If provided, it is assumed that it is more recent than the stored sparse matrix. The stored sparse matrix will ALWAYS be updated from the densePerm if the densePerm is provided. densePotential: The dense representation of the master's potential synapses matrix, if available. If not specified, we will create this from the stored sparse potential matrix. Providing this will avoid some compute overhead. If provided, it is assumed that it is more recent than the stored sparse matrix. 
    retval:         (modified, numConnections)
                    modified: True if any permanences were raised
                    numConnections: Number of actual connected synapses
                    (not computed if minConnections was None, so None is
                    returned in that case.)
    """
    # It's faster to perform this operation on the dense matrices and
    # then convert to sparse once we're done since we will be potentially
    # introducing and then later removing a bunch of non-zeros.

    # -------------------------------------------------------------------
    # Get references to the sparse perms and potential syns for this master
    sparsePerm = self._masterPermanenceM[masterNum]
    sparsePotential = self._masterPotentialM[masterNum]

    # We will trim off all synapse permanences below this value to 0 in order
    # to keep the memory requirements of the SparseMatrix lower
    trimThreshold = self.synPermActiveInc / 2.0

    # -------------------------------------------------------------------
    # See if we already have the required number of connections. If we don't,
    # get the dense form of the permanences if we don't have them already
    if densePerm is None:
      # See if we already have enough connections, if so, we can avoid the
      # overhead of converting to dense
      if minConnections is not None:
        numConnected = sparsePerm.countWhereGreaterEqual(
            0, self._coincRFShape[0], 0, self._coincRFShape[1],
            self.synPermConnected)
        if numConnected >= minConnections:
          return (False, numConnected)
      densePerm = self._masterPermanenceM[masterNum].toDense()

    elif minConnections is not None:
      numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected)
      if numConnected >= minConnections:
        # Caller's densePerm is authoritative: sync it back to the sparse
        # form before returning
        sparsePerm.fromDense(densePerm)
        sparsePerm.threshold(trimThreshold)
        return (False, numConnected)

    # Get the dense form of the potential synapse locations
    if densePotential is None:
      densePotential = self._masterPotentialM[masterNum].toDense()

    # -------------------------------------------------------------------
    # Form the array with the increments; only potential synapse locations
    # receive the bump
    incrementM = densePotential.astype(realDType)
    incrementM *= self._synPermBelowStimulusInc

    # -------------------------------------------------------------------
    # Increment until we reach our target number of connections
    assert (densePerm.dtype == realDType)
    while True:
      densePerm += incrementM
      if minConnections is None:
        # Unconditional single bump; connection count not requested
        numConnected = None
        break
      numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected)
      if numConnected >= minConnections:
        break

    # -------------------------------------------------------------------
    # Convert back to sparse form and trim any values that are already
    # close to zero
    sparsePerm.fromDense(densePerm)
    sparsePerm.threshold(trimThreshold)

    return (True, numConnected)

  #############################################################################
  def _bumpUpWeakCoincidences(self):
    """
    This bump-up ensures every coincidence have non-zero connections. We find
    all coincidences which have overlaps less than stimulus threshold. We add
    synPermActiveInc to all the synapses. This step when repeated over time
    leads to synapses crossing synPermConnected threshold.
    """
    # Update each cell's connected threshold based on the duty cycle before
    # inhibition. The connected threshold is linearly interpolated
    # between the points (dutyCycle:0, thresh:0) and (dutyCycle:minDuty,
    # thresh:synPermConnected). This is a line defined as: y = mx + b
    # thresh = synPermConnected/minDuty * dutyCycle
    bumpUpList = (self._dutyCycleBeforeInh \
                  < self._minDutyCycleBeforeInh).nonzero()[0]
    for master in bumpUpList:
      self._raiseAllPermanences(master)

    # Update the connected synapses for each master we touched
    self._updateConnectedCoincidences(bumpUpList)

    if self.spVerbosity >= 2 and len(bumpUpList) > 0:
      # NOTE(review): the two concatenated string literals below join with no
      # separator, printing "...below" immediately followed by
      # "minDutyCycleBeforeInh:" — left as-is to preserve output.
      print "Bumping up permanences in following cells due to falling below" \
            "minDutyCycleBeforeInh:", bumpUpList

  #############################################################################
  def _updateBoostFactors(self):
    """ Update the boost factors.
The boost factors is linearly interpolated between the points (dutyCycle:0, boost:maxFiringBoost) and (dutyCycle:minDuty, boost:1.0). This is a line defined as: y = mx + b boost = (1-maxFiringBoost)/minDuty * dutyCycle + maxFiringBoost Parameters: ------------------------------------------------------------------------ boostFactors: numpy array of boost factors, defined per master """ if self._minDutyCycleAfterInh.sum() > 0: self._firingBoostFactors = (1 - self.maxFiringBoost)\ /self._minDutyCycleAfterInh * self._dutyCycleAfterInh \ + self.maxFiringBoost self._firingBoostFactors[self._dutyCycleAfterInh \ > self._minDutyCycleAfterInh] = 1.0 # if self._dutyCycleAfterInh.min() == 0: # where there are unlearned coincs # self._firingBoostFactors[self._dutyCycleAfterInh == 0] = \ # self.maxFiringBoost ############################################################################# def _updateInputUse(self, onCellIndices): """ During learning (adapting permanence values), we need to be able to tell which inputs are going to 2 or more active cells at once. We step through each coinc and mark all the inputs it is connected to. The inputUse array acts as a counter for the number of connections to the coincs from each input. Parameters: ------------------------------------------------------------------------ inputUse: numpy array of number of coincs connected to each input """ allConnected = SM32(self._allConnectedM) # TODO: avoid this copy self._inputUse[:] = allConnected.addListOfRows( onCellIndices).reshape(self.inputShape) ############################################################################# def _adaptSynapses(self, onCellIndices, orphanCellIndices, input): """ This is the main function in learning of SP. The permanence values are changed based on the learning rules. Parameters: ------------------------------------------------------------------------ onCellIndices: columns which are turned on after inhibition. 
                       The permanence values of these coincs are adapted
                       based on the input.
    orphanCellIndices: columns which had very high overlap with the input,
                       but ended up being inhibited
    input:             Input, shaped to the input topology
    retval:            list of masterCellIndices that were actually updated,
                       or None if cloning is off
    """
    # Capturing learning stats?
    if self.printPeriodicStats > 0:
      self._stats['explainedInputsCurIteration'] = set()

    # Precompute the active, inactive, and dupe inputs up front for speed
    # TODO: put these into pre-allocated arrays for speed
    self._activeInput[:] = input

    # Create a matrix containing the default permanence deltas for each input
    self._permChanges.fill(-1 * self.synPermInactiveDec)
    self._permChanges[self._activeInput] = self.synPermActiveInc
    if self.synPermActiveSharedDec != 0:
      # "Dupe" inputs: active inputs connected to more than one active cell
      numpy.logical_and(self._activeInput, self._inputUse > 1,
                        self._dupeInput)
      self._permChanges[self._dupeInput] -= self.synPermActiveSharedDec

    # Cloning? If so, scramble the onCells so that we pick a random one to
    # update for each master. We only update a master cell at most one time
    # per input presentation.
    cloningOn = (self.numCloneMasters != self._coincCount)
    if cloningOn:
      # Scramble the onCellIndices so that we pick a random one to update
      onCellIndices = list(onCellIndices)
      random.shuffle(onCellIndices)
      visitedMasters = set()

    # For the firing cells, update permanence values
    for columnNum in itertools.chain(onCellIndices, orphanCellIndices):
      # Get the master number
      masterNum = self._cloneMapFlat[columnNum]

      # If cloning, only visit each master once
      if cloningOn:
        if masterNum in visitedMasters:
          continue
        visitedMasters.add(masterNum)

      # Get the slices of input that overlap with the valid area of this
      # master
      inputSlice = self._inputSlices[columnNum]
      rfActiveInput = self._activeInput[inputSlice]
      rfPermChanges = self._permChanges[inputSlice]

      # Get the potential synapses, permanence values, and connected synapses
      # for this master
      masterPotential = self._masterPotentialM[masterNum].toDense()
      masterPermanence = self._masterPermanenceM[masterNum].toDense()
      masterConnected = \
          self._masterConnectedM[masterNum].toDense().astype('bool')

      # Make changes only over the areas that overlap the input level. For
      # coincidences near the edge of the level for example, this excludes
      # the synapses outside the edge.
      coincSlice = self._coincSlices[columnNum]
      masterValidPermanence = masterPermanence[coincSlice]

      # Capturing learning stats?
      if self.printPeriodicStats > 0:
        masterValidConnected = masterConnected[coincSlice]
        explainedInputs = self._inputLayout[inputSlice][masterValidConnected]
        self._stats['explainedInputsCurIteration'].update(explainedInputs)

      if self.spVerbosity >= 3:
        print " adapting cell:%d [%d:%d] (master:%d)" % (columnNum,
            columnNum // self.coincidencesShape[1],
            columnNum % self.coincidencesShape[1], masterNum)
        print " initialConnected: %d" % \
            (self._masterConnectedM[masterNum].nNonZeros())
        print " firingLevel: %d" % (self._overlaps[columnNum])
        print " firingBoostFactor: %f" % (self._firingBoostFactors[masterNum])
        print " input slice: \n"
        self._printInputSlice(rfActiveInput, prefix=' ')

      # Update permanences given the active input (NOTE: The "FP" in this
      # function name stands for "Function Pointer").
      if columnNum in orphanCellIndices:
        # Decrease permanence of active inputs
        masterValidPermanence[rfActiveInput] -= self.synPermOrphanDec
      else:
        self._updatePermanenceGivenInputFP(columnNum, masterNum, input,
            self._inputUse, masterPermanence, masterValidPermanence,
            rfActiveInput, rfPermChanges)

      # Clip to absolute min and max permanence values
      numpy.clip(masterPermanence, self._synPermMin, self._synPermMax,
                 out=masterPermanence)

      # Keep only the potential syns for this cell
      numpy.multiply(masterPermanence, masterPotential, masterPermanence)

      # If we are tracking learning stats, prepare to see how many changes
      # were made to the cell connections
      if self.printPeriodicStats > 0:
        masterConnectedOrig = SM_01_32_32(self._masterConnectedM[masterNum])

      # ---------------------------------------------------------------------
      # If the number of connected synapses happens to fall below
      # stimulusThreshold, bump up all permanences a bit.
      # We could also just wait for the "duty cycle falls below
      # minDutyCycleBeforeInb" logic to catch it, but doing it here is
      # pre-emptive and much faster.
      #
      # The "duty cycle falls below minDutyCycleBeforeInb" logic will still
      # catch other possible situations, like:
      # * if the set of inputs a cell learned suddenly stop firing due to
      #   input statistic changes
      # * damage to the level below
      # * input is very sparse and we still don't pass stimulusThreshold even
      #   with stimulusThreshold conneted synapses.
      self._raiseAllPermanences(masterNum,
                                minConnections=self.stimulusThreshold,
                                densePerm=masterPermanence,
                                densePotential=masterPotential)

      # Update the matrices that contain the connected syns for this cell.
      self._updateConnectedCoincidences([masterNum])

      # If we are tracking learning stats, see how many changes were made to
      # this cell's connections
      if self.printPeriodicStats > 0:
        origNumConnections = masterConnectedOrig.nNonZeros()
        masterConnectedOrig.logicalAnd(self._masterConnectedM[masterNum])
        numUnchanged = masterConnectedOrig.nNonZeros()
        # Changed = removed connections + newly added connections
        numChanges = origNumConnections - numUnchanged
        numChanges += self._masterConnectedM[masterNum].nNonZeros() \
                      - numUnchanged
        self._stats['numChangedConnectionsSum'][masterNum] += numChanges
        self._stats['numLearns'][masterNum] += 1

      # Verbose?
      if self.spVerbosity >= 3:
        print " done cell:%d [%d:%d] (master:%d)" % (columnNum,
            columnNum // self.coincidencesShape[1],
            columnNum % self.coincidencesShape[1], masterNum)
        print " newConnected: %d" % \
            (self._masterConnectedM[masterNum].nNonZeros())
        self._printSyns(columnNum, prefix=' ',
                        showValues=(self.spVerbosity >= 4))
        print

    # Return list of updated masters
    if cloningOn:
      return list(visitedMasters)
    else:
      return onCellIndices

  #############################################################################
  def _updatePermanenceGivenInputPy(self, columnNum, masterNum, input,
                                    inputUse, permanence, permanenceSlice,
                                    activeInputSlice, permChangesSlice):
    """ Given the input to a master coincidence, update it's permanence
    values based on our learning rules.
    On Entry, we are given the slice of the permanence matrix that
    corresponds only to the area of the coincidence master that is within
    the borders of the entire input field.

    Parameters:
    ------------------------------------------------------------------------
    columnNum:        The column number of this cell
    masterNum:        The master coincidence that corresponds to this column
    input:            The entire input, shaped appropriately
    inputUse:         The same shape as input. Each entry is a count of the
                      number of *currently active cells* that are connected
                      to that input.
    permanence:       The entire masterPermanence matrix for this master
    permanenceSlice:  The slice of the masterPermanence matrix for this
                      master that intersects the input field, i.e. does not
                      overhang the outside edges of the input.
    activeInputSlice: The portion of 'input' that intersects permanenceSlice,
                      set to True where input != 0
    permChangesSlice: The portion of 'input' that intersects permanenceSlice,
                      set to self.synPermActiveInc where input != 0 and
                      self.synPermInactiveDec where the input == 0. This is
                      used to optimally apply self.synPermActiveInc and
                      self.synPermInactiveDec at the same time and can be
                      used for any cell whose _synPermBoostFactor is set to
                      1.0.
    """
    # Apply the baseline increment/decrements
    permanenceSlice += permChangesSlice

    # If this cell has permanence boost, apply the incremental
    # NOTE(review): the comment above appears truncated in the original
    # source and the boost step is absent — confirm whether it was
    # intentionally removed (see the synPermBoostFactors TODO in the CPP
    # variant below).

  #############################################################################
  def _updatePermanenceGivenInputCPP(self, columnNum, masterNum, input,
                                     inputUse, permanence, permanenceSlice,
                                     activeInputSlice, permChangesSlice):
    """ Same as _updatePermanenceGivenInputPy, but using a C++
    implementation.
    """
    inputNCols = self.inputShape[1]
    masterNCols = self._masterPotentialM[masterNum].shape[1]
    #TODO: synPermBoostFactors has been removed. CPP implementation has not been updated for this.
    adjustMasterValidPermanence(columnNum, masterNum,
                                inputNCols, masterNCols,
                                self.synPermActiveInc,
                                self.synPermInactiveDec,
                                self.synPermActiveSharedDec,
                                input, inputUse,
                                self._inputSlices2, self._coincSlices2,
                                self._synPermBoostFactors, permanence)

  #############################################################################
  def _updatePermanenceGivenInputTest(self, columnNum, masterNum, input,
                                      inputUse, permanence, permanenceSlice,
                                      activeInputSlice, permChangesSlice):
    """ Same as _updatePermanenceGivenInputPy, but compares the python and
    C++ implementations.
    """
    mp2 = copy.deepcopy(permanence)
    mvp2 = copy.deepcopy(permanenceSlice)

    # Py version
    # NOTE(review): this unconditional pdb.set_trace() looks like leftover
    # debugging — it will always stop here when this test variant is active.
    import pdb; pdb.set_trace()
    self._updatePermanenceGivenInputPy(columnNum, masterNum, input, inputUse,
                                       permanence, permanenceSlice,
                                       activeInputSlice, permChangesSlice)

    # C++ version
    self._updatePermanenceGivenInputCPP(columnNum, masterNum, input, inputUse,
                                        mp2, mvp2,
                                        activeInputSlice, permChangesSlice)

    if abs(mp2 - permanence).max() > 1e-6:
      print abs(mp2 - permanence).max()
      import pdb; pdb.set_trace()
      sys.exit(0)

  #############################################################################
  def _periodicStatsCreate(self):
    """ Allocate the periodic stats structure

    Parameters:
    ------------------------------------------------------------------
    """
    self._stats = dict()
    self._stats['numChangedConnectionsSum'] = numpy.zeros(
        self.numCloneMasters, dtype=realDType)
    self._stats['numLearns'] = numpy.zeros(self.numCloneMasters,
                                           dtype=realDType)

    # These keep track of the min and max boost factor seen for each
    # column during each training period
    self._stats['minBoostFactor'] = numpy.zeros(self.numCloneMasters,
                                                dtype=realDType)
    self._stats['maxBoostFactor'] = numpy.zeros(self.numCloneMasters,
                                                dtype=realDType)

    # This dict maintains mappings of specific input patterns to specific
    # output patterns. It is used to detect "thrashing" of cells. We measure
    # how similar the output presentation of a specific input is to the
    # last time we saw it.
self._stats['inputPatterns'] = dict() self._stats['inputPatternsLimit'] = 5000 self._periodicStatsReset() ############################################################################# def _periodicStatsReset(self): """ Reset the periodic stats this is done every N iterations before capturing a new set of stats Parameters: ------------------------------------------------------------------ """ self._stats['numSamples'] = 0 self._stats['numOnSum'] = 0 self._stats['underCoveragePctSum'] = 0 self._stats['overCoveragePctSum'] = 0 self._stats['cellOverlapSums'] = 0 self._stats['cellPctOverlapSums'] = 0 self._stats['explainedInputsCurIteration'] = set() self._stats['startTime'] = time.time() # These keep a count of the # of changed connections per update # for each master self._stats['numChangedConnectionsSum'].fill(0) self._stats['numLearns'].fill(0) # These keep track of the min and max boost factor seen for each # column during each training period self._stats['minBoostFactor'].fill(self.maxFiringBoost) self._stats['maxBoostFactor'].fill(0) # This keeps track of the average distance between the SP output of # a specific input pattern now and the last time we saw it. self._stats['outputPatternDistanceSum'] = 0 self._stats['outputPatternSamples'] = 0 ############################################################################# def _periodicStatsComputeEnd(self, activeCells, activeInputs): """ Called at the end of compute. This increments the number of computes and also summarizes the under and over coverage and whatever other periodic stats we need. 
    If the period is up, it then prints the accumuated stats and resets
    them for the next period

    Parameters:
    ------------------------------------------------------------------
    activeCells:      list of the active cells
    activeInputs:     list of the active inputs
    """

    # Update number of samples
    self._stats['numSamples'] += 1

    # Compute under and over coverage
    numOn = len(activeCells)
    self._stats['numOnSum'] += numOn

    expInput = self._stats['explainedInputsCurIteration']
    inputLen = len(activeInputs)

    # Under-coverage: fraction of active inputs not explained by any cell
    underCoverage = len(set(activeInputs).difference(expInput))
    self._stats['underCoveragePctSum'] += float(underCoverage) / inputLen

    # Over-coverage: explained inputs that were not actually active
    expInput.difference_update(activeInputs)
    overCoverage = len(expInput)
    self._stats['overCoveragePctSum'] += float(overCoverage) / inputLen

    # Keep track of the min and max boost factor seen for each column
    # (third argument is numpy's 'out' parameter: update in place)
    numpy.minimum(self._firingBoostFactors, self._stats['minBoostFactor'],
                  self._stats['minBoostFactor'])
    numpy.maximum(self._firingBoostFactors, self._stats['maxBoostFactor'],
                  self._stats['maxBoostFactor'])

    # Calculate the distance in the SP output between this input now
    # and the last time we saw it.
    inputPattern = str(sorted(activeInputs))
    (outputNZ, sampleIdx) = \
        self._stats['inputPatterns'].get(inputPattern, (None, None))
    activeCellSet = set(activeCells)
    if outputNZ is not None:
      # Symmetric set difference: number of cells that changed between the
      # two presentations of the same input pattern
      distance = len(activeCellSet.difference(outputNZ)) \
                  + len(outputNZ.difference(activeCellSet))
      #print "DISTANCE: ", distance
      #if len(self._stats['inputPatterns']) == 100 and distance > 0:
      #  print "input pattern (%d):" % (sampleIdx), inputPattern
      #  print "prior output:", sorted(outputNZ)
      #  print "new output:", sorted(activeCellSet)
      #  print "distance: ", distance
      self._stats['inputPatterns'][inputPattern] = (activeCellSet, sampleIdx)
      self._stats['outputPatternDistanceSum'] += distance
      self._stats['outputPatternSamples'] += 1

    # Add this sample to our dict, if it's not too large already
    elif len(self._stats['inputPatterns']) < self._stats['inputPatternsLimit']:
      self._stats['inputPatterns'][inputPattern] = \
          (activeCellSet, self._iterNum)

    # -----------------------------------------------------------------------
    # If it's not time to print them out, return now
    if (self._iterNum % self.printPeriodicStats) != 0:
      return

    numSamples = float(self._stats['numSamples'])

    # Calculate number of changes made per master (only over masters that
    # actually learned this period, to avoid divide-by-zero)
    masterTouched = numpy.where(self._stats['numLearns'] > 0)
    if len(masterTouched[0]) == 0:
      numMasterChanges = numpy.zeros(1)
    else:
      numMasterChanges = self._stats['numChangedConnectionsSum'][masterTouched]
      numMasterChanges /= self._stats['numLearns'][masterTouched]

    # This fills in the static learning stats into self._learningStats
    self.getLearningStats()

    # Calculate and copy the transient learning stats into the
    # self._learningStats dict, for possible retrieval later by
    # the getLearningStats() method
    self._learningStats['elapsedTime'] = time.time() - self._stats['startTime']
    self._learningStats['activeCountAvg'] = self._stats['numOnSum'] / numSamples
    self._learningStats['underCoveragePct'] = \
        100.0*self._stats['underCoveragePctSum'] / numSamples
    self._learningStats['overCoveragePct'] = \
        100.0*self._stats['overCoveragePctSum'] / numSamples
    self._learningStats['numConnectionChangesAvg'] = numMasterChanges.mean()
    self._learningStats['numConnectionChangesMin'] = numMasterChanges.min()
    self._learningStats['numConnectionChangesMax'] = numMasterChanges.max()
    self._learningStats['avgCellOverlap'] = \
        float(self._stats['cellOverlapSums']) / max(1, self._stats['numOnSum'])
    self._learningStats['avgCellPctOverlap'] = \
        100.0*self._stats['cellPctOverlapSums'] / max(1, self._stats['numOnSum'])
    self._learningStats['firingBoostMaxChangePct'] = 100.0 \
        * (self._stats['maxBoostFactor'] / self._stats['minBoostFactor']).max() \
        - 100.0
    self._learningStats['outputRepresentationChangeAvg'] = \
        float(self._stats['outputPatternDistanceSum']) / \
        max(1, self._stats['outputPatternSamples'])
    self._learningStats['outputRepresentationChangePctAvg'] = \
        100.0 * self._learningStats['outputRepresentationChangeAvg'] / \
        max(1,self._learningStats['activeCountAvg'])
    self._learningStats['numUniqueInputsSeen'] = \
        len(self._stats['inputPatterns'])
    # -1 flags that the pattern dict overflowed, so the count is unreliable
    if self._learningStats['numUniqueInputsSeen'] >= \
        self._stats['inputPatternsLimit']:
      self._learningStats['numUniqueInputsSeen'] = -1

    # -------------------------------------------------------------------
    # Print all stats captured
    print "Learning stats for the last %d iterations:" % (numSamples)
    print " iteration #: %d" % (self._iterNum)
    print " inference iteration #: %d" % (self._inferenceIterNum)
    print " elapsed time: %.2f" \
            % (self._learningStats['elapsedTime'])
    print " avg activeCount: %.1f" \
            % (self._learningStats['activeCountAvg'])
    print " avg under/overCoverage: %-6.1f / %-6.1f %%" \
            % (self._learningStats['underCoveragePct'],
               self._learningStats['overCoveragePct'])
    print " avg cell overlap: %-6.1f / %-6.1f %%" \
            % (self._learningStats['avgCellOverlap'],
               self._learningStats['avgCellPctOverlap'])
    print " avg/min/max RF radius: %-6.1f / %-6.1f / %-6.1f" \
            % (self._learningStats['rfRadiusAvg'],
               self._learningStats['rfRadiusMin'],
self._learningStats['rfRadiusMax']) print " inhibition radius: %d" \ % (self._learningStats['inhibitionRadius']) print " target density: %.5f %%" \ % (self._learningStats['targetDensityPct']) print " avg/min/max coinc. size: %-6.1f / %-6d / %-6d" \ % (self._learningStats['coincidenceSizeAvg'], self._learningStats['coincidenceSizeMin'], self._learningStats['coincidenceSizeMax']) print " avg/min/max DC before inh: %-6.4f / %-6.4f / %-6.4f" \ % (self._learningStats['dcBeforeInhibitionAvg'], self._learningStats['dcBeforeInhibitionMin'], self._learningStats['dcBeforeInhibitionMax']) print " avg/min/max DC after inh: %-6.4f / %-6.4f / %-6.4f" \ % (self._learningStats['dcAfterInhibitionAvg'], self._learningStats['dcAfterInhibitionMin'], self._learningStats['dcAfterInhibitionMax']) print " avg/min/max boost: %-6.4f / %-6.4f / %-6.4f" \ % (self._learningStats['firingBoostAvg'], self._learningStats['firingBoostMin'], self._learningStats['firingBoostMax']) print " avg/min/max # conn. changes: %-6.4f / %-6.4f / %-6.4f" \ % (self._learningStats['numConnectionChangesAvg'], self._learningStats['numConnectionChangesMin'], self._learningStats['numConnectionChangesMax']) print " max change in boost: %.1f %%" \ % (self._learningStats['firingBoostMaxChangePct']) print " avg change in output repr.: %-6.1f / %-6.1f %%" \ % (self._learningStats['outputRepresentationChangeAvg'], 100.0 * self._learningStats['outputRepresentationChangeAvg'] / max(1,self._learningStats['activeCountAvg'])) print " # of unique input pats seen: %d" \ % (self._learningStats['numUniqueInputsSeen']) #self._printMemberSizes() # Reset the stats for the next period self._periodicStatsReset() ############################################################################## def _printInputSlice(self, inputSlice, prefix=''): """ Print the given input slice in a nice human readable format. 
Parameters: --------------------------------------------------------------------- cell: The slice of input to print prefix: This is printed at the start of each row of the coincidence """ # Shape of each coincidence (rfHeight, rfWidth) = inputSlice.shape syns = inputSlice != 0 def _synStr(x): if not x: return ' ' else: return '*' # Print them out for row in xrange(syns.shape[0]): items = map(_synStr, syns[row]) print prefix, ''.join(items) ############################################################################## def _printSyns(self, cell, prefix='', showValues=False): """ Print the synapse permanence values for the given cell in a nice, human, readable format. Parameters: --------------------------------------------------------------------- cell: which cell to print prefix: This is printed at the start of each row of the coincidence showValues: If True, print the values of each permanence. If False, just print a ' ' if not connected and a '*' if connected """ # Shape of each coincidence (rfHeight, rfWidth) = self.inputShape # Get the synapse permanences. 
masterNum = self._cloneMapFlat[cell] syns = self._masterPermanenceM[masterNum].toDense() if showValues: def _synStr(x): if x == 0: return ' -- ' elif x < 0.001: return ' 0 ' elif x >= self.synPermConnected: return '#%3.2f' % x else: return ' %3.2f' % x else: def _synStr(x): if x < self.synPermConnected: return ' ' else: return '*' # Print them out for row in xrange(syns.shape[0]): items = map(_synStr, syns[row]) if showValues: print prefix, ' '.join(items) else: print prefix, ''.join(items) ############################################################################## def _printMemberSizes(self, over=100): """ Print the size of each member """ members = self.__dict__.keys() sizeNamePairs = [] totalSize = 0 for member in members: item = self.__dict__[member] if hasattr(item, '__func__'): continue try: if hasattr(item, '__len__'): size = 0 for i in xrange(len(item)): size += len(cPickle.dumps(item[i])) else: size = len(cPickle.dumps(item)) except: print "WARNING: Can't pickle %s" % (member) size = 0 sizeNamePairs.append((size, member)) totalSize += size # Print them out from highest to lowest sizeNamePairs.sort(reverse=True) for (size, name) in sizeNamePairs: if size > over: print "%10d (%10.3fMb) %s" % (size, size/1000000.0, name) print "\nTOTAL: %10d (%10.3fMB) " % (totalSize, totalSize/1000000.0) ############################################################################## def printParams(self): """ Print the main creation parameters associated with this instance. 
""" print "FDRCSpatial2 creation parameters: " print "inputShape =", self.inputShape print "inputBorder =", self.inputBorder print "inputDensity =", self.inputDensity print "coincidencesShape =", self.coincidencesShape print "coincInputRadius =", self.coincInputRadius print "coincInputPoolPct =", self.coincInputPoolPct print "gaussianDist =", self.gaussianDist print "commonDistributions =", self.commonDistributions print "localAreaDensity =", self.localAreaDensity print "numActivePerInhArea =", self.numActivePerInhArea print "stimulusThreshold =", self.stimulusThreshold print "synPermInactiveDec =", self.synPermInactiveDec print "synPermActiveInc =", self.synPermActiveInc print "synPermActiveSharedDec =", self.synPermActiveSharedDec print "synPermOrphanDec =", self.synPermOrphanDec print "synPermConnected =", self.synPermConnected print "minPctDutyCycleBeforeInh =", self.minPctDutyCycleBeforeInh print "minPctDutyCycleAfterInh =", self.minPctDutyCycleAfterInh print "dutyCyclePeriod =", self.dutyCyclePeriod print "maxFiringBoost =", self.maxFiringBoost print "maxSSFiringBoost =", self.maxSSFiringBoost print "maxSynPermBoost =", self.maxSynPermBoost print "minDistance =", self.minDistance print "spVerbosity =", self.spVerbosity print "printPeriodicStats =", self.printPeriodicStats print "testMode =", self.testMode print "numCloneMasters =", self.numCloneMasters removed topDownCompute from FDRCSpatial2 #!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import sys import os import numpy import numpy.random import random import itertools import time import math import copy import cPickle import struct from nupic.bindings.math import SM32, SM_01_32_32, count_gte, GetNTAReal from nupic.bindings.algorithms import Inhibition2, cpp_overlap, cpp_overlap_sbm from nupic.bindings.algorithms import adjustMasterValidPermanence from nupic.bindings.math import Random as NupicRandom from nupic.math.cross import cross from operator import itemgetter import nupic.research.fdrutilities as fdru realDType = GetNTAReal() gPylabInitialized = False # kDutyCycleFactor add dutyCycleAfterInh to overlap in Inhibition step to be a # tie breaker kDutyCycleFactor = 0.01 ################################################################################ def _extractCallingMethodArgs(): """ Returns args dictionary from the calling method """ import inspect import copy callingFrame = inspect.stack()[1][0] argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame) argNames.remove("self") args = copy.copy(frameLocalVarDict) for varName in frameLocalVarDict: if varName not in argNames: args.pop(varName) return args """ Class for spatial pooling based on fixed random distributed representation (FDR) """ class FDRCSpatial2(object): """ This version of FDRCSpatial inlcudes adaptive receptive fields, no-dupe rules and gradual boosting. 
  It supports 1-D and 2-D topologies with cloning
  """

  def __init__(self,
               inputShape = (32, 32),
               inputBorder = 8,
               inputDensity = 1.0,
               coincidencesShape = (48, 48),
               coincInputRadius = 16,
               coincInputPoolPct = 1.0,
               gaussianDist = False,
               commonDistributions = False,
               localAreaDensity = -1.0,
               numActivePerInhArea = 10.0,
               stimulusThreshold = 0,
               synPermInactiveDec = 0.01,
               synPermActiveInc = 0.1,
               synPermActiveSharedDec = 0.0,
               synPermOrphanDec = 0.0,
               synPermConnected = 0.10,
               minPctDutyCycleBeforeInh = 0.001,
               minPctDutyCycleAfterInh = 0.001,
               dutyCyclePeriod = 1000,
               maxFiringBoost = 10.0,
               maxSSFiringBoost = 2.0,
               maxSynPermBoost = 10.0,
               minDistance = 0.0,
               cloneMap = None,
               numCloneMasters = -1,
               seed = -1,
               spVerbosity = 0,
               printPeriodicStats = 0,
               testMode = False,
               globalInhibition = False,
               spReconstructionParam = "unweighted_mean",
               useHighTier = True,
               randomSP = False,
              ):
    """
    Parameters:
    ----------------------------
    inputShape:           The dimensions of the input vector. Format is
                          (height, width) e.g. (24, 72). If the input is from
                          a sensor, it is interpreted as having a 2-D topology
                          of 24 pixels high and 72 wide.
    inputBorder:          The first column from an edge will be centered over
                          an input which is 'inputBorder' inputs from the
                          edge.
    inputDensity:         The density of the input. This is only to aid in
                          figuring out the initial number of connected
                          synapses to place on each column. The lower the
                          inputDensity, the more initial connections will be
                          assigned to each column.
    coincidencesShape:    The dimensions of column layout. Format is
                          (height, width) e.g. (80, 100) means a total of
                          80*100 = 8000 columns are arranged in a 2-D topology
                          with 80 rows and 100 columns.
    coincInputRadius:     This defines the max radius of the receptive field
                          of each column. This is used to limit memory
                          requirements and processing time. It could be set
                          large enough to encompass the entire input field
                          and the SP would still work fine, but require more
                          memory and processing time. This parameter defines
                          a square area: a column will have a max square RF
                          with sides of length 2 * coincInputRadius + 1.
    coincInputPoolPct:    What percent of the columns's receptive field is
                          available for potential synapses. At initialization
                          time, we will choose
                          coincInputPoolPct * (2*coincInputRadius + 1)^2
                          potential synapses from the receptive field.
    gaussianDist:         If true, the initial permanences assigned to each
                          column will have a gaussian distribution to them,
                          making the column favor inputs directly below it
                          over inputs farther away. If false, the initial
                          permanences will have a random distribution across
                          the column's entire potential receptive field.
    commonDistributions:  If set to True (the default, faster startup time),
                          each column will be given the same initial
                          permanence values. This is normally OK when you
                          will be training, but if you will be sticking with
                          the untrained network, you will want to set this to
                          False (which makes startup take longer).
    localAreaDensity:     The desired density of active columns within a
                          local inhibition area (the size of which is set by
                          the internally calculated inhibitionRadius, which
                          is in turn determined from the average size of the
                          connected receptive fields of all columns). The
                          inhibition logic will insure that at most N columns
                          remain ON within a local inhibition area, where
                          N = localAreaDensity * (total number of columns in
                          inhibition area).
    numActivePerInhArea:  An alternate way to control the density of the
                          active columns. If numActivePerInhArea is specified
                          then localAreaDensity must be -1, and vice versa.
                          When using numActivePerInhArea, the inhibition
                          logic will insure that at most
                          'numActivePerInhArea' columns remain ON within a
                          local inhibition area (the size of which is set by
                          the internally calculated inhibitionRadius, which
                          is in turn determined from the average size of the
                          connected receptive fields of all columns). When
                          using this method, as columns learn and grow their
                          effective receptive fields, the inhibitionRadius
                          will grow, and hence the net density of the active
                          columns will *decrease*. This is in contrast to the
                          localAreaDensity method, which keeps the density of
                          active columns the same regardless of the size of
                          their receptive fields.
    stimulusThreshold:    This is a number specifying the minimum number of
                          synapses that must be on in order for a columns to
                          turn ON. The purpose of this is to prevent noise
                          input from activating columns.
    synPermInactiveDec:   How much an inactive synapse is decremented,
                          specified as a percent of a fully grown synapse.
    synPermActiveInc:     How much to increase the permanence of an active
                          synapse, specified as a percent of a fully grown
                          synapse.
    synPermActiveSharedDec: How much to decrease the permanence of an active
                          synapse which is connected to another column that
                          is active at the same time. Specified as a percent
                          of a fully grown synapse.
    synPermOrphanDec:     How much to decrease the permanence of an active
                          synapse on a column which has high overlap with the
                          input, but was inhibited (an "orphan" column).
    synPermConnected:     The default connected threshold. Any synapse whose
                          permanence value is above the connected threshold
                          is a "connected synapse", meaning it can contribute
                          to the cell's firing. Typical value is 0.10. Cells
                          whose activity level before inhibition falls below
                          minDutyCycleBeforeInh will have their own internal
                          synPermConnectedCell threshold set below this
                          default value. (This concept applies to both SP and
                          TP and so 'cells' is correct here as opposed to
                          'columns')
    minPctDutyCycleBeforeInh: A number between 0 and 1.0, used to set a floor
                          on how often a column should have at least
                          stimulusThreshold active inputs. Periodically, each
                          column looks at the duty cycle before inhibition of
                          all other column within its inhibition radius and
                          sets its own internal minimal acceptable duty cycle
                          to: minPctDutyCycleBeforeInh * max(other columns'
                          duty cycles). On each iteration, any column whose
                          duty cycle before inhibition falls below this
                          computed value will get all of its permanence
                          values boosted up by synPermActiveInc. Raising all
                          permanences in response to a sub-par duty cycle
                          before inhibition allows a cell to search for new
                          inputs when either its previously learned inputs
                          are no longer ever active, or when the vast
                          majority of them have been "hijacked" by other
                          columns due to the no-dupe rule.
    minPctDutyCycleAfterInh: A number between 0 and 1.0, used to set a floor
                          on how often a column should turn ON after
                          inhibition. Periodically, each column looks at the
                          duty cycle after inhibition of all other columns
                          within its inhibition radius and sets its own
                          internal minimal acceptable duty cycle to:
                          minPctDutyCycleAfterInh * max(other columns' duty
                          cycles). On each iteration, any column whose duty
                          cycle after inhibition falls below this computed
                          value will get its internal boost factor increased.
    dutyCyclePeriod:      The period used to calculate duty cycles. Higher
                          values make it take longer to respond to changes in
                          boost or synPerConnectedCell. Shorter values make
                          it more unstable and likely to oscillate.
    maxFiringBoost:       The maximum firing level boost factor. Each
                          column's raw firing strength gets multiplied by a
                          boost factor before it gets considered for
                          inhibition. The actual boost factor for a column is
                          number between 1.0 and maxFiringBoost. A boost
                          factor of 1.0 is used if the duty cycle is >=
                          minDutyCycle, maxFiringBoost is used if the duty
                          cycle is 0, and any duty cycle in between is
                          linearly extrapolated from these 2 endpoints.
    maxSSFiringBoost:     Once a column turns ON, it's boost will immediately
                          fall down to maxSSFiringBoost if it is above it.
                          This is accomplished by internally raising it's
                          computed duty cycle accordingly. This prevents a
                          cell which has had it's boost raised extremely high
                          from turning ON for too many diverse inputs in a
                          row within a short period of time.
    maxSynPermBoost:      The maximum synPermActiveInc boost factor. Each
                          column's synPermActiveInc gets multiplied by a
                          boost factor to make the column more or less likely
                          to form new connections. The actual boost factor
                          used is a number between 1.0 and maxSynPermBoost. A
                          boost factor of 1.0 is used if the duty cycle is >=
                          minDutyCycle, maxSynPermBoost is used if the duty
                          cycle is 0, and any duty cycle in between is
                          linearly extrapolated from these 2 endpoints.
    minDistance:          This parameter impacts how finely the input space
                          is quantized. It is a value between 0 and 1.0. If
                          set to 0, then every unique input presentation will
                          generate a unique output representation, within the
                          limits of the total number of columns available.
                          Higher values will tend to group similar inputs
                          together into the same output representation. Only
                          column which overlap with the input less than
                          100*(1.0-minDistance) percent will have a
                          possibility of losing the inhibition competition
                          against a boosted, 'bored' cell.
    cloneMap:             An array (numColumnsHigh, numColumnsWide) that
                          contains the clone index to use for each column.
    numCloneMasters:      The number of distinct clones in the map. This is
                          just outputCloningWidth*outputCloningHeight.
    seed:                 Seed for our own pseudo-random number generator.
    spVerbosity:          spVerbosity level: 0, 1, 2, or 3
    printPeriodicStats:   If > 0, then every 'printPeriodicStats' iterations,
                          the SP will print to stdout some statistics related
                          to learning, such as the average pct under and
                          over-coverage, average number of active columns,
                          etc. in the last 'showLearningStats' iterations.
    testMode:             If True, run the SP in test mode. This runs both
                          the C++ and python implementations on all internal
                          functions that support both and insures that both
                          produce the same result.
    globalInhibition:     If true, enforce the
                          localAreaDensity/numActivePerInhArea globally over
                          the entire region, ignoring any dynamically
                          calculated inhibitionRadius. In effect, this is the
                          same as setting the inhibition radius to include
                          the entire region.
    spReconstructionParam: Specifies which SP reconstruction optimization to
                          be used. Each column's firing strength is weighted
                          by the percent Overlap, permanence or duty Cycle if
                          this parameter is set to 'pctOverlap',
                          'permanence', or 'dutycycle' respectively. If
                          parameter is set to 'maximum_firingstrength', the
                          maximum of the firing strengths (weighted by
                          permanence) is used instead of the weighted sum.
    useHighTier:          The "high tier" feature is to deal with sparse
                          input spaces. If over (1-minDistance) percent of a
                          column's connected synapses are active, it will
                          automatically become one of the winning columns.
                          If False, columns are activated based on their
                          absolute overlap with the input. Also, boosting
                          will be disabled to prevent pattern oscillation
    randomSP:             If True, the SP will not update its permanences and
                          will instead use it's initial configuration for all
                          inferences.
    """

    #--------------------------------------------------------------------------
    # Save our __init__ args for debugging
    self._initArgsDict = _extractCallingMethodArgs()

    # Handle people instantiating us directly that don't pass in a cloneMap...
# This creates a clone map without any cloning if cloneMap is None: cloneMap, numCloneMasters = fdru.makeCloneMap( columnsShape=coincidencesShape, outputCloningWidth=coincidencesShape[1], outputCloningHeight=coincidencesShape[0] ) self.numCloneMasters = numCloneMasters self._cloneMapFlat = cloneMap.reshape((-1,)) # Save creation parameters self.inputShape = (int(inputShape[0]), int(inputShape[1])) self.inputBorder = inputBorder self.inputDensity = inputDensity self.coincidencesShape = coincidencesShape self.coincInputRadius = coincInputRadius self.coincInputPoolPct = coincInputPoolPct self.gaussianDist = gaussianDist self.commonDistributions = commonDistributions self.localAreaDensity = localAreaDensity self.numActivePerInhArea = numActivePerInhArea self.stimulusThreshold = stimulusThreshold self.synPermInactiveDec = synPermInactiveDec self.synPermActiveInc = synPermActiveInc self.synPermActiveSharedDec = synPermActiveSharedDec self.synPermOrphanDec = synPermOrphanDec self.synPermConnected = synPermConnected self.minPctDutyCycleBeforeInh = minPctDutyCycleBeforeInh self.minPctDutyCycleAfterInh = minPctDutyCycleAfterInh self.dutyCyclePeriod = dutyCyclePeriod self.maxFiringBoost = maxFiringBoost self.maxSSFiringBoost = maxSSFiringBoost self.maxSynPermBoost = maxSynPermBoost self.minDistance = minDistance self.spVerbosity = spVerbosity self.printPeriodicStats = printPeriodicStats self.testMode = testMode self.globalInhibition = globalInhibition self.spReconstructionParam = spReconstructionParam self.useHighTier= useHighTier != 0 self.randomSP = randomSP != 0 if not self.useHighTier: self.minPctDutyCycleAfterInh = 0 self.fileCount = 0 self._runIter = 0 # Start at iteration #0 self._iterNum = 0 # Number of learning iterations self._inferenceIterNum = 0 # Number of inference iterations # Print creation parameters if spVerbosity >= 3: self.printParams() print "seed =", seed # Check for errors assert (self.numActivePerInhArea == -1 or self.localAreaDensity == -1) assert 
(self.inputShape[1] > 2*self.inputBorder) # 1D layouts have inputShape[0] == 1 if self.inputShape[0] > 1: assert (self.inputShape[0] > 2*self.inputBorder) # Calculate other member variables self._coincCount = int(self.coincidencesShape[0] * \ self.coincidencesShape[1]) self._inputCount = int(self.inputShape[0] * self.inputShape[1]) self._synPermMin = 0.0 self._synPermMax = 1.0 self._pylabInitialized = False # The rate at which we bump up all synapses in response to not passing # stimulusThreshold self._synPermBelowStimulusInc = self.synPermConnected / 10.0 self._hasTopology = True if self.inputShape[0] == 1: # 1-D layout self._coincRFShape = (1, (2*coincInputRadius + 1)) # If we only have 1 column of coincidences, then assume the user wants # each coincidence to cover the entire input if self.coincidencesShape[1] == 1: assert self.inputBorder >= (self.inputShape[1] - 1) // 2 assert coincInputRadius >= (self.inputShape[1] - 1) // 2 self._coincRFShape = (1, self.inputShape[1]) self._hasTopology = False else: # 2-D layout self._coincRFShape = ((2*coincInputRadius + 1), (2*coincInputRadius + 1)) # This gets set to True in finishLearning. Once set, we don't allow # learning anymore and delete all member variables needed only for # learning self._doneLearning = False # Init random seed self._seed(seed) # Hard-coded in the current case self.randomTieBreakingFraction = 0.5 # The permanence values used to initialize the master coincs are from # this initial permanence array # The initial permanence is gaussian shaped with mean at center and variance # carefully chosen to have connected synapses initialPermanence = self._initialPermanence() # masterPotentialM, masterPermanenceM and masterConnectedM are numpy arrays # of dimensions (coincCount, coincRfShape[0], coincRFShape[1]) # # masterPotentialM: Keeps track of the potential synapses of each # master. Potential synapses are marked as True # masterPermanenceM: Holds the permanence values of the potential synapses. 
# The values can range from 0.0 to 1.0 # masterConnectedM: Keeps track of the connected synapses of each # master. Connected synapses are the potential synapses # with permanence values greater than synPermConnected. self._masterPotentialM, self._masterPermanenceM = \ self._makeMasterCoincidences(self.numCloneMasters, self._coincRFShape, self.coincInputPoolPct, initialPermanence, self.random) # Update connected coincidences, the connected synapses have permanence # values greater than synPermConnected self._masterConnectedM = [] dense = numpy.zeros(self._coincRFShape) for i in xrange(self.numCloneMasters): self._masterConnectedM.append(SM_01_32_32(dense)) # coinc sizes are used in normalizing the raw overlaps self._masterConnectedCoincSizes = numpy.empty(self.numCloneMasters, 'uint32') # Make one mondo coincidence matrix for all cells at once. It has one row # per cell. The width of each row is the entire input width. There will be # ones in each row where that cell has connections. When we have cloning, # and we modify the connections for a clone master, we will update all # cells that share that clone master with the new connections. 
self._allConnectedM = SM_01_32_32(self._inputCount) self._allConnectedM.resize(self._coincCount, self._inputCount) # ========================================================================= # Initialize the dutyCycles and boost factors per clone master self._dutyCycleBeforeInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._minDutyCycleBeforeInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._dutyCycleAfterInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._minDutyCycleAfterInh = numpy.zeros(self.numCloneMasters, dtype=realDType) # TODO: We don't need to store _boostFactors, can be calculated from duty # cycle self._firingBoostFactors = numpy.ones(self.numCloneMasters, dtype=realDType) if self.useHighTier: self._firingBoostFactors *= maxFiringBoost # Selectively turn on/off C++ for various methods if self.testMode: self._computeOverlapsImp = "py" # "py or "cpp" or "test" self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test" else: self._computeOverlapsImp = "py" # "py or "cpp" or "test" self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test" # This is used to hold our learning stats (via getLearningStats()) self._learningStats = dict() # These will hold our random state, which we return from __getstate__ and # reseed our random number generators from in __setstate__ so that # a saved/restored SP produces the exact same behavior as one that # continues. 
This behavior allows us to write unit tests that verify that
    # the behavior of an SP does not change due to saving/loading from a
    # checkpoint
    # None until the first __getstate__ captures the generator states.
    self._randomState = None
    self._numpyRandomState = None
    self._nupicRandomState = None

    # =========================================================================
    # Init ephemeral members
    # This also calculates the slices and global inhibitionRadius and allocates
    # the inhibitionObj
    self._initEphemerals()

    # =========================================================================
    # If we have no cloning, make sure no column has potential or connected
    # synapses outside the input area
    if self.numCloneMasters == self._coincCount:
      validMask = numpy.zeros(self._coincRFShape, dtype=realDType)
      for masterNum in xrange(self._coincCount):
        coincSlice = self._coincSlices[masterNum]
        validMask.fill(0)
        validMask[coincSlice] = 1
        # Zero out potential/permanence entries that fall outside the input
        self._masterPotentialM[masterNum].logicalAnd(SM_01_32_32(validMask))
        self._masterPermanenceM[masterNum].elementMultiply(validMask)

        # Raise all permanences up until the number of connected is above
        # our desired target,
        self._raiseAllPermanences(masterNum,
            minConnections = self.stimulusThreshold / self.inputDensity)

    # =========================================================================
    #self._cellMappings = []
    #for cell in xrange(self._coincCount):
    #  inputSlice = self._inputSlices[cell]
    #  mapping = self._inputLayout[inputSlice]
    #  self._cellMappings.append(mapping)

    # =========================================================================
    # Calculate the number of connected synapses in each master coincidence now
    self._updateConnectedCoincidences()

  #############################################################################
  def _getEphemeralMembers(self):
    """
    List of our member variables that we don't need to be saved.

    These names are stripped from the pickled state by __getstate__ and are
    recreated after unpickling (mostly by _initEphemerals).
    """
    return [
      '_inputLayout',
      '_cellsForMaster',
      '_columnCenters',
      #'_cellRFClipped',
      '_inputSlices',
      '_coincSlices',
      '_activeInput',
      '_permChanges',
      '_dupeInput',
      '_onCells',
      '_masterOnCells',
      '_onCellIndices',
      '_inhibitionObj',
      '_denseOutput',
      '_overlaps',
      '_anomalyScores',
      '_inputUse',
      '_updatePermanenceGivenInputFP',
      '_computeOverlapsFP',
      '_stats',
      '_rfRadiusAvg',
      '_rfRadiusMin',
      '_rfRadiusMax',
      '_topDownOut',
      '_topDownParentCounts',
      ]

  #############################################################################
  def _initEphemerals(self):
    """
    Initialize all ephemeral members after being restored to a pickled state.
    """
    # Used by functions which refers to inputs in absolute space
    # getLearnedCM, cm,....
    self._inputLayout = numpy.arange(self._inputCount,
                              dtype=numpy.uint32).reshape(self.inputShape)

    # This array returns the list of cell indices that correspond to each master
    cloningOn = (self.numCloneMasters != self._coincCount)
    if cloningOn:
      self._cellsForMaster = []
      for masterNum in xrange(self.numCloneMasters):
        self._cellsForMaster.append(
                  numpy.where(self._cloneMapFlat == masterNum)[0])
    else:
      self._cellsForMaster = None

    # TODO: slices are not required for the C++ helper functions
    # Figure out the slices of shaped input that each column sees...
# Figure out the valid region of each column
    # The reason these slices are in initEphemerals is because numpy slices
    # can't be pickled
    self._setSlices()

    # This holds the output of the inhibition computation - which cells are
    # on after inhibition
    self._onCells = numpy.zeros(self._coincCount, dtype=realDType)
    self._masterOnCells = numpy.zeros(self.numCloneMasters, dtype=realDType)
    self._onCellIndices = numpy.zeros(self._coincCount, dtype='uint32')

    # The inhibition object gets allocated by _updateInhibitionObj() during
    # the first compute and re-allocated periodically during learning
    self._inhibitionObj = None
    self._rfRadiusAvg = 0    # Also calculated by _updateInhibitionObj
    self._rfRadiusMin = 0
    self._rfRadiusMax = 0

    # Used by the caller to optionally cache the dense output
    self._denseOutput = None

    # This holds the overlaps (in absolute number of connected synapses) of each
    # coinc with input
    self._overlaps = numpy.zeros(self._coincCount, dtype=realDType)

    # This holds the percent overlaps (number of active inputs / number of
    # connected synapses) of each coinc with input
    self._pctOverlaps = numpy.zeros(self._coincCount, dtype=realDType)

    # This is the value of the anomaly score for each column (after inhibition)
    self._anomalyScores = numpy.zeros_like(self._overlaps)

    # This holds the overlaps before stimulus threshold - used for verbose
    # messages only
    self._overlapsBST = numpy.zeros(self._coincCount, dtype=realDType)

    # This holds the number of coincs connected to an input
    # (learning-only scratch space; not allocated after finishLearning)
    if not self._doneLearning:
      self._inputUse = numpy.zeros(self.inputShape, dtype=realDType)

    # These are boolean matrices, the same shape as the input
    if not self._doneLearning:
      self._activeInput = numpy.zeros(self.inputShape, dtype='bool')
      self._dupeInput = numpy.zeros(self.inputShape, dtype='bool')

    # This is used to hold self.synPermActiveInc where the input is on
    # and -self.synPermInctiveDec where the input is off
    if not self._doneLearning:
      self._permChanges = numpy.zeros(self.inputShape, dtype=realDType)

    # These are used to compute and hold the output from topDownCompute
    # self._topDownOut = numpy.zeros(self.inputShape, dtype=realDType)
    # self._topDownParentCounts = numpy.zeros(self.inputShape, dtype='int')

    # Fill in the updatePermanenceGivenInput method pointer, which depends on
    # chosen language
    if self._updatePermanenceGivenInputImp == "py":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputPy
    elif self._updatePermanenceGivenInputImp == "cpp":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputCPP
    elif self._updatePermanenceGivenInputImp == "test":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputTest
    else:
      # Unknown implementation selector - fail fast
      assert (False)

    # Fill in the computeOverlaps method pointer, which depends on
    # chosen language
    if self._computeOverlapsImp == "py":
      self._computeOverlapsFP = self._computeOverlapsPy
    elif self._computeOverlapsImp == "cpp":
      self._computeOverlapsFP = self._computeOverlapsCPP
    elif self._computeOverlapsImp == "test":
      self._computeOverlapsFP = self._computeOverlapsTest
    else:
      assert (False)

    # ----------------------------------------------------------------------
    # These variables are used for keeping track of learning statistics (when
    # self.printPeriodicStats is used).
    self._periodicStatsCreate()

  #############################################################################
  def compute(self, flatInput, learn=False, infer=True, computeAnomaly=False):
    """
    Compute with the current input vector.
Parameters:
    ----------------------------
    input         : the input vector (numpy array)
    learn         : if True, adapt the input histogram based on this input
    infer         : whether to do inference or not
    """

    # If we are using a random SP, ignore the learn parameter
    if self.randomSP:
      learn = False

    # If finishLearning has been called, don't allow learning anymore
    if learn and self._doneLearning:
      raise RuntimeError("Learning can not be performed once finishLearning"
                         " has been called.")

    assert (learn or infer)
    assert (flatInput.ndim == 1) and (flatInput.shape[0] == self._inputCount)
    assert (flatInput.dtype == realDType)
    input = flatInput.reshape(self.inputShape)

    # Make sure we've allocated the inhibition object lazily
    if self._inhibitionObj is None:
      self._updateInhibitionObj()

    # Reset first timer
    if self.printPeriodicStats > 0 and self._iterNum == 0:
      self._periodicStatsReset()

    # Using cloning?
    cloningOn = (self.numCloneMasters != self._coincCount)

    # If we have high verbosity, save the overlaps before stimulus threshold
    # so we can print them out at the end
    if self.spVerbosity >= 2:
      print "==============================================================="
      print "Iter:%d" % self._iterNum, "inferenceIter:%d" % \
                self._inferenceIterNum
      self._computeOverlapsFP(input, stimulusThreshold=0)
      self._overlapsBST[:] = self._overlaps
      connectedCountsOnEntry = self._masterConnectedCoincSizes.copy()
      if self.spVerbosity >= 3:
        inputNZ = flatInput.nonzero()[0]
        print "active inputs: (%d)" % len(inputNZ), inputNZ

    # ----------------------------------------------------------------------
    # TODO: Port to C++, arguments may be different - t1YXArr,
    # coincInputRadius,...
    # Calculate the raw overlap of each cell
    # Overlaps less than stimulus threshold are set to zero in
    # _calculateOverlaps
    # This places the result into self._overlaps
    self._computeOverlapsFP(input, stimulusThreshold=self.stimulusThreshold)

    # Save the original overlap values, before boosting, for the purpose of
    # anomaly detection
    if computeAnomaly:
      self._anomalyScores[:] = self._overlaps[:]

    if learn:
      # ----------------------------------------------------------------------
      # Update each cell's duty cycle before inhibition
      # Only cells with overlaps greater stimulus threshold are considered as
      # active.
      # Stimulus threshold has already been applied
      # TODO: Port to C++?  Loops over all coincs
      # Only updating is carried out here, bump up happens later
      onCellIndices = numpy.where(self._overlaps > 0)
      if cloningOn:
        # With cloning, duty cycles are tracked per master, not per cell
        onMasterIndices = self._cloneMapFlat[onCellIndices]
        self._masterOnCells.fill(0)
        self._masterOnCells[onMasterIndices] = 1
        denseOn = self._masterOnCells
      else:
        self._onCells.fill(0)
        self._onCells[onCellIndices] = 1
        denseOn = self._onCells

      # dutyCyclePeriod = self._iterNum + 1 let _dutyCycleBeforeInh
      # and _dutyCycleAfterInh represent real firing percentage at the
      # beginning of learning. This will effect boosting and let unlearned
      # coincidences have high boostFactor at beginning.
      self.dutyCyclePeriod = min(self._iterNum + 1, 1000)

      # Exponential-style moving average over dutyCyclePeriod iterations
      self._dutyCycleBeforeInh = ((self.dutyCyclePeriod-1) \
              * self._dutyCycleBeforeInh + denseOn) / self.dutyCyclePeriod

    # ----------------------------------------------------------------------
    # Compute firing levels based on boost factor and raw overlap. Update
    # self._overlaps in place, replacing it with the boosted overlap. We also
    # computes percent overlap of each column and store that into
    # self._pctOverlaps
    # With cloning
    if cloningOn:
      self._pctOverlaps[:] = self._overlaps
      self._pctOverlaps /= self._masterConnectedCoincSizes[self._cloneMapFlat]
      boostFactors = self._firingBoostFactors[self._cloneMapFlat]
    else:
      self._pctOverlaps[:] = self._overlaps
      potentials = self._masterConnectedCoincSizes
      # numpy.maximum guards against divide-by-zero for empty coincidences
      self._pctOverlaps /= numpy.maximum(1, potentials)
      boostFactors = self._firingBoostFactors

    # To process minDistance, we do the following:
    #  1.) All cells which do not overlap the input "highly" (less than
    #      minDistance), are considered to be in the "low tier" and get their
    #      overlap multiplied by their respective boost factor.
    #  2.) All other cells, which DO overlap the input highly, get a "high tier
    #      offset" added to their overlaps, and boost is not applied. The
    #      "high tier offset" is computed as the max of all the boosted
    #      overlaps from step #1. This insures that a cell in this high tier
    #      will never lose to a cell from the low tier.
    # if self.useHighTier \
    #     and len(numpy.where(self._dutyCycleAfterInh == 0)[0]) == 0:
    #   self.useHighTier = False
    if self.useHighTier:
      highTier = numpy.where(self._pctOverlaps >= (1.0 - self.minDistance))[0]
    else:
      highTier = []
    someInHighTier = len(highTier) > 0

    if someInHighTier:
      # Copy before mutating: boostFactors may alias self._firingBoostFactors
      boostFactors = numpy.array(boostFactors)
      boostFactors[highTier] = 1.0

    # apply boostFactors only in learning phase not in inference phase.
    if learn:
      self._overlaps *= boostFactors
    if someInHighTier:
      highTierOffset = self._overlaps.max() + 1.0
      self._overlaps[highTier] += highTierOffset

    # Cache the dense output for debugging
    if self._denseOutput is not None:
      self._denseOutput = self._overlaps.copy()

    # ----------------------------------------------------------------------
    # Incorporate inhibition and see who is firing after inhibition.
    # We don't need this method to process stimulusThreshold because we
    # already processed it.
    # Also, we pass in a small 'addToWinners' amount which gets added to the
    # winning elements as we go along. This prevents us from choosing more than
    # topN winners per inhibition region when more than topN elements all have
    # the same max high score.
    learnedCellsOverlaps = numpy.array(self._overlaps)
    if infer and not learn:
      # Cells that have never learnt are not allowed to win during inhibition
      if not self.randomSP:
        learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = 0
      else:
        #Boost the unlearned cells to 1000 so that the winning columns are picked randomly
        #From the set of unlearned columns
        #Boost columns that havent been learned with uniformly to 1000 so that inhibition picks
        #randomly from them.
        if self.useHighTier:
          learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = \
              learnedCellsOverlaps.max() + 1
          #
          #Boost columns that are in highTier (ie. they match the input very well
          learnedCellsOverlaps[highTier] += learnedCellsOverlaps.max() + 1

          # Small random tiebreaker for columns with equal overlap
          tieBreaker = numpy.random.rand(*learnedCellsOverlaps.shape).astype(realDType)
          learnedCellsOverlaps += 0.1 * tieBreaker

    numOn = self._inhibitionObj.compute(
                learnedCellsOverlaps, self._onCellIndices,
                0.0,                                  # stimulusThreshold
                max(learnedCellsOverlaps)/1000.0,     # addToWinners
                )
    self._onCells.fill(0)
    if numOn > 0:
      onCellIndices = self._onCellIndices[0:numOn]
      self._onCells[onCellIndices] = 1
    else:
      onCellIndices = []

    # Compute the anomaly scores only for the winning columns
    if computeAnomaly:
      self._anomalyScores *= self._onCells
      self._anomalyScores *= self._dutyCycleAfterInh

    if self.spVerbosity >= 2:
      print "inhRadius", self._inhibitionObj.getInhibitionRadius()
      print "inhLocalAreaDensity", self._inhibitionObj.getLocalAreaDensity()
      print "numFiring", numOn

    # ----------------------------------------------------------------------
    # Capturing learning stats?
If so, capture the cell overlap statistics if self.printPeriodicStats > 0: activePctOverlaps = self._pctOverlaps[onCellIndices] self._stats['cellPctOverlapSums'] += activePctOverlaps.sum() if cloningOn: onMasterIndices = self._cloneMapFlat[onCellIndices] else: onMasterIndices = onCellIndices self._stats['cellOverlapSums'] += \ (activePctOverlaps * \ self._masterConnectedCoincSizes[onMasterIndices]).sum() # ---------------------------------------------------------------------- # Compute which cells had very high overlap, but were still # inhibited. These we are calling our "orphan cells", because they are # representing an input which is already better represented by another # cell. if self.synPermOrphanDec > 0: orphanCellIndices = set(numpy.where(self._pctOverlaps >= 1.0)[0]) orphanCellIndices.difference_update(onCellIndices) else: orphanCellIndices = [] if learn: # ---------------------------------------------------------------------- # Update the number of coinc connections per input # During learning (adapting permanence values), we need to be able to # recognize dupe inputs - inputs that go two 2 or more active cells if self.synPermActiveSharedDec != 0: self._updateInputUse(onCellIndices) # ---------------------------------------------------------------------- # For the firing cells, update permanence values onMasterIndices = self._adaptSynapses(onCellIndices, orphanCellIndices, input) # ---------------------------------------------------------------------- # Increase the permanence values of columns which haven't passed # stimulus threshold of overlap with at least a minimum frequency self._bumpUpWeakCoincidences() # ---------------------------------------------------------------------- # Update each cell's after-inhibition duty cycle # TODO: As the on-cells are sparse after inhibition, we can have # a different updateDutyCycles function taking advantage of the sparsity if cloningOn: self._masterOnCells.fill(0) self._masterOnCells[onMasterIndices] = 1 denseOn 
= self._masterOnCells else: denseOn = self._onCells self._dutyCycleAfterInh = ((self.dutyCyclePeriod - 1) \ * self._dutyCycleAfterInh + denseOn) / self.dutyCyclePeriod # For the cell's that just fired with a very high boost, bring their # boost back down to 1.0. This prevents a cell that was trying very # hard to grab input, and was just successful, from grabbing every # other input that comes around in the near future. # for masterNum in onMasterIndices: # if self._firingBoostFactors[masterNum] > self.maxSSFiringBoost: # self._dutyCycleAfterInh[masterNum] = self._dutyCycleAfterInh.max()/5 #Set the duty cycle to a safe margin over the boosting duty cycle but #avoid artificially boosting it to a reconstruction significant level. # self._dutyCycleAfterInh[masterNum] = self.minPctDutyCycleAfterInh*10 # ---------------------------------------------------------------------- # Update the boost factors based on firings rate after inhibition self._updateBoostFactors() # ======================================================================= # Increment iteration number and perform our periodic tasks if it's time if ((self._iterNum + 1) % 50) == 0: self._updateInhibitionObj() self._updateMinDutyCycles(self._dutyCycleBeforeInh, self.minPctDutyCycleBeforeInh, self._minDutyCycleBeforeInh) self._updateMinDutyCycles(self._dutyCycleAfterInh, self.minPctDutyCycleAfterInh, self._minDutyCycleAfterInh) # Next iteration if learn: self._iterNum += 1 if infer: self._inferenceIterNum += 1 if learn: # ======================================================================= # Capture and possibly print the periodic stats if self.printPeriodicStats > 0: self._periodicStatsComputeEnd(onCellIndices, flatInput.nonzero()[0]) # Verbose print other stats if self.spVerbosity >= 2: cloning = (self.numCloneMasters != self._coincCount) print " #connected on entry: ", fdru.numpyStr(connectedCountsOnEntry, '%d ', includeIndices=True) print " #connected on exit: ", fdru.numpyStr( 
self._masterConnectedCoincSizes, '%d ', includeIndices=True) if self.spVerbosity >= 3 or not cloning: print " overlaps: ", fdru.numpyStr(self._overlapsBST, '%d ', includeIndices=True, includeZeros=False) print " firing levels: ", fdru.numpyStr(self._overlaps, '%.4f ', includeIndices=True, includeZeros=False) print " on after inhibition: ", onCellIndices if not self._doneLearning: print " minDutyCycleBeforeInh:", fdru.numpyStr( self._minDutyCycleBeforeInh, '%.4f ', includeIndices=True) print " dutyCycleBeforeInh: ", fdru.numpyStr(self._dutyCycleBeforeInh, '%.4f ', includeIndices=True) print " belowMinBeforeInh: " % numpy.nonzero( self._dutyCycleBeforeInh \ < self._minDutyCycleBeforeInh)[0] print " minDutyCycleAfterInh: ", fdru.numpyStr( self._minDutyCycleAfterInh, '%.4f ', includeIndices=True) print " dutyCycleAfterInh: ", fdru.numpyStr(self._dutyCycleAfterInh, '%.4f ', includeIndices=True) print " belowMinAfterInh: " % numpy.nonzero( self._dutyCycleAfterInh \ < self._minDutyCycleAfterInh)[0] print " firingBoosts: ", fdru.numpyStr(self._firingBoostFactors, '%.4f ', includeIndices=True) print elif self.spVerbosity >= 1: print "SP: learn: ", learn print "SP: active outputs(%d): " % (len(onCellIndices)), onCellIndices self._runIter += 1 # Return inference result return self._onCells ############################################################################# def __getstate__(self): # Update our random states self._randomState = random.getstate() self._numpyRandomState = numpy.random.get_state() self._nupicRandomState = self.random.getState() state = self.__dict__.copy() # Delete ephemeral members that we don't want pickled for ephemeralMemberName in self._getEphemeralMembers(): if ephemeralMemberName in state: del state[ephemeralMemberName] return state ############################################################################# def __setstate__(self, state): self.__dict__.update(state) # ---------------------------------------------------------------------- # 
Support older checkpoints # These fields were added on 2010-10-05 and _iterNum was preserved if not hasattr(self, '_randomState'): self._randomState = random.getstate() self._numpyRandomState = numpy.random.get_state() self._nupicRandomState = self.random.getState() self._iterNum = 0 # ------------------------------------------------------------------------ # Init our random number generators random.setstate(self._randomState) numpy.random.set_state(self._numpyRandomState) self.random.setState(self._nupicRandomState) # Load things that couldn't be pickled... self._initEphemerals() ############################################################################ def getAnomalyScore(self): """ Get the aggregate anomaly score for this input pattern Returns: A single scalar value for the anomaly score """ numNonzero = len(numpy.nonzero(self._anomalyScores)[0]) return 1.0 / (numpy.sum(self._anomalyScores) + 1) ############################################################################# def getLearningStats(self): """ Return a dictionary containing a set of statistics related to learning. Here is a list of what is returned: 'activeCountAvg': The average number of active columns seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. If printPeriodicStats is not turned on (== 0), then this is -1 'underCoveragePct': The average under-coverage of the input as seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. If printPeriodicStats is not turned on (== 0), then this is -1 'overCoveragePct': The average over-coverage of the input as seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. 
If printPeriodicStats is not turned on (== 0),
          then this is -1

    'numConnectionChangesAvg':
          The overall average number of connection changes made per active
          column per iteration, over the last N training iterations, where
          N is set by the constructor parameter printPeriodicStats. This
          gives an indication as to how much learning is still occurring.
          If printPeriodicStats is not turned on (== 0), then this is -1

    'numConnectionChangesMin':
          The minimum number of connection changes made to an active
          column per iteration, over the last N training iterations, where
          N is set by the constructor parameter printPeriodicStats. This
          gives an indication as to how much learning is still occurring.
          If printPeriodicStats is not turned on (== 0), then this is -1

    'numConnectionChangesMax':
          The maximum number of connection changes made to an active
          column per iteration, over the last N training iterations, where
          N is set by the constructor parameter printPeriodicStats. This
          gives an indication as to how much learning is still occurring.
          If printPeriodicStats is not turned on (== 0), then this is -1

    'rfSize':
          The average receptive field size of the columns.

    'inhibitionRadius':
          The average inhibition radius of the columns.

    'targetDensityPct':
          The most recent target local area density used, as a percent
          (0 -> 100)

    'coincidenceSizeAvg':
          The average learned coincidence size

    'coincidenceSizeMin':
          The minimum learned coincidence size

    'coincidenceSizeMax':
          The maximum learned coincidence size

    'dcBeforeInhibitionAvg':
          The average of duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMin':
          The minimum duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMax':
          The maximum duty cycle before inhibition of all coincidences

    'dcAfterInhibitionAvg':
          The average of duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMin':
          The minimum duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMax':
          The maximum duty cycle after inhibition of all coincidences

    'firingBoostAvg':
          The average firing boost

    'firingBoostMin':
          The minimum firing boost

    'firingBoostMax':
          The maximum firing boost
    """

    # Fill in the stats that can be computed on the fly. The transient stats
    # that depend on printPeriodicStats being on, have already been stored
    self._learningStats['rfRadiusAvg'] = self._rfRadiusAvg
    self._learningStats['rfRadiusMin'] = self._rfRadiusMin
    self._learningStats['rfRadiusMax'] = self._rfRadiusMax

    if self._inhibitionObj is not None:
      self._learningStats['inhibitionRadius'] = \
                self._inhibitionObj.getInhibitionRadius()
      self._learningStats['targetDensityPct'] = \
                100.0 * self._inhibitionObj.getLocalAreaDensity()
    else:
      print "Warning: No inhibitionObj found for getLearningStats"
      self._learningStats['inhibitionRadius'] = 0.0
      self._learningStats['targetDensityPct'] = 0.0

    self._learningStats['coincidenceSizeAvg'] = \
              self._masterConnectedCoincSizes.mean()
    self._learningStats['coincidenceSizeMin'] = \
              self._masterConnectedCoincSizes.min()
    self._learningStats['coincidenceSizeMax'] = \
              self._masterConnectedCoincSizes.max()

    # Duty-cycle stats are only available while still learning
    if not self._doneLearning:
      self._learningStats['dcBeforeInhibitionAvg'] = \
                self._dutyCycleBeforeInh.mean()
self._learningStats['dcBeforeInhibitionMin'] = \
                self._dutyCycleBeforeInh.min()
      self._learningStats['dcBeforeInhibitionMax'] = \
                self._dutyCycleBeforeInh.max()

      self._learningStats['dcAfterInhibitionAvg'] = \
                self._dutyCycleAfterInh.mean()
      self._learningStats['dcAfterInhibitionMin'] = \
                self._dutyCycleAfterInh.min()
      self._learningStats['dcAfterInhibitionMax'] = \
                self._dutyCycleAfterInh.max()

      self._learningStats['firingBoostAvg'] = \
                self._firingBoostFactors.mean()
      self._learningStats['firingBoostMin'] = \
                self._firingBoostFactors.min()
      self._learningStats['firingBoostMax'] = \
                self._firingBoostFactors.max()

    return self._learningStats

  #############################################################################
  def _seed(self, seed=-1):
    """
    Initialize the random seed.

    Seeds the NuPIC, stdlib, and numpy generators together so runs are
    reproducible; a seed of -1 leaves the stdlib/numpy generators untouched.
    """
    if seed != -1:
      self.random = NupicRandom(seed)
      random.seed(seed)
      numpy.random.seed(seed)
    else:
      self.random = NupicRandom()

  #############################################################################
  def _initialPermanence(self):
    """
    Create and return a 2D matrix filled with initial permanence values.
    The returned matrix will be of shape:
     (2*coincInputRadius + 1, 2*coincInputRadius + 1).

    The initial permanence values are set between 0 and 1.0, with enough chosen
    above synPermConnected to make it highly likely that a cell will pass
    stimulusThreshold, given the size of the potential RF, the input pool
    sampling percentage, and the expected density of the active inputs.

    If gaussianDist is True, the center of the matrix will contain the highest
    permanence values and lower values will be farther from the center.

    If gaussianDist is False, the highest permanence values will be evenly
    distributed throughout the potential RF.
    """

    # Figure out the target number of connected synapses. We want about 2X
    # stimulusThreshold
    minOn = 2 * max(self.stimulusThreshold, 10) / self.coincInputPoolPct \
                  / self.inputDensity

    # ========================================================================
    # Get the gaussian distribution, with max magnitude just slightly above
    # synPermConnected. Try to find a sigma that gives us about 2X
    # stimulusThreshold connected synapses after sub-sampling for
    # coincInputPoolPct. We will assume everything within +/- sigma will be
    # connected. This logic uses the fact that an x value of sigma generates a
    # magnitude of 0.6.
    if self.gaussianDist:
      # Only supported when we have 2D layouts
      if self._coincRFShape[0] != self._coincRFShape[1]:
        raise RuntimeError("Gaussian distibuted permanences are currently only"
                           "supported for 2-D layouts")

      # The width and height of the center "blob" in inputs is the square root
      # of the area
      onAreaDim = numpy.sqrt(minOn)

      # Sigma is at the edge of the center blob
      sigma = onAreaDim/2

      # Create the gaussian with a value of 1.0 at the center
      perms = self._gaussianMatrix(dim=max(self._coincRFShape), sigma=sigma)

      # The distance between the min and max values within the gaussian will
      # be given by 'grange'. In a gaussian, the value at sigma away from the
      # center is 0.6 * the value at the center. We want the values at sigma
      # to be synPermConnected
      maxValue = 1.0 / 0.6 * self.synPermConnected
      perms *= maxValue
      perms.shape = (-1,)

      # Now, let's clip off the low values to reduce the number of non-zeros
      # we have and reduce our memory requirements. We'll clip everything
      # farther away than 2 sigma to 0. The value of a gaussing at 2 sigma
      # is 0.135 * the value at the center
      perms[perms < (0.135 * maxValue)] = 0

    # ========================================================================
    # Evenly distribute the permanences through the RF
    else:
      # Create a random distribution from 0 to 1.
      perms = numpy.random.random(self._coincRFShape)
      perms = perms.astype(realDType)

      # Set the range of values to be between 0 and
      # synPermConnected+synPermInctiveDec. This ensures that a pattern
      # will always be learned in 1 iteration
      maxValue = min(1.0, self.synPermConnected + self.synPermInactiveDec)

      # What percentage do we want to be connected?
      connectPct = 0.50

      # What value from the 0 to 1 distribution will map to synPermConnected?
      threshold = 1.0 - connectPct

      # Which will be the connected and unconnected synapses?
      connectedSyns = perms >= threshold
      unconnectedSyns = numpy.logical_not(connectedSyns)

      # Squeeze all values between threshold and 1.0 to be between
      # synPermConnected and synPermConnected + synPermActiveInc / 4
      # This makes sure the firing coincidence perms matching input bit get
      # greater than synPermConnected and other unconnectedSyns get deconnected
      # in one firing learning iteration.
      srcOffset = threshold
      srcRange = 1.0 - threshold
      dstOffset = self.synPermConnected
      dstRange = maxValue - self.synPermConnected
      perms[connectedSyns] = (perms[connectedSyns] - srcOffset)/srcRange \
                            * dstRange / 4.0 + dstOffset

      # Squeeze all values between 0 and threshold to be between 0 and
      # synPermConnected
      srcRange = threshold - 0.0
      dstRange = self.synPermConnected - 0.0
      perms[unconnectedSyns] = perms[unconnectedSyns]/srcRange \
                            * dstRange

      # Now, let's clip off the low values to reduce the number of non-zeros
      # we have and reduce our memory requirements. We'll clip everything
      # below synPermActiveInc/2 to 0
      perms[perms < (self.synPermActiveInc / 2.0)] = 0

      perms.shape = (-1,)

    return perms

  #############################################################################
  def _gaussianMatrix(self, dim, sigma):
    """
    Create and return a 2D matrix filled with a gaussian distribution. The
    returned matrix will be of shape (dim, dim). The mean of the gaussian
    will be in the center of the matrix and have a value of 1.0.
""" gaussian = lambda x,sigma: numpy.exp(-(x**2)/(2*(sigma**2))) # Allocate the matrix m = numpy.empty((dim, dim), dtype=realDType) # Find the center center = (dim - 1) / 2.0 # TODO: Simplify using numpy.meshgrid # Fill it in for y in xrange(dim): for x in xrange(dim): dist = numpy.sqrt((x-center)**2 + (y-center)**2) m[y,x] = gaussian(dist, sigma) return m ############################################################################# def _makeMasterCoincidences(self, numCloneMasters, coincRFShape, coincInputPoolPct, initialPermanence=None, nupicRandom=None): """Make the master coincidence matrices and mater input histograms. # TODO: Update this example >>> FDRCSpatial._makeMasterCoincidences(1, 2, 0.33) (array([[[ True, True, False, False, False], [False, True, False, False, True], [False, True, False, False, False], [False, False, False, True, False], [ True, False, False, False, False]]], dtype=bool), array([[[ 0.26982325, 0.19995725, 0. , 0. , 0. ], [ 0. , 0.94128972, 0. , 0. , 0.36316112], [ 0. , 0.06312726, 0. , 0. , 0. ], [ 0. , 0. , 0. , 0.29740077, 0. ], [ 0.81071907, 0. , 0. , 0. , 0. 
]]], dtype=float32)) """ if nupicRandom is None: nupicRandom = NupicRandom(42) if initialPermanence is None: initialPermanence = self._initialPermanence() coincRfArea = (coincRFShape[0] * coincRFShape[1]) coincInputPool = coincInputPoolPct * coincRfArea # We will generate a list of sparse matrices masterPotentialM = [] masterPermanenceM = [] toSample = numpy.arange(coincRfArea, dtype='uint32') toUse = numpy.empty(coincInputPool, dtype='uint32') #denseM = numpy.zeros(coincRfArea, dtype='uint32') denseM = numpy.zeros(coincRfArea, dtype=realDType) for i in xrange(numCloneMasters): nupicRandom.getUInt32Sample(toSample, toUse) # Put in 1's into the potential locations denseM.fill(0) denseM[toUse] = 1 masterPotentialM.append(SM_01_32_32(denseM.reshape(coincRFShape))) # Put in the initial permanences denseM *= initialPermanence masterPermanenceM.append(SM32(denseM.reshape(coincRFShape))) # If we are not using common initial permanences, create another # unique one for the next cell if not self.commonDistributions: initialPermanence = self._initialPermanence() return masterPotentialM, masterPermanenceM ############################################################################# def _updateConnectedCoincidences(self, masters=None): """Update 'connected' version of the given coincidence. Each 'connected' coincidence is effectively a binary matrix (AKA boolean) matrix that is the same size as the input histogram matrices. They have a 1 wherever the inputHistogram is "above synPermConnected". """ # If no masterNum given, update all of them if masters is None: masters = xrange(self.numCloneMasters) (nCellRows, nCellCols) = self._coincRFShape cloningOn = (self.numCloneMasters != self._coincCount) for masterNum in masters: # Where are we connected? 
masterConnectedNZ = \ self._masterPermanenceM[masterNum].whereGreaterEqual(0, nCellRows, 0, nCellCols, self.synPermConnected) rowIdxs = masterConnectedNZ[:,0] colIdxs = masterConnectedNZ[:,1] self._masterConnectedM[masterNum].setAllNonZeros(nCellRows, nCellCols, rowIdxs, colIdxs) self._masterConnectedCoincSizes[masterNum] = len(rowIdxs) # Update the corresponding rows in the super, mondo connected matrix that # come from this master masterConnected = \ self._masterConnectedM[masterNum].toDense().astype('bool') # 0.2s if cloningOn: cells = self._cellsForMaster[masterNum] else: cells = [masterNum] for cell in cells: inputSlice = self._inputSlices[cell] coincSlice = self._coincSlices[cell] masterSubset = masterConnected[coincSlice] sparseCols = self._inputLayout[inputSlice][masterSubset] self._allConnectedM.replaceSparseRow(cell, sparseCols) # 4s. ############################################################################# def _setSlices(self): """Compute self._columnSlices and self._inputSlices self._inputSlices are used to index into the input (assuming it's been shaped to a 2D array) to get the receptive field of each column. There is one item in the list for each column. self._coincSlices are used to index into the coinc (assuming it's been shaped to a 2D array) to get the valid area of the column. There is one item in the list for each column. This function is called upon unpickling, since we can't pickle slices. """ self._columnCenters = numpy.array(self._computeCoincCenters(self.inputShape, self.coincidencesShape, self.inputBorder)) coincInputRadius = self.coincInputRadius (coincHeight, coincWidth) = self._coincRFShape inputShape = self.inputShape inputBorder = self.inputBorder # --------------------------------------------------------------------- # Compute the input slices for each cell. This is the slice of the entire # input which intersects with the cell's permanence matrix. 
    if self._hasTopology:
      # With topology, each cell sees a window of half-width coincInputRadius
      #  centered on the cell, clipped at the edges of the input field.
      self._inputSlices = [numpy.s_[max(0, cy-coincInputRadius): \
                              min(inputShape[0], cy+coincInputRadius + 1),
                              max(0, cx-coincInputRadius): \
                              min(inputShape[1], cx+coincInputRadius + 1)]
                           for (cy, cx) in self._columnCenters]
    else:
      # Without topology, every cell sees the entire input.
      self._inputSlices = [numpy.s_[0:inputShape[0], 0:inputShape[1]]
                           for (cy, cx) in self._columnCenters]

    # Flattened (start0, stop0, start1, stop1) copy of each input slice; the
    #  C++ implementations consume this form since they can't use Python slices.
    self._inputSlices2 = numpy.zeros((4*len(self._inputSlices)), dtype="uint32")
    k = 0
    for i in range(len(self._inputSlices)):
      self._inputSlices2[k] = self._inputSlices[i][0].start
      self._inputSlices2[k + 1] = self._inputSlices[i][0].stop
      self._inputSlices2[k + 2] = self._inputSlices[i][1].start
      self._inputSlices2[k + 3] = self._inputSlices[i][1].stop
      k = k + 4

    # ---------------------------------------------------------------------
    # Compute the coinc slices for each cell. This is which portion of the
    #  cell's permanence matrix intersects with the input.
    if self._hasTopology:
      if self.inputShape[0] > 1:    # 2-D input
        self._coincSlices = [numpy.s_[max(0, coincInputRadius - cy): \
                                min(coincHeight, coincInputRadius + inputShape[0] - cy),
                                max(0, coincInputRadius-cx): \
                                min(coincWidth, coincInputRadius + inputShape[1] - cx)]
                             for (cy, cx) in self._columnCenters]
      else:                         # 1-D input: only row 0 is valid
        self._coincSlices = [numpy.s_[0:1,
                                max(0, coincInputRadius-cx): \
                                min(coincWidth, coincInputRadius + inputShape[1] - cx)]
                             for (cy, cx) in self._columnCenters]
    else:
      self._coincSlices = [numpy.s_[0:coincHeight, 0:coincWidth]
                           for (cy, cx) in self._columnCenters]

    # Flattened form of the coinc slices, mirroring self._inputSlices2.
    self._coincSlices2 = numpy.zeros((4*len(self._coincSlices)), dtype="uint32")
    k = 0
    for i in range(len(self._coincSlices)):
      self._coincSlices2[k] = self._coincSlices[i][0].start
      self._coincSlices2[k + 1] = self._coincSlices[i][0].stop
      self._coincSlices2[k + 2] = self._coincSlices[i][1].start
      self._coincSlices2[k + 3] = self._coincSlices[i][1].stop
      k = k + 4

  #############################################################################
  @staticmethod
  def _computeCoincCenters(inputShape, coincidencesShape, inputBorder):
    """Compute the centers of all coincidences, given parameters.

    This function is semi-public: tools may use it to generate good
    visualizations of what the FDRCSpatial node is doing.

    NOTE: It must be static or global function so that it can be called by the
    ColumnActivityTab inspector *before* the first compute (before the SP has
    been constructed).

    If the input shape is (7,20), shown below with * for each input

    ********************
    ********************
    ********************
    ********************
    ********************
    ********************
    ********************

    if inputBorder is 1, we distribute the coincidences evenly over the
    the area after removing the edges, @ shows the allowed input area below

    ********************
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    ********************

    each coincidence is centered at the closest @ and looks at a area with
    coincInputRadius below it

    This function call returns an iterator over the coincidence centers. Each
    element in iterator is a tuple: (y, x). The iterator returns elements in a
    fixed order.
    """
    # Determine Y centers
    if inputShape[0] > 1:     # 2-D layout
      startHeight = inputBorder
      stopHeight = inputShape[0] - inputBorder
    else:                     # 1-D layout: all centers sit on row 0
      startHeight = stopHeight = 0
    heightCenters = numpy.linspace(startHeight, stopHeight,
                        coincidencesShape[0], endpoint=False).astype('int32')

    # Determine X centers
    startWidth = inputBorder
    stopWidth = inputShape[1] - inputBorder
    widthCenters = numpy.linspace(startWidth, stopWidth,
                        coincidencesShape[1], endpoint=False).astype('int32')

    return list(cross(heightCenters, widthCenters))

  #############################################################################
  def _updateInhibitionObj(self):
    """ Calculate the average inhibitionRadius to use and update the
    inhibition object accordingly.

    This looks at the size of the average connected receptive field and
    uses that to determine the inhibition radius.
""" # ======================================================================== # Compute the inhibition radius. # If using global inhibition, just set it to include the entire region if self.globalInhibition: avgRadius = max(self.coincidencesShape) # Else, set it based on the average size of the connected synapses area in # each cell. else: totalDim = 0 # Get the dimensions of the connected receptive fields of each cell to # compute the average minDim = numpy.inf maxDim = 0 for masterNum in xrange(self.numCloneMasters): masterConnected = self._masterConnectedM[masterNum] nzs = masterConnected.getAllNonZeros() (rows, cols) = zip(*nzs) rows = numpy.array(rows) cols = numpy.array(cols) if len(rows) >= 2: height = rows.max() - rows.min() + 1 else: height = 1 if len(cols) >= 2: width = cols.max() - cols.min() + 1 else: width = 1 avgDim = (height + width) / 2.0 minDim = min(minDim, avgDim) maxDim = max(maxDim, avgDim) totalDim += avgDim # Get average width/height in input space avgDim = totalDim / self.numCloneMasters self._rfRadiusAvg = (avgDim - 1.0) / 2.0 self._rfRadiusMin = (minDim - 1.0) / 2.0 self._rfRadiusMax = (maxDim - 1.0) / 2.0 # How many columns in cell space does it correspond to? 
if self.inputShape[0] > 1: # 2-D layout coincsPerInputX = float(self.coincidencesShape[1]) \ / (self.inputShape[1] - 2*self.inputBorder) coincsPerInputY = float(self.coincidencesShape[0]) \ / (self.inputShape[0] - 2*self.inputBorder) else: coincsPerInputX = coincsPerInputY = \ float(self.coincidencesShape[1] * self.coincidencesShape[0]) \ / (self.inputShape[1] - 2*self.inputBorder) avgDim *= (coincsPerInputX + coincsPerInputY) / 2 avgRadius = (avgDim - 1.0) / 2.0 avgRadius = max(1.0, avgRadius) # Can't be greater than the overall width or height of the level maxDim = max(self.coincidencesShape) avgRadius = min(avgRadius, maxDim) avgRadius = int(round(avgRadius)) # ======================================================================== # Is there a need to re-instantiate the inhibition object? if self._inhibitionObj is None \ or self._inhibitionObj.getInhibitionRadius() != avgRadius: # What is our target density? if self.localAreaDensity > 0: localAreaDensity = self.localAreaDensity else: numCellsPerInhArea = (avgRadius * 2.0 + 1.0) ** 2 totalCells = self.coincidencesShape[0] * self.coincidencesShape[1] numCellsPerInhArea = min(numCellsPerInhArea, totalCells) localAreaDensity = float(self.numActivePerInhArea) / numCellsPerInhArea # Don't let it be greater than 0.50 localAreaDensity = min(localAreaDensity, 0.50) if self.spVerbosity >= 2: print "Updating inhibition object:" print " avg. rfRadius:", self._rfRadiusAvg print " avg. inhRadius:", avgRadius print " Setting density to:", localAreaDensity self._inhibitionObj = Inhibition2(self.coincidencesShape[0], # height self.coincidencesShape[1], # width avgRadius, # inhRadius localAreaDensity) # density ############################################################################# def _updateMinDutyCycles(self, actDutyCycles, minPctDutyCycle, minDutyCycles): """ Calculate and update the minimum acceptable duty cycle for each cell based on the duty cycles of the cells within its inhibition radius and the minPctDutyCycle. 
Parameters: ----------------------------------------------------------------------- actDutyCycles: The actual duty cycles of all cells minPctDutyCycle: Each cell's minimum duty cycle will be set to minPctDutyCycle times the duty cycle of the most active cell within its inhibition radius minDutyCycles: This array will be updated in place with the new minimum acceptable duty cycles """ # What is the inhibition radius? inhRadius = self._inhibitionObj.getInhibitionRadius() # Reshape the actDutyCycles to match the topology of the level cloningOn = (self.numCloneMasters != self._coincCount) if not cloningOn: actDutyCycles = actDutyCycles.reshape(self.coincidencesShape) minDutyCycles = minDutyCycles.reshape(self.coincidencesShape) # Special, faster handling when inhibition radius includes the entire # set of cells if cloningOn or inhRadius >= max(self.coincidencesShape): minDutyCycle = minPctDutyCycle * actDutyCycles.max() minDutyCycles.fill(minPctDutyCycle * actDutyCycles.max()) # Else, process each cell else: (numRows, numCols) = self.coincidencesShape for row in xrange(numRows): top = max(0, row - inhRadius) bottom = min(row + inhRadius + 1, numRows) for col in xrange(numCols): left = max(0, col - inhRadius) right = min(col + inhRadius + 1, numCols) maxDutyCycle = actDutyCycles[top:bottom, left:right].max() minDutyCycles[row, col] = maxDutyCycle * minPctDutyCycle if self.spVerbosity >= 2: print "Actual duty cycles:" print fdru.numpyStr(actDutyCycles, '%.4f') print "Recomputed min duty cycles, using inhRadius of", inhRadius print fdru.numpyStr(minDutyCycles, '%.4f') ############################################################################# def _computeOverlapsPy(self, inputShaped, stimulusThreshold): """ Computes overlaps for every column for the current input in place. The overlaps less than stimulus threshold are set to zero here. For columns with input RF going off the edge of input field, only regions within the input field are considered. 
This is equivalent to padding the input field with zeros. Parameters: ------------------------------------------------------------------------ inputShaped: input at the current time step, shaped to the input topology stimulusThreshold: stimulusThreshold to use Member variables used/updated: ------------------------------------------------------------------------ _inputSlices: Index into the input (assuming it's been shaped to a 2D array) to get the receptive field of each column. _coincSlices: Index into the coinc (assuming it's been shaped to a 2D array) to get the valid region of each column. _overlaps: Result is placed into this array which holds the overlaps of each column with the input """ flatInput = inputShaped.reshape(-1) self._allConnectedM.rightVecSumAtNZ_fast(flatInput, self._overlaps) # Apply stimulusThreshold # TODO: Is there a faster numpy operation for this? self._overlaps[self._overlaps < stimulusThreshold] = 0 self._overlapsNoBoost = self._overlaps.copy() ############################################################################# def _computeOverlapsCPP(self, inputShaped, stimulusThreshold): """ Same as _computeOverlapsPy, but using a C++ implementation. """ cpp_overlap(self._cloneMapFlat, self._inputSlices2, self._coincSlices2, inputShaped, self._masterConnectedM, stimulusThreshold, self._overlaps); ############################################################################# def _computeOverlapsTest(self, inputShaped, stimulusThreshold): """ Same as _computeOverlapsPy, but compares the python and C++ implementations. 
""" # Py version self._computeOverlapsPy(inputShaped, stimulusThreshold) overlaps2 = copy.deepcopy(self._overlaps) # C++ version self._computeOverlapsCPP(inputShaped, stimulusThreshold) if (abs(self._overlaps - overlaps2) > 1e-6).any(): print self._overlaps, overlaps2, abs(self._overlaps - overlaps2) import pdb; pdb.set_trace() sys.exit(0) ############################################################################# def _raiseAllPermanences(self, masterNum, minConnections=None, densePerm=None, densePotential=None): """ Raise all permanences of the given master. If minConnections is given, the permanences will be raised until at least minConnections of them are connected strength. If minConnections is left at None, all permanences will be raised by self._synPermBelowStimulusInc. After raising all permanences, we also "sparsify" the permanence matrix and set to 0 any permanences which are already very close to 0, this keeps the memory requirements of the sparse matrices used to store the permanences lower. Parameters: ---------------------------------------------------------------------------- masterNum: Which master to bump up minConnections: Desired number of connected synapses to have If None, then all permanences are simply bumped up by self._synPermBelowStimulusInc densePerm: The dense representation of the master's permanence matrix, if available. If not specified, we will create this from the stored sparse representation. Providing this will avoid some compute overhead. If provided, it is assumed that it is more recent than the stored sparse matrix. The stored sparse matrix will ALWAYS be updated from the densePerm if the densePerm is provided. densePotential: The dense representation of the master's potential synapses matrix, if available. If not specified, we will create this from the stored sparse potential matrix. Providing this will avoid some compute overhead. If provided, it is assumed that it is more recent than the stored sparse matrix. 
retval: (modified, numConnections) modified: True if any permanences were raised numConnections: Number of actual connected synapses (not computed if minConnections was None, so None is returned in that case.) """ # It's faster to perform this operation on the dense matrices and # then convert to sparse once we're done since we will be potentially # introducing and then later removing a bunch of non-zeros. # ------------------------------------------------------------------- # Get references to the sparse perms and potential syns for this master sparsePerm = self._masterPermanenceM[masterNum] sparsePotential = self._masterPotentialM[masterNum] # We will trim off all synapse permanences below this value to 0 in order # to keep the memory requirements of the SparseMatrix lower trimThreshold = self.synPermActiveInc / 2.0 # ------------------------------------------------------------------- # See if we already have the required number of connections. If we don't, # get the dense form of the permanences if we don't have them already if densePerm is None: # See if we already have enough connections, if so, we can avoid the # overhead of converting to dense if minConnections is not None: numConnected = sparsePerm.countWhereGreaterEqual( 0, self._coincRFShape[0], 0, self._coincRFShape[1], self.synPermConnected) if numConnected >= minConnections: return (False, numConnected) densePerm = self._masterPermanenceM[masterNum].toDense() elif minConnections is not None: numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected) if numConnected >= minConnections: sparsePerm.fromDense(densePerm) sparsePerm.threshold(trimThreshold) return (False, numConnected) # Get the dense form of the potential synapse locations if densePotential is None: densePotential = self._masterPotentialM[masterNum].toDense() # ------------------------------------------------------------------- # Form the array with the increments incrementM = densePotential.astype(realDType) incrementM *= 
self._synPermBelowStimulusInc # ------------------------------------------------------------------- # Increment until we reach our target number of connections assert (densePerm.dtype == realDType) while True: densePerm += incrementM if minConnections is None: numConnected = None break numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected) if numConnected >= minConnections: break # ------------------------------------------------------------------- # Convert back to sparse form and trim any values that are already # close to zero sparsePerm.fromDense(densePerm) sparsePerm.threshold(trimThreshold) return (True, numConnected) ############################################################################# def _bumpUpWeakCoincidences(self): """ This bump-up ensures every coincidence have non-zero connections. We find all coincidences which have overlaps less than stimulus threshold. We add synPermActiveInc to all the synapses. This step when repeated over time leads to synapses crossing synPermConnected threshold. """ # Update each cell's connected threshold based on the duty cycle before # inhibition. The connected threshold is linearly interpolated # between the points (dutyCycle:0, thresh:0) and (dutyCycle:minDuty, # thresh:synPermConnected). This is a line defined as: y = mx + b # thresh = synPermConnected/minDuty * dutyCycle bumpUpList = (self._dutyCycleBeforeInh \ < self._minDutyCycleBeforeInh).nonzero()[0] for master in bumpUpList: self._raiseAllPermanences(master) # Update the connected synapses for each master we touched self._updateConnectedCoincidences(bumpUpList) if self.spVerbosity >= 2 and len(bumpUpList) > 0: print "Bumping up permanences in following cells due to falling below" \ "minDutyCycleBeforeInh:", bumpUpList ############################################################################# def _updateBoostFactors(self): """ Update the boost factors. 
    The boost factors is linearly interpolated between the points
    (dutyCycle:0, boost:maxFiringBoost) and (dutyCycle:minDuty, boost:1.0).
    This is a line defined as: y = mx + b
    boost = (1-maxFiringBoost)/minDuty * dutyCycle + maxFiringBoost

    Parameters:
    ------------------------------------------------------------------------
    boostFactors:  numpy array of boost factors, defined per master
    """
    if self._minDutyCycleAfterInh.sum() > 0:
      self._firingBoostFactors = (1 - self.maxFiringBoost) \
                    / self._minDutyCycleAfterInh * self._dutyCycleAfterInh \
                    + self.maxFiringBoost
      # Cells already meeting their minimum duty cycle get no boost.
      self._firingBoostFactors[self._dutyCycleAfterInh \
                    > self._minDutyCycleAfterInh] = 1.0

    # if self._dutyCycleAfterInh.min() == 0: # where there are unlearned coincs
    #   self._firingBoostFactors[self._dutyCycleAfterInh == 0] = \
    #       self.maxFiringBoost

  #############################################################################
  def _updateInputUse(self, onCellIndices):
    """ During learning (adapting permanence values), we need to be able to
    tell which inputs are going to 2 or more active cells at once.

    We step through each coinc and mark all the inputs it is connected to. The
    inputUse array acts as a counter for the number of connections to the
    coincs from each input.

    Parameters:
    ------------------------------------------------------------------------
    inputUse:  numpy array of number of coincs connected to each input
    """
    allConnected = SM32(self._allConnectedM)    # TODO: avoid this copy
    # Sum the connected-synapse rows of all active cells; each input position
    #  ends up holding the number of active cells connected to it.
    self._inputUse[:] = allConnected.addListOfRows(
                              onCellIndices).reshape(self.inputShape)

  #############################################################################
  def _adaptSynapses(self, onCellIndices, orphanCellIndices, input):
    """ This is the main function in learning of SP. The permanence values are
    changed based on the learning rules.

    Parameters:
    ------------------------------------------------------------------------
    onCellIndices:      columns which are turned on after inhibition. The
                          permanence values of these coincs are adapted based
                          on the input.
    orphanCellIndices:  columns which had very high overlap with the input,
                          but ended up being inhibited
    input:              Input, shaped to the input topology
    retval:             list of masterCellIndices that were actually updated,
                          or the original onCellIndices when cloning is off
    """
    # Capturing learning stats?
    if self.printPeriodicStats > 0:
      self._stats['explainedInputsCurIteration'] = set()

    # Precompute the active, inactive, and dupe inputs up front for speed
    # TODO: put these into pre-allocated arrays for speed
    self._activeInput[:] = input

    # Create a matrix containing the default permanence deltas for each input
    self._permChanges.fill(-1 * self.synPermInactiveDec)
    self._permChanges[self._activeInput] = self.synPermActiveInc
    if self.synPermActiveSharedDec != 0:
      # Active inputs feeding 2 or more connected cells get an extra decrement
      numpy.logical_and(self._activeInput, self._inputUse>1, self._dupeInput)
      self._permChanges[self._dupeInput] -= self.synPermActiveSharedDec

    # Cloning? If so, scramble the onCells so that we pick a random one to
    #  update for each master. We only update a master cell at most one time
    #  per input presentation.
    cloningOn = (self.numCloneMasters != self._coincCount)
    if cloningOn:
      # Scramble the onCellIndices so that we pick a random one to update
      onCellIndices = list(onCellIndices)
      random.shuffle(onCellIndices)
      visitedMasters = set()

    # For the firing cells, update permanence values
    for columnNum in itertools.chain(onCellIndices, orphanCellIndices):
      # Get the master number
      masterNum = self._cloneMapFlat[columnNum]
      # If cloning, only visit each master once
      if cloningOn:
        if masterNum in visitedMasters:
          continue
        visitedMasters.add(masterNum)

      # Get the slices of input that overlap with the valid area of this master
      inputSlice = self._inputSlices[columnNum]
      rfActiveInput = self._activeInput[inputSlice]
      rfPermChanges = self._permChanges[inputSlice]

      # Get the potential synapses, permanence values, and connected synapses
      #  for this master
      masterPotential = self._masterPotentialM[masterNum].toDense()
      masterPermanence = self._masterPermanenceM[masterNum].toDense()
      masterConnected = \
          self._masterConnectedM[masterNum].toDense().astype('bool')

      # Make changes only over the areas that overlap the input level. For
      #  coincidences near the edge of the level for example, this excludes the
      #  synapses outside the edge.
      coincSlice = self._coincSlices[columnNum]
      # NOTE: basic slicing returns a view, so writes through
      #  masterValidPermanence also update masterPermanence.
      masterValidPermanence= masterPermanence[coincSlice]

      # Capturing learning stats?
      if self.printPeriodicStats > 0:
        # Record which active inputs this cell's connected synapses "explain"
        masterValidConnected = masterConnected[coincSlice]
        explainedInputs = self._inputLayout[inputSlice][masterValidConnected]
        self._stats['explainedInputsCurIteration'].update(explainedInputs)

      if self.spVerbosity >= 3:
        print " adapting cell:%d [%d:%d] (master:%d)" % (columnNum,
                  columnNum // self.coincidencesShape[1],
                  columnNum % self.coincidencesShape[1], masterNum)
        print " initialConnected: %d" % \
                  (self._masterConnectedM[masterNum].nNonZeros())
        print " firingLevel: %d" % (self._overlaps[columnNum])
        print " firingBoostFactor: %f" % (self._firingBoostFactors[masterNum])
        print " input slice: \n"
        self._printInputSlice(rfActiveInput, prefix=' ')

      # Update permanences given the active input (NOTE: The "FP" in this
      #  function name stands for "Function Pointer").
      if columnNum in orphanCellIndices:
        # Decrease permanence of active inputs
        masterValidPermanence[rfActiveInput] -= self.synPermOrphanDec
      else:
        self._updatePermanenceGivenInputFP(columnNum, masterNum,
                  input, self._inputUse, masterPermanence,
                  masterValidPermanence, rfActiveInput, rfPermChanges)

      # Clip to absolute min and max permanence values
      numpy.clip(masterPermanence, self._synPermMin, self._synPermMax,
                 out=masterPermanence)

      # Keep only the potential syns for this cell
      numpy.multiply(masterPermanence, masterPotential, masterPermanence)

      # If we are tracking learning stats, prepare to see how many changes
      #  were made to the cell connections
      if self.printPeriodicStats > 0:
        masterConnectedOrig = SM_01_32_32(self._masterConnectedM[masterNum])

      # ---------------------------------------------------------------------
      # If the number of connected synapses happens to fall below
      #  stimulusThreshold, bump up all permanences a bit.
      # We could also just wait for the "duty cycle falls below
      #  minDutyCycleBeforeInb" logic to catch it, but doing it here is
      #  pre-emptive and much faster.
      #
      # The "duty cycle falls below minDutyCycleBeforeInb" logic will still
      #  catch other possible situations, like:
      #  * if the set of inputs a cell learned suddenly stop firing due to
      #    input statistic changes
      #  * damage to the level below
      #  * input is very sparse and we still don't pass stimulusThreshold even
      #    with stimulusThreshold conneted synapses.
      self._raiseAllPermanences(masterNum,
                minConnections=self.stimulusThreshold,
                densePerm=masterPermanence,
                densePotential=masterPotential)

      # Update the matrices that contain the connected syns for this cell.
      self._updateConnectedCoincidences([masterNum])

      # If we are tracking learning stats, see how many changes were made to
      #  this cell's connections
      if self.printPeriodicStats > 0:
        origNumConnections = masterConnectedOrig.nNonZeros()
        masterConnectedOrig.logicalAnd(self._masterConnectedM[masterNum])
        numUnchanged = masterConnectedOrig.nNonZeros()
        # Changes = connections dropped + connections gained
        numChanges = origNumConnections - numUnchanged
        numChanges += self._masterConnectedM[masterNum].nNonZeros() \
                      - numUnchanged
        self._stats['numChangedConnectionsSum'][masterNum] += numChanges
        self._stats['numLearns'][masterNum] += 1

      # Verbose?
      if self.spVerbosity >= 3:
        print " done cell:%d [%d:%d] (master:%d)" % (columnNum,
                  columnNum // self.coincidencesShape[1],
                  columnNum % self.coincidencesShape[1], masterNum)
        print " newConnected: %d" % \
                  (self._masterConnectedM[masterNum].nNonZeros())
        self._printSyns(columnNum, prefix=' ',
                        showValues=(self.spVerbosity >= 4))
        print

    # Return list of updated masters
    if cloningOn:
      return list(visitedMasters)
    else:
      return onCellIndices

  #############################################################################
  def _updatePermanenceGivenInputPy(self, columnNum, masterNum, input,
                inputUse, permanence, permanenceSlice, activeInputSlice,
                permChangesSlice):
    """ Given the input to a master coincidence, update its permanence values
    based on our learning rules.
On Entry, we are given the slice of the permanence matrix that corresponds only to the area of the coincidence master that is within the borders of the entire input field. Parameters: ------------------------------------------------------------------------ columnNum: The column number of this cell masterNum: The master coincidence that corresponds to this column input: The entire input, shaped appropriately inputUse: The same shape as input. Each entry is a count of the number of *currently active cells* that are connected to that input. permanence: The entire masterPermanence matrix for this master permanenceSlice: The slice of the masterPermanence matrix for this master that intersects the input field, i.e. does not overhang the outside edges of the input. activeInputSlice: The portion of 'input' that intersects permanenceSlice, set to True where input != 0 permChangesSlice: The portion of 'input' that intersects permanenceSlice, set to self.synPermActiveInc where input != 0 and self.synPermInactiveDec where the input == 0. This is used to optimally apply self.synPermActiveInc and self.synPermInactiveDec at the same time and can be used for any cell whose _synPermBoostFactor is set to 1.0. """ # Apply the baseline increment/decrements permanenceSlice += permChangesSlice # If this cell has permanence boost, apply the incremental ############################################################################# def _updatePermanenceGivenInputCPP(self, columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice): """ Same as _updatePermanenceGivenInputPy, but using a C++ implementation. """ inputNCols = self.inputShape[1] masterNCols = self._masterPotentialM[masterNum].shape[1] #TODO: synPermBoostFactors has been removed. CPP implementation has not been updated for this. 
adjustMasterValidPermanence(columnNum, masterNum, inputNCols, masterNCols, self.synPermActiveInc, self.synPermInactiveDec, self.synPermActiveSharedDec, input, inputUse, self._inputSlices2, self._coincSlices2, self._synPermBoostFactors, permanence) ############################################################################# def _updatePermanenceGivenInputTest(self, columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice): """ Same as _updatePermanenceGivenInputPy, but compares the python and C++ implementations. """ mp2 = copy.deepcopy(permanence) mvp2 = copy.deepcopy(permanenceSlice) # Py version import pdb; pdb.set_trace() self._updatePermanenceGivenInputPy(columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice) # C++ version self._updatePermanenceGivenInputCPP(columnNum, masterNum, input, inputUse, mp2, mvp2, activeInputSlice, permChangesSlice) if abs(mp2 - permanence).max() > 1e-6: print abs(mp2 - permanence).max() import pdb; pdb.set_trace() sys.exit(0) ############################################################################# def _periodicStatsCreate(self): """ Allocate the periodic stats structure Parameters: ------------------------------------------------------------------ """ self._stats = dict() self._stats['numChangedConnectionsSum'] = numpy.zeros(self.numCloneMasters, dtype=realDType) self._stats['numLearns'] = numpy.zeros(self.numCloneMasters, dtype=realDType) # These keep track of the min and max boost factor seen for each # column during each training period self._stats['minBoostFactor'] = numpy.zeros(self.numCloneMasters, dtype=realDType) self._stats['maxBoostFactor'] = numpy.zeros(self.numCloneMasters, dtype=realDType) # This dict maintains mappings of specific input patterns to specific # output patterns. It is used to detect "thrashing" of cells. We measure # how similar the output presentation of a specific input is to the # last time we saw it. 
self._stats['inputPatterns'] = dict() self._stats['inputPatternsLimit'] = 5000 self._periodicStatsReset() ############################################################################# def _periodicStatsReset(self): """ Reset the periodic stats this is done every N iterations before capturing a new set of stats Parameters: ------------------------------------------------------------------ """ self._stats['numSamples'] = 0 self._stats['numOnSum'] = 0 self._stats['underCoveragePctSum'] = 0 self._stats['overCoveragePctSum'] = 0 self._stats['cellOverlapSums'] = 0 self._stats['cellPctOverlapSums'] = 0 self._stats['explainedInputsCurIteration'] = set() self._stats['startTime'] = time.time() # These keep a count of the # of changed connections per update # for each master self._stats['numChangedConnectionsSum'].fill(0) self._stats['numLearns'].fill(0) # These keep track of the min and max boost factor seen for each # column during each training period self._stats['minBoostFactor'].fill(self.maxFiringBoost) self._stats['maxBoostFactor'].fill(0) # This keeps track of the average distance between the SP output of # a specific input pattern now and the last time we saw it. self._stats['outputPatternDistanceSum'] = 0 self._stats['outputPatternSamples'] = 0 ############################################################################# def _periodicStatsComputeEnd(self, activeCells, activeInputs): """ Called at the end of compute. This increments the number of computes and also summarizes the under and over coverage and whatever other periodic stats we need. 
    If the period is up, it then prints the accumulated stats and resets them
    for the next period

    Parameters:
    ------------------------------------------------------------------
    activeCells:        list of the active cells
    activeInputs:       list of the active inputs
    """

    # Update number of samples
    self._stats['numSamples'] += 1

    # Compute under and over coverage
    numOn = len(activeCells)
    self._stats['numOnSum'] += numOn

    expInput = self._stats['explainedInputsCurIteration']
    inputLen = len(activeInputs)
    # NOTE(review): divides by inputLen below -- assumes at least one active
    # input; an empty activeInputs would raise ZeroDivisionError. Confirm
    # callers never pass an all-zero input when stats are enabled.
    underCoverage = len(set(activeInputs).difference(expInput))
    self._stats['underCoveragePctSum'] += float(underCoverage) / inputLen
    expInput.difference_update(activeInputs)
    overCoverage = len(expInput)
    self._stats['overCoveragePctSum'] += float(overCoverage) / inputLen

    # Keep track of the min and max boost factor seen for each column
    # (third argument is numpy's 'out' parameter: accumulate in place)
    numpy.minimum(self._firingBoostFactors, self._stats['minBoostFactor'],
                  self._stats['minBoostFactor'])
    numpy.maximum(self._firingBoostFactors, self._stats['maxBoostFactor'],
                  self._stats['maxBoostFactor'])

    # Calculate the distance in the SP output between this input now
    # and the last time we saw it.
    inputPattern = str(sorted(activeInputs))
    (outputNZ, sampleIdx) = \
        self._stats['inputPatterns'].get(inputPattern, (None, None))
    activeCellSet = set(activeCells)
    if outputNZ is not None:
      # Symmetric set difference: # of cells that changed since last time
      distance = len(activeCellSet.difference(outputNZ)) \
                  + len(outputNZ.difference(activeCellSet))
      #print "DISTANCE: ", distance
      #if len(self._stats['inputPatterns']) == 100 and distance > 0:
      #  print "input pattern (%d):" % (sampleIdx), inputPattern
      #  print "prior output:", sorted(outputNZ)
      #  print "new output:", sorted(activeCellSet)
      #  print "distance: ", distance
      self._stats['inputPatterns'][inputPattern] = (activeCellSet, sampleIdx)
      self._stats['outputPatternDistanceSum'] += distance
      self._stats['outputPatternSamples'] += 1

    # Add this sample to our dict, if it's not too large already
    elif len(self._stats['inputPatterns']) < self._stats['inputPatternsLimit']:
      self._stats['inputPatterns'][inputPattern] = \
          (activeCellSet, self._iterNum)

    # -----------------------------------------------------------------------
    # If it's not time to print them out, return now
    if (self._iterNum % self.printPeriodicStats) != 0:
      return

    numSamples = float(self._stats['numSamples'])

    # Calculate number of changes made per master
    masterTouched = numpy.where(self._stats['numLearns'] > 0)
    if len(masterTouched[0]) == 0:
      numMasterChanges = numpy.zeros(1)
    else:
      numMasterChanges = self._stats['numChangedConnectionsSum'][masterTouched]
      numMasterChanges /= self._stats['numLearns'][masterTouched]

    # This fills in the static learning stats into self._learningStats
    self.getLearningStats()

    # Calculate and copy the transient learning stats into the
    # self._learningStats dict, for possible retrieval later by
    # the getLearningStats() method
    self._learningStats['elapsedTime'] = time.time() - self._stats['startTime']
    self._learningStats['activeCountAvg'] = self._stats['numOnSum'] / numSamples
    self._learningStats['underCoveragePct'] = \
        100.0*self._stats['underCoveragePctSum'] / numSamples
    self._learningStats['overCoveragePct'] = \
        100.0*self._stats['overCoveragePctSum'] / numSamples
    self._learningStats['numConnectionChangesAvg'] = numMasterChanges.mean()
    self._learningStats['numConnectionChangesMin'] = numMasterChanges.min()
    self._learningStats['numConnectionChangesMax'] = numMasterChanges.max()
    self._learningStats['avgCellOverlap'] = \
        float(self._stats['cellOverlapSums']) / max(1, self._stats['numOnSum'])
    self._learningStats['avgCellPctOverlap'] = \
        100.0*self._stats['cellPctOverlapSums'] / max(1, self._stats['numOnSum'])
    self._learningStats['firingBoostMaxChangePct'] = 100.0 \
        * (self._stats['maxBoostFactor'] / self._stats['minBoostFactor']).max() \
        - 100.0
    self._learningStats['outputRepresentationChangeAvg'] = \
        float(self._stats['outputPatternDistanceSum']) / \
        max(1, self._stats['outputPatternSamples'])
    self._learningStats['outputRepresentationChangePctAvg'] = \
        100.0 * self._learningStats['outputRepresentationChangeAvg'] / \
        max(1,self._learningStats['activeCountAvg'])
    # A value of -1 flags that the pattern dict hit its size cap, so the
    # unique-input count is only a lower bound.
    self._learningStats['numUniqueInputsSeen'] = \
        len(self._stats['inputPatterns'])
    if self._learningStats['numUniqueInputsSeen'] >= \
        self._stats['inputPatternsLimit']:
      self._learningStats['numUniqueInputsSeen'] = -1

    # -------------------------------------------------------------------
    # Print all stats captured
    print "Learning stats for the last %d iterations:" % (numSamples)
    print " iteration #: %d" % (self._iterNum)
    print " inference iteration #: %d" % (self._inferenceIterNum)
    print " elapsed time: %.2f" \
        % (self._learningStats['elapsedTime'])
    print " avg activeCount: %.1f" \
        % (self._learningStats['activeCountAvg'])
    print " avg under/overCoverage: %-6.1f / %-6.1f %%" \
        % (self._learningStats['underCoveragePct'],
           self._learningStats['overCoveragePct'])
    print " avg cell overlap: %-6.1f / %-6.1f %%" \
        % (self._learningStats['avgCellOverlap'],
           self._learningStats['avgCellPctOverlap'])
    print " avg/min/max RF radius: %-6.1f / %-6.1f / %-6.1f" \
        % (self._learningStats['rfRadiusAvg'],
           self._learningStats['rfRadiusMin'],
           self._learningStats['rfRadiusMax'])
    print " inhibition radius: %d" \
        % (self._learningStats['inhibitionRadius'])
    print " target density: %.5f %%" \
        % (self._learningStats['targetDensityPct'])
    print " avg/min/max coinc. size: %-6.1f / %-6d / %-6d" \
        % (self._learningStats['coincidenceSizeAvg'],
           self._learningStats['coincidenceSizeMin'],
           self._learningStats['coincidenceSizeMax'])
    print " avg/min/max DC before inh: %-6.4f / %-6.4f / %-6.4f" \
        % (self._learningStats['dcBeforeInhibitionAvg'],
           self._learningStats['dcBeforeInhibitionMin'],
           self._learningStats['dcBeforeInhibitionMax'])
    print " avg/min/max DC after inh: %-6.4f / %-6.4f / %-6.4f" \
        % (self._learningStats['dcAfterInhibitionAvg'],
           self._learningStats['dcAfterInhibitionMin'],
           self._learningStats['dcAfterInhibitionMax'])
    print " avg/min/max boost: %-6.4f / %-6.4f / %-6.4f" \
        % (self._learningStats['firingBoostAvg'],
           self._learningStats['firingBoostMin'],
           self._learningStats['firingBoostMax'])
    print " avg/min/max # conn. changes: %-6.4f / %-6.4f / %-6.4f" \
        % (self._learningStats['numConnectionChangesAvg'],
           self._learningStats['numConnectionChangesMin'],
           self._learningStats['numConnectionChangesMax'])
    print " max change in boost: %.1f %%" \
        % (self._learningStats['firingBoostMaxChangePct'])
    print " avg change in output repr.: %-6.1f / %-6.1f %%" \
        % (self._learningStats['outputRepresentationChangeAvg'],
           100.0 * self._learningStats['outputRepresentationChangeAvg']
           / max(1,self._learningStats['activeCountAvg']))
    print " # of unique input pats seen: %d" \
        % (self._learningStats['numUniqueInputsSeen'])
    #self._printMemberSizes()

    # Reset the stats for the next period
    self._periodicStatsReset()

  ##############################################################################
  def _printInputSlice(self, inputSlice, prefix=''):
    """ Print the given input slice in a nice human readable format.
    Parameters:
    ---------------------------------------------------------------------
    cell:         The slice of input to print
    prefix:       This is printed at the start of each row of the coincidence
    """

    # Shape of each coincidence
    (rfHeight, rfWidth) = inputSlice.shape

    syns = inputSlice != 0

    # Render a '*' for a non-zero input element, a blank otherwise.
    def _synStr(x):
      if not x:
        return ' '
      else:
        return '*'

    # Print them out
    for row in xrange(syns.shape[0]):
      items = map(_synStr, syns[row])
      print prefix, ''.join(items)

  ##############################################################################
  def _printSyns(self, cell, prefix='', showValues=False):
    """ Print the synapse permanence values for the given cell in a nice,
    human, readable format.

    Parameters:
    ---------------------------------------------------------------------
    cell:         which cell to print
    prefix:       This is printed at the start of each row of the coincidence
    showValues:   If True, print the values of each permanence. If False,
                  just print a ' ' if not connected and a '*' if connected
    """

    # Shape of each coincidence
    (rfHeight, rfWidth) = self.inputShape

    # Get the synapse permanences.
masterNum = self._cloneMapFlat[cell] syns = self._masterPermanenceM[masterNum].toDense() if showValues: def _synStr(x): if x == 0: return ' -- ' elif x < 0.001: return ' 0 ' elif x >= self.synPermConnected: return '#%3.2f' % x else: return ' %3.2f' % x else: def _synStr(x): if x < self.synPermConnected: return ' ' else: return '*' # Print them out for row in xrange(syns.shape[0]): items = map(_synStr, syns[row]) if showValues: print prefix, ' '.join(items) else: print prefix, ''.join(items) ############################################################################## def _printMemberSizes(self, over=100): """ Print the size of each member """ members = self.__dict__.keys() sizeNamePairs = [] totalSize = 0 for member in members: item = self.__dict__[member] if hasattr(item, '__func__'): continue try: if hasattr(item, '__len__'): size = 0 for i in xrange(len(item)): size += len(cPickle.dumps(item[i])) else: size = len(cPickle.dumps(item)) except: print "WARNING: Can't pickle %s" % (member) size = 0 sizeNamePairs.append((size, member)) totalSize += size # Print them out from highest to lowest sizeNamePairs.sort(reverse=True) for (size, name) in sizeNamePairs: if size > over: print "%10d (%10.3fMb) %s" % (size, size/1000000.0, name) print "\nTOTAL: %10d (%10.3fMB) " % (totalSize, totalSize/1000000.0) ############################################################################## def printParams(self): """ Print the main creation parameters associated with this instance. 
""" print "FDRCSpatial2 creation parameters: " print "inputShape =", self.inputShape print "inputBorder =", self.inputBorder print "inputDensity =", self.inputDensity print "coincidencesShape =", self.coincidencesShape print "coincInputRadius =", self.coincInputRadius print "coincInputPoolPct =", self.coincInputPoolPct print "gaussianDist =", self.gaussianDist print "commonDistributions =", self.commonDistributions print "localAreaDensity =", self.localAreaDensity print "numActivePerInhArea =", self.numActivePerInhArea print "stimulusThreshold =", self.stimulusThreshold print "synPermInactiveDec =", self.synPermInactiveDec print "synPermActiveInc =", self.synPermActiveInc print "synPermActiveSharedDec =", self.synPermActiveSharedDec print "synPermOrphanDec =", self.synPermOrphanDec print "synPermConnected =", self.synPermConnected print "minPctDutyCycleBeforeInh =", self.minPctDutyCycleBeforeInh print "minPctDutyCycleAfterInh =", self.minPctDutyCycleAfterInh print "dutyCyclePeriod =", self.dutyCyclePeriod print "maxFiringBoost =", self.maxFiringBoost print "maxSSFiringBoost =", self.maxSSFiringBoost print "maxSynPermBoost =", self.maxSynPermBoost print "minDistance =", self.minDistance print "spVerbosity =", self.spVerbosity print "printPeriodicStats =", self.printPeriodicStats print "testMode =", self.testMode print "numCloneMasters =", self.numCloneMasters
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Libtree(CMakePackage): """ldd as a tree with an option to bundle dependencies into a single folder""" homepage = "https://github.com/haampie/libtree" url = "https://github.com/haampie/libtree/releases/download/v1.0.3/sources.tar.gz" maintainers = ['haampie'] version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374') version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4') version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536') version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45') version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b') libtree package: add version 1.2.0, 1.1.4, and 1.1.3 (#17035) # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Libtree(CMakePackage): """ldd as a tree with an option to bundle dependencies into a single folder""" homepage = "https://github.com/haampie/libtree" url = "https://github.com/haampie/libtree/releases/download/v1.0.3/sources.tar.gz" maintainers = ['haampie'] version('1.2.0', sha256='4316a52aed7c8d2f7d2736c935bbda952204be92e56948110a143283764c427c') version('1.1.4', sha256='cfafb24c8f5e0d356c82777c338d58730ca6f3cb76dfe8a6857ee3ad65bf8be7') version('1.1.3', sha256='7baf5aaecd3a076bf1e7a1aa86979e7b841ab3f678ca8ac0e2a22bbbccf0dd06') version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374') version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4') version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536') version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45') version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b')
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """Spatial pooler implementation. TODO: Change print statements to use the logging module. 
""" import copy import cPickle import inspect import itertools import math import numpy import numpy.random from operator import itemgetter import os import random import struct import sys import time from nupic.bindings.algorithms import (adjustMasterValidPermanence, cpp_overlap, cpp_overlap_sbm, Inhibition2) from nupic.bindings.math import (count_gte, GetNTAReal, Random as NupicRandom, SM_01_32_32, SM32) from nupic.math.cross import cross from nupic.research import fdrutilities as fdru realDType = GetNTAReal() gPylabInitialized = False # kDutyCycleFactor add dutyCycleAfterInh to overlap in Inhibition step to be a # tie breaker kDutyCycleFactor = 0.01 def _extractCallingMethodArgs(): """ Returns args dictionary from the calling method """ callingFrame = inspect.stack()[1][0] argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame) argNames.remove("self") args = copy.copy(frameLocalVarDict) for varName in frameLocalVarDict: if varName not in argNames: args.pop(varName) return args class FDRCSpatial2(object): """ Class for spatial pooling based on fixed random distributed representation (FDR). This version of FDRCSpatial inlcudes adaptive receptive fields, no-dupe rules and gradual boosting. It supports 1-D and 2-D topologies with cloning. 
""" def __init__(self, inputShape=(32, 32), inputBorder=8, inputDensity=1.0, coincidencesShape=(48, 48), coincInputRadius=16, coincInputPoolPct=1.0, gaussianDist=False, commonDistributions=False, localAreaDensity=-1.0, numActivePerInhArea=10.0, stimulusThreshold=0, synPermInactiveDec=0.01, synPermActiveInc=0.1, synPermActiveSharedDec=0.0, synPermOrphanDec=0.0, synPermConnected=0.10, minPctDutyCycleBeforeInh=0.001, minPctDutyCycleAfterInh=0.001, dutyCyclePeriod=1000, maxFiringBoost=10.0, maxSSFiringBoost=2.0, maxSynPermBoost=10.0, minDistance=0.0, cloneMap=None, numCloneMasters=-1, seed=-1, spVerbosity=0, printPeriodicStats=0, testMode=False, globalInhibition=False, spReconstructionParam="unweighted_mean", useHighTier=True, randomSP=False, ): """ Parameters: ---------------------------- inputShape: The dimensions of the input vector. Format is (height, width) e.g. (24, 72). If the input is from a sensor, it is interpreted as having a 2-D topology of 24 pixels high and 72 wide. inputBorder: The first column from an edge will be centered over an input which is 'inputBorder' inputs from the edge. inputDensity: The density of the input. This is only to aid in figuring out the initial number of connected synapses to place on each column. The lower the inputDensity, the more initial connections will be assigned to each column. coincidencesShape: The dimensions of column layout. Format is (height, width) e.g. (80,100) means a total of 80*100 = 800 are arranged in a 2-D topology with 80 rows and 100 columns. coincInputRadius: This defines the max radius of the receptive field of each column. This is used to limit memory requirements and processing time. It could be set large enough to encompass the entire input field and the SP would still work fine, but require more memory and processing time. This parameter defines a square area: a column will have a max square RF with sides of length 2 * coincInputRadius + 1. 
coincInputPoolPct What percent of the columns's receptive field is available for potential synapses. At initialization time, we will choose coincInputPoolPct * (2*coincInputRadius + 1)^2 potential synapses from the receptive field. gaussianDist: If true, the initial permanences assigned to each column will have a gaussian distribution to them, making the column favor inputs directly below it over inputs farther away. If false, the initial permanences will have a random distribution across the column's entire potential receptive field. commonDistributions: If set to True (the default, faster startup time), each column will be given the same initial permanence values. This is normally OK when you will be training, but if you will be sticking with the untrained network, you will want to set this to False (which makes startup take longer). localAreaDensity: The desired density of active columns within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected receptive fields of all columns). The inhibition logic will insure that at most N columns remain ON within a local inhibition area, where N = localAreaDensity * (total number of columns in inhibition area). numActivePerInhArea: An alternate way to control the density of the active columns. If numActivePerInhArea is specified then localAreaDensity must be -1, and vice versa. When using numActivePerInhArea, the inhibition logic will insure that at most 'numActivePerInhArea' columns remain ON within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected receptive fields of all columns). When using this method, as columns learn and grow their effective receptive fields, the inhibitionRadius will grow, and hence the net density of the active columns will *decrease*. 
This is in contrast to the localAreaDensity method, which keeps the density of active columns the same regardless of the size of their receptive fields. stimulusThreshold: This is a number specifying the minimum number of synapses that must be on in order for a columns to turn ON. The purpose of this is to prevent noise input from activating columns. synPermInactiveDec: How much an inactive synapse is decremented, specified as a percent of a fully grown synapse. synPermActiveInc: How much to increase the permanence of an active synapse, specified as a percent of a fully grown synapse. synPermActiveSharedDec: How much to decrease the permanence of an active synapse which is connected to another column that is active at the same time. Specified as a percent of a fully grown synapse. synPermOrphanDec: How much to decrease the permanence of an active synapse on a column which has high overlap with the input, but was inhibited (an "orphan" column). synPermConnected: The default connected threshold. Any synapse whose permanence value is above the connected threshold is a "connected synapse", meaning it can contribute to the cell's firing. Typical value is 0.10. Cells whose activity level before inhibition falls below minDutyCycleBeforeInh will have their own internal synPermConnectedCell threshold set below this default value. (This concept applies to both SP and TP and so 'cells' is correct here as opposed to 'columns') minPctDutyCycleBeforeInh: A number between 0 and 1.0, used to set a floor on how often a column should have at least stimulusThreshold active inputs. Periodically, each column looks at the duty cycle before inhibition of all other column within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleBeforeInh * max(other columns' duty cycles). On each iteration, any column whose duty cycle before inhibition falls below this computed value will get all of its permanence values boosted up by synPermActiveInc. 
Raising all permanences in response to a sub-par duty cycle before inhibition allows a cell to search for new inputs when either its previously learned inputs are no longer ever active, or when the vast majority of them have been "hijacked" by other columns due to the no-dupe rule. minPctDutyCycleAfterInh: A number between 0 and 1.0, used to set a floor on how often a column should turn ON after inhibition. Periodically, each column looks at the duty cycle after inhibition of all other columns within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleAfterInh * max(other columns' duty cycles). On each iteration, any column whose duty cycle after inhibition falls below this computed value will get its internal boost factor increased. dutyCyclePeriod: The period used to calculate duty cycles. Higher values make it take longer to respond to changes in boost or synPerConnectedCell. Shorter values make it more unstable and likely to oscillate. maxFiringBoost: The maximum firing level boost factor. Each column's raw firing strength gets multiplied by a boost factor before it gets considered for inhibition. The actual boost factor for a column is number between 1.0 and maxFiringBoost. A boost factor of 1.0 is used if the duty cycle is >= minDutyCycle, maxFiringBoost is used if the duty cycle is 0, and any duty cycle in between is linearly extrapolated from these 2 endpoints. maxSSFiringBoost: Once a column turns ON, it's boost will immediately fall down to maxSSFiringBoost if it is above it. This is accomplished by internally raising it's computed duty cycle accordingly. This prevents a cell which has had it's boost raised extremely high from turning ON for too many diverse inputs in a row within a short period of time. maxSynPermBoost: The maximum synPermActiveInc boost factor. Each column's synPermActiveInc gets multiplied by a boost factor to make the column more or less likely to form new connections. 
The actual boost factor used is a number between 1.0 and maxSynPermBoost. A boost factor of 1.0 is used if the duty cycle is >= minDutyCycle, maxSynPermBoost is used if the duty cycle is 0, and any duty cycle in between is linearly extrapolated from these 2 endpoints. minDistance: This parameter impacts how finely the input space is quantized. It is a value between 0 and 1.0. If set to 0, then every unique input presentation will generate a unique output representation, within the limits of the total number of columns available. Higher values will tend to group similar inputs together into the same output representation. Only column which overlap with the input less than 100*(1.0-minDistance) percent will have a possibility of losing the inhibition competition against a boosted, 'bored' cell. cloneMap: An array (numColumnsHigh, numColumnsWide) that contains the clone index to use for each column. numCloneMasters: The number of distinct clones in the map. This is just outputCloningWidth*outputCloningHeight. seed: Seed for our own pseudo-random number generator. spVerbosity: spVerbosity level: 0, 1, 2, or 3 printPeriodicStats: If > 0, then every 'printPeriodicStats' iterations, the SP will print to stdout some statistics related to learning, such as the average pct under and over-coverage, average number of active columns, etc. in the last 'showLearningStats' iterations. testMode: If True, run the SP in test mode. This runs both the C++ and python implementations on all internal functions that support both and insures that both produce the same result. globalInhibition: If true, enforce the localAreaDensity/numActivePerInhArea globally over the entire region, ignoring any dynamically calculated inhibitionRadius. In effect, this is the same as setting the inhibition radius to include the entire region. spReconstructionParam:Specifies which SP reconstruction optimization to be used. 
Each column's firing strength is weighted by the percent Overlap, permanence or duty Cycle if this parameter is set to 'pctOverlap', 'permanence', or 'dutycycle' respectively. If parameter is set to 'maximum_firingstrength', the maximum of the firing strengths (weighted by permanence) is used instead of the weighted sum. useHighTier: The "high tier" feature is to deal with sparse input spaces. If over (1-minDistance) percent of a column's connected synapses are active, it will automatically become one of the winning columns. If False, columns are activated based on their absolute overlap with the input. Also, boosting will be disabled to prevent pattern oscillation randomSP: If True, the SP will not update its permanences and will instead use it's initial configuration for all inferences. """ # Save our __init__ args for debugging self._initArgsDict = _extractCallingMethodArgs() # Handle people instantiating us directly that don't pass in a cloneMap... # This creates a clone map without any cloning if cloneMap is None: cloneMap, numCloneMasters = fdru.makeCloneMap( columnsShape=coincidencesShape, outputCloningWidth=coincidencesShape[1], outputCloningHeight=coincidencesShape[0] ) self.numCloneMasters = numCloneMasters self._cloneMapFlat = cloneMap.reshape((-1,)) # Save creation parameters self.inputShape = int(inputShape[0]), int(inputShape[1]) self.inputBorder = inputBorder self.inputDensity = inputDensity self.coincidencesShape = coincidencesShape self.coincInputRadius = coincInputRadius self.coincInputPoolPct = coincInputPoolPct self.gaussianDist = gaussianDist self.commonDistributions = commonDistributions self.localAreaDensity = localAreaDensity self.numActivePerInhArea = numActivePerInhArea self.stimulusThreshold = stimulusThreshold self.synPermInactiveDec = synPermInactiveDec self.synPermActiveInc = synPermActiveInc self.synPermActiveSharedDec = synPermActiveSharedDec self.synPermOrphanDec = synPermOrphanDec self.synPermConnected = synPermConnected 
self.minPctDutyCycleBeforeInh = minPctDutyCycleBeforeInh self.minPctDutyCycleAfterInh = minPctDutyCycleAfterInh self.dutyCyclePeriod = dutyCyclePeriod self.maxFiringBoost = maxFiringBoost self.maxSSFiringBoost = maxSSFiringBoost self.maxSynPermBoost = maxSynPermBoost self.minDistance = minDistance self.spVerbosity = spVerbosity self.printPeriodicStats = printPeriodicStats self.testMode = testMode self.globalInhibition = globalInhibition self.spReconstructionParam = spReconstructionParam self.useHighTier= useHighTier != 0 self.randomSP = randomSP != 0 if not self.useHighTier: self.minPctDutyCycleAfterInh = 0 self.fileCount = 0 self._runIter = 0 # Start at iteration #0 self._iterNum = 0 # Number of learning iterations self._inferenceIterNum = 0 # Number of inference iterations # Print creation parameters if spVerbosity >= 3: self.printParams() print "seed =", seed # Check for errors assert (self.numActivePerInhArea == -1 or self.localAreaDensity == -1) assert (self.inputShape[1] > 2 * self.inputBorder) # 1D layouts have inputShape[0] == 1 if self.inputShape[0] > 1: assert self.inputShape[0] > 2 * self.inputBorder # Calculate other member variables self._coincCount = int(self.coincidencesShape[0] * self.coincidencesShape[1]) self._inputCount = int(self.inputShape[0] * self.inputShape[1]) self._synPermMin = 0.0 self._synPermMax = 1.0 self._pylabInitialized = False # The rate at which we bump up all synapses in response to not passing # stimulusThreshold self._synPermBelowStimulusInc = self.synPermConnected / 10.0 self._hasTopology = True if self.inputShape[0] == 1: # 1-D layout self._coincRFShape = (1, (2 * coincInputRadius + 1)) # If we only have 1 column of coincidences, then assume the user wants # each coincidence to cover the entire input if self.coincidencesShape[1] == 1: assert self.inputBorder >= (self.inputShape[1] - 1) // 2 assert coincInputRadius >= (self.inputShape[1] - 1) // 2 self._coincRFShape = (1, self.inputShape[1]) self._hasTopology = False else: # 
2-D layout self._coincRFShape = ((2*coincInputRadius + 1), (2*coincInputRadius + 1)) # This gets set to True in finishLearning. Once set, we don't allow # learning anymore and delete all member variables needed only for # learning. self._doneLearning = False # Init random seed self._seed(seed) # Hard-coded in the current case self.randomTieBreakingFraction = 0.5 # The permanence values used to initialize the master coincs are from # this initial permanence array # The initial permanence is gaussian shaped with mean at center and variance # carefully chosen to have connected synapses initialPermanence = self._initialPermanence() # masterPotentialM, masterPermanenceM and masterConnectedM are numpy arrays # of dimensions (coincCount, coincRfShape[0], coincRFShape[1]) # # masterPotentialM: Keeps track of the potential synapses of each # master. Potential synapses are marked as True # masterPermanenceM: Holds the permanence values of the potential synapses. # The values can range from 0.0 to 1.0 # masterConnectedM: Keeps track of the connected synapses of each # master. Connected synapses are the potential synapses # with permanence values greater than synPermConnected. self._masterPotentialM, self._masterPermanenceM = ( self._makeMasterCoincidences(self.numCloneMasters, self._coincRFShape, self.coincInputPoolPct, initialPermanence, self.random)) # Update connected coincidences, the connected synapses have permanence # values greater than synPermConnected. self._masterConnectedM = [] dense = numpy.zeros(self._coincRFShape) for i in xrange(self.numCloneMasters): self._masterConnectedM.append(SM_01_32_32(dense)) # coinc sizes are used in normalizing the raw overlaps self._masterConnectedCoincSizes = numpy.empty(self.numCloneMasters, 'uint32') # Make one mondo coincidence matrix for all cells at once. It has one row # per cell. The width of each row is the entire input width. There will be # ones in each row where that cell has connections. 
When we have cloning, # and we modify the connections for a clone master, we will update all # cells that share that clone master with the new connections. self._allConnectedM = SM_01_32_32(self._inputCount) self._allConnectedM.resize(self._coincCount, self._inputCount) # Initialize the dutyCycles and boost factors per clone master self._dutyCycleBeforeInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._minDutyCycleBeforeInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._dutyCycleAfterInh = numpy.zeros(self.numCloneMasters, dtype=realDType) self._minDutyCycleAfterInh = numpy.zeros(self.numCloneMasters, dtype=realDType) # TODO: We don't need to store _boostFactors, can be calculated from duty # cycle self._firingBoostFactors = numpy.ones(self.numCloneMasters, dtype=realDType) if self.useHighTier: self._firingBoostFactors *= maxFiringBoost # Selectively turn on/off C++ for various methods # TODO: Can we remove the conditional? if self.testMode: self._computeOverlapsImp = "py" # "py or "cpp" or "test" self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test" else: self._computeOverlapsImp = "py" # "py or "cpp" or "test" self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test" # This is used to hold our learning stats (via getLearningStats()) self._learningStats = dict() # These will hold our random state, which we return from __getstate__ and # reseed our random number generators from in __setstate__ so that # a saved/restored SP produces the exact same behavior as one that # continues. 
This behavior allows us to write unit tests that verify that # the behavior of an SP does not change due to saving/loading from a # checkpoint self._randomState = None self._numpyRandomState = None self._nupicRandomState = None # Init ephemeral members # This also calculates the slices and global inhibitionRadius and allocates # the inhibitionObj self._initEphemerals() # If we have no cloning, make sure no column has potential or connected # synapses outside the input area if self.numCloneMasters == self._coincCount: validMask = numpy.zeros(self._coincRFShape, dtype=realDType) for masterNum in xrange(self._coincCount): coincSlice = self._coincSlices[masterNum] validMask.fill(0) validMask[coincSlice] = 1 self._masterPotentialM[masterNum].logicalAnd(SM_01_32_32(validMask)) self._masterPermanenceM[masterNum].elementMultiply(validMask) # Raise all permanences up until the number of connected is above # our desired target, self._raiseAllPermanences(masterNum, minConnections = self.stimulusThreshold / self.inputDensity) # Calculate the number of connected synapses in each master coincidence now self._updateConnectedCoincidences() def _getEphemeralMembers(self): """ List of our member variables that we don't need to be saved """ return ['_inputLayout', '_cellsForMaster', '_columnCenters', #'_cellRFClipped', '_inputSlices', '_coincSlices', '_activeInput', '_permChanges', '_dupeInput', '_onCells', '_masterOnCells', '_onCellIndices', '_inhibitionObj', '_denseOutput', '_overlaps', '_anomalyScores', '_inputUse', '_updatePermanenceGivenInputFP', '_computeOverlapsFP', '_stats', '_rfRadiusAvg', '_rfRadiusMin', '_rfRadiusMax', '_topDownOut', '_topDownParentCounts', ] def _initEphemerals(self): """ Initialize all ephemeral members after being restored to a pickled state. """ # Used by functions which refers to inputs in absolute space # getLearnedCM, cm,.... 
self._inputLayout = numpy.arange(self._inputCount, dtype=numpy.uint32).reshape(self.inputShape) # This array returns the list of cell indices that correspond to each master cloningOn = (self.numCloneMasters != self._coincCount) if cloningOn: self._cellsForMaster = [] for masterNum in xrange(self.numCloneMasters): self._cellsForMaster.append( numpy.where(self._cloneMapFlat == masterNum)[0]) else: self._cellsForMaster = None # TODO: slices are not required for the C++ helper functions # Figure out the slices of shaped input that each column sees... # Figure out the valid region of each column # The reason these slices are in initEphemerals is because numpy slices # can't be pickled self._setSlices() # This holds the output of the inhibition computation - which cells are # on after inhibition self._onCells = numpy.zeros(self._coincCount, dtype=realDType) self._masterOnCells = numpy.zeros(self.numCloneMasters, dtype=realDType) self._onCellIndices = numpy.zeros(self._coincCount, dtype='uint32') # The inhibition object gets allocated by _updateInhibitionObj() during # the first compute and re-allocated periodically during learning self._inhibitionObj = None self._rfRadiusAvg = 0 # Also calculated by _updateInhibitionObj self._rfRadiusMin = 0 self._rfRadiusMax = 0 # Used by the caller to optionally cache the dense output self._denseOutput = None # This holds the overlaps (in absolute number of connected synapses) of each # coinc with input. self._overlaps = numpy.zeros(self._coincCount, dtype=realDType) # This holds the percent overlaps (number of active inputs / number of # connected synapses) of each coinc with input. self._pctOverlaps = numpy.zeros(self._coincCount, dtype=realDType) # This is the value of the anomaly score for each column (after inhibition). self._anomalyScores = numpy.zeros_like(self._overlaps) # This holds the overlaps before stimulus threshold - used for verbose # messages only. 
self._overlapsBST = numpy.zeros(self._coincCount, dtype=realDType) # This holds the number of coincs connected to an input. if not self._doneLearning: self._inputUse = numpy.zeros(self.inputShape, dtype=realDType) # These are boolean matrices, the same shape as the input. if not self._doneLearning: self._activeInput = numpy.zeros(self.inputShape, dtype='bool') self._dupeInput = numpy.zeros(self.inputShape, dtype='bool') # This is used to hold self.synPermActiveInc where the input is on # and -self.synPermInctiveDec where the input is off if not self._doneLearning: self._permChanges = numpy.zeros(self.inputShape, dtype=realDType) # These are used to compute and hold the output from topDownCompute # self._topDownOut = numpy.zeros(self.inputShape, dtype=realDType) # self._topDownParentCounts = numpy.zeros(self.inputShape, dtype='int') # Fill in the updatePermanenceGivenInput method pointer, which depends on # chosen language. if self._updatePermanenceGivenInputImp == "py": self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputPy elif self._updatePermanenceGivenInputImp == "cpp": self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputCPP elif self._updatePermanenceGivenInputImp == "test": self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputTest else: assert False # Fill in the computeOverlaps method pointer, which depends on # chosen language. if self._computeOverlapsImp == "py": self._computeOverlapsFP = self._computeOverlapsPy elif self._computeOverlapsImp == "cpp": self._computeOverlapsFP = self._computeOverlapsCPP elif self._computeOverlapsImp == "test": self._computeOverlapsFP = self._computeOverlapsTest else: assert False # These variables are used for keeping track of learning statistics (when # self.printPeriodicStats is used). self._periodicStatsCreate() def compute(self, flatInput, learn=False, infer=True, computeAnomaly=False): """Compute with the current input vector. 
Parameters: ---------------------------- input : the input vector (numpy array) learn : if True, adapt the input histogram based on this input infer : whether to do inference or not """ # If we are using a random SP, ignore the learn parameter if self.randomSP: learn = False # If finishLearning has been called, don't allow learning anymore if learn and self._doneLearning: raise RuntimeError("Learning can not be performed once finishLearning" " has been called.") assert (learn or infer) assert (flatInput.ndim == 1) and (flatInput.shape[0] == self._inputCount) assert (flatInput.dtype == realDType) input = flatInput.reshape(self.inputShape) # Make sure we've allocated the inhibition object lazily if self._inhibitionObj is None: self._updateInhibitionObj() # Reset first timer if self.printPeriodicStats > 0 and self._iterNum == 0: self._periodicStatsReset() # Using cloning? cloningOn = (self.numCloneMasters != self._coincCount) # If we have high verbosity, save the overlaps before stimulus threshold # so we can print them out at the end if self.spVerbosity >= 2: print "===============================================================" print "Iter:%d" % self._iterNum, "inferenceIter:%d" % \ self._inferenceIterNum self._computeOverlapsFP(input, stimulusThreshold=0) self._overlapsBST[:] = self._overlaps connectedCountsOnEntry = self._masterConnectedCoincSizes.copy() if self.spVerbosity >= 3: inputNZ = flatInput.nonzero()[0] print "active inputs: (%d)" % len(inputNZ), inputNZ # TODO: Port to C++, arguments may be different - t1YXArr, # coincInputRadius,... 
# Calculate the raw overlap of each cell # Overlaps less than stimulus threshold are set to zero in # _calculateOverlaps # This places the result into self._overlaps self._computeOverlapsFP(input, stimulusThreshold=self.stimulusThreshold) # Save the original overlap values, before boosting, for the purpose of # anomaly detection if computeAnomaly: self._anomalyScores[:] = self._overlaps[:] if learn: # Update each cell's duty cycle before inhibition # Only cells with overlaps greater stimulus threshold are considered as # active. # Stimulus threshold has already been applied # TODO: Port to C++? Loops over all coincs # Only updating is carried out here, bump up happens later onCellIndices = numpy.where(self._overlaps > 0) if cloningOn: onMasterIndices = self._cloneMapFlat[onCellIndices] self._masterOnCells.fill(0) self._masterOnCells[onMasterIndices] = 1 denseOn = self._masterOnCells else: self._onCells.fill(0) self._onCells[onCellIndices] = 1 denseOn = self._onCells # dutyCyclePeriod = self._iterNum + 1 let _dutyCycleBeforeInh # and _dutyCycleAfterInh represent real firing percentage at the # beginning of learning. This will effect boosting and let unlearned # coincidences have high boostFactor at beginning. self.dutyCyclePeriod = min(self._iterNum + 1, 1000) self._dutyCycleBeforeInh = ( ((self.dutyCyclePeriod - 1) * self._dutyCycleBeforeInh + denseOn) / self.dutyCyclePeriod) # Compute firing levels based on boost factor and raw overlap. Update # self._overlaps in place, replacing it with the boosted overlap. 
We also # computes percent overlap of each column and store that into # self._pctOverlaps if cloningOn: self._pctOverlaps[:] = self._overlaps self._pctOverlaps /= self._masterConnectedCoincSizes[self._cloneMapFlat] boostFactors = self._firingBoostFactors[self._cloneMapFlat] else: self._pctOverlaps[:] = self._overlaps potentials = self._masterConnectedCoincSizes self._pctOverlaps /= numpy.maximum(1, potentials) boostFactors = self._firingBoostFactors # To process minDistance, we do the following: # 1.) All cells which do not overlap the input "highly" (less than # minDistance), are considered to be in the "low tier" and get their # overlap multiplied by their respective boost factor. # 2.) All other cells, which DO overlap the input highly, get a "high tier # offset" added to their overlaps, and boost is not applied. The # "high tier offset" is computed as the max of all the boosted # overlaps from step #1. This insures that a cell in this high tier # will never lose to a cell from the low tier. if self.useHighTier: highTier = numpy.where(self._pctOverlaps >= (1.0 - self.minDistance))[0] else: highTier = [] someInHighTier = len(highTier) > 0 if someInHighTier: boostFactors = numpy.array(boostFactors) boostFactors[highTier] = 1.0 # Apply boostFactors only in learning phase not in inference phase. if learn: self._overlaps *= boostFactors if someInHighTier: highTierOffset = self._overlaps.max() + 1.0 self._overlaps[highTier] += highTierOffset # Cache the dense output for debugging. if self._denseOutput is not None: self._denseOutput = self._overlaps.copy() # Incorporate inhibition and see who is firing after inhibition. # We don't need this method to process stimulusThreshold because we # already processed it. # Also, we pass in a small 'addToWinners' amount which gets added to the # winning elements as we go along. This prevents us from choosing more than # topN winners per inhibition region when more than topN elements all have # the same max high score. 
learnedCellsOverlaps = numpy.array(self._overlaps) if infer and not learn: # Cells that have never learnt are not allowed to win during inhibition if not self.randomSP: learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = 0 else: # Boost the unlearned cells to 1000 so that the winning columns are # picked randomly. From the set of unlearned columns. Boost columns that # havent been learned with uniformly to 1000 so that inhibition picks # randomly from them. if self.useHighTier: learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = ( learnedCellsOverlaps.max() + 1) # Boost columns that are in highTier (ie. they match the input very # well). learnedCellsOverlaps[highTier] += learnedCellsOverlaps.max() + 1 # Small random tiebreaker for columns with equal overlap tieBreaker = numpy.random.rand(*learnedCellsOverlaps.shape).astype( realDType) learnedCellsOverlaps += 0.1 * tieBreaker numOn = self._inhibitionObj.compute( learnedCellsOverlaps, self._onCellIndices, 0.0, # stimulusThreshold max(learnedCellsOverlaps)/1000.0, # addToWinners ) self._onCells.fill(0) if numOn > 0: onCellIndices = self._onCellIndices[0:numOn] self._onCells[onCellIndices] = 1 else: onCellIndices = [] # Compute the anomaly scores only for the winning columns. if computeAnomaly: self._anomalyScores *= self._onCells self._anomalyScores *= self._dutyCycleAfterInh if self.spVerbosity >= 2: print "inhRadius", self._inhibitionObj.getInhibitionRadius() print "inhLocalAreaDensity", self._inhibitionObj.getLocalAreaDensity() print "numFiring", numOn # Capturing learning stats? 
If so, capture the cell overlap statistics if self.printPeriodicStats > 0: activePctOverlaps = self._pctOverlaps[onCellIndices] self._stats['cellPctOverlapSums'] += activePctOverlaps.sum() if cloningOn: onMasterIndices = self._cloneMapFlat[onCellIndices] else: onMasterIndices = onCellIndices self._stats['cellOverlapSums'] += ( activePctOverlaps * self._masterConnectedCoincSizes[onMasterIndices]).sum() # Compute which cells had very high overlap, but were still # inhibited. These we are calling our "orphan cells", because they are # representing an input which is already better represented by another # cell. if self.synPermOrphanDec > 0: orphanCellIndices = set(numpy.where(self._pctOverlaps >= 1.0)[0]) orphanCellIndices.difference_update(onCellIndices) else: orphanCellIndices = [] if learn: # Update the number of coinc connections per input # During learning (adapting permanence values), we need to be able to # recognize dupe inputs - inputs that go two 2 or more active cells if self.synPermActiveSharedDec != 0: self._updateInputUse(onCellIndices) # For the firing cells, update permanence values. onMasterIndices = self._adaptSynapses(onCellIndices, orphanCellIndices, input) # Increase the permanence values of columns which haven't passed # stimulus threshold of overlap with at least a minimum frequency self._bumpUpWeakCoincidences() # Update each cell's after-inhibition duty cycle # TODO: As the on-cells are sparse after inhibition, we can have # a different updateDutyCycles function taking advantage of the sparsity if cloningOn: self._masterOnCells.fill(0) self._masterOnCells[onMasterIndices] = 1 denseOn = self._masterOnCells else: denseOn = self._onCells self._dutyCycleAfterInh = (( (self.dutyCyclePeriod - 1) * self._dutyCycleAfterInh + denseOn) / self.dutyCyclePeriod) # Update the boost factors based on firings rate after inhibition. self._updateBoostFactors() # Increment iteration number and perform our periodic tasks if it's time. 
if (self._iterNum + 1) % 50 == 0: self._updateInhibitionObj() self._updateMinDutyCycles( self._dutyCycleBeforeInh, self.minPctDutyCycleBeforeInh, self._minDutyCycleBeforeInh) self._updateMinDutyCycles( self._dutyCycleAfterInh, self.minPctDutyCycleAfterInh, self._minDutyCycleAfterInh) # Next iteration if learn: self._iterNum += 1 if infer: self._inferenceIterNum += 1 if learn: # Capture and possibly print the periodic stats if self.printPeriodicStats > 0: self._periodicStatsComputeEnd(onCellIndices, flatInput.nonzero()[0]) # Verbose print other stats if self.spVerbosity >= 2: cloning = (self.numCloneMasters != self._coincCount) print " #connected on entry: ", fdru.numpyStr( connectedCountsOnEntry, '%d ', includeIndices=True) print " #connected on exit: ", fdru.numpyStr( self._masterConnectedCoincSizes, '%d ', includeIndices=True) if self.spVerbosity >= 3 or not cloning: print " overlaps: ", fdru.numpyStr(self._overlapsBST, '%d ', includeIndices=True, includeZeros=False) print " firing levels: ", fdru.numpyStr(self._overlaps, '%.4f ', includeIndices=True, includeZeros=False) print " on after inhibition: ", onCellIndices if not self._doneLearning: print " minDutyCycleBeforeInh:", fdru.numpyStr( self._minDutyCycleBeforeInh, '%.4f ', includeIndices=True) print " dutyCycleBeforeInh: ", fdru.numpyStr(self._dutyCycleBeforeInh, '%.4f ', includeIndices=True) print " belowMinBeforeInh: " % numpy.nonzero( self._dutyCycleBeforeInh \ < self._minDutyCycleBeforeInh)[0] print " minDutyCycleAfterInh: ", fdru.numpyStr( self._minDutyCycleAfterInh, '%.4f ', includeIndices=True) print " dutyCycleAfterInh: ", fdru.numpyStr(self._dutyCycleAfterInh, '%.4f ', includeIndices=True) print " belowMinAfterInh: " % numpy.nonzero( self._dutyCycleAfterInh \ < self._minDutyCycleAfterInh)[0] print " firingBoosts: ", fdru.numpyStr(self._firingBoostFactors, '%.4f ', includeIndices=True) print elif self.spVerbosity >= 1: print "SP: learn: ", learn print "SP: active outputs(%d): " % (len(onCellIndices)), 
onCellIndices self._runIter += 1 # Return inference result return self._onCells def __getstate__(self): # Update our random states self._randomState = random.getstate() self._numpyRandomState = numpy.random.get_state() self._nupicRandomState = self.random.getState() state = self.__dict__.copy() # Delete ephemeral members that we don't want pickled for ephemeralMemberName in self._getEphemeralMembers(): if ephemeralMemberName in state: del state[ephemeralMemberName] return state def __setstate__(self, state): self.__dict__.update(state) # Support older checkpoints # These fields were added on 2010-10-05 and _iterNum was preserved if not hasattr(self, '_randomState'): self._randomState = random.getstate() self._numpyRandomState = numpy.random.get_state() self._nupicRandomState = self.random.getState() self._iterNum = 0 # Init our random number generators random.setstate(self._randomState) numpy.random.set_state(self._numpyRandomState) self.random.setState(self._nupicRandomState) # Load things that couldn't be pickled... self._initEphemerals() def getAnomalyScore(self): """Get the aggregate anomaly score for this input pattern Returns: A single scalar value for the anomaly score """ numNonzero = len(numpy.nonzero(self._anomalyScores)[0]) return 1.0 / (numpy.sum(self._anomalyScores) + 1) def getLearningStats(self): """Return a dictionary containing a set of statistics related to learning. Here is a list of what is returned: 'activeCountAvg': The average number of active columns seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. If printPeriodicStats is not turned on (== 0), then this is -1 'underCoveragePct': The average under-coverage of the input as seen over the last N training iterations, where N is set by the constructor parameter printPeriodicStats. 
If printPeriodicStats is not turned on (== 0),
            then this is -1

    'overCoveragePct':
            The average over-coverage of the input as seen over the last N
            training iterations, where N is set by the constructor parameter
            printPeriodicStats. If printPeriodicStats is not turned on (== 0),
            then this is -1

    'numConnectionChangesAvg':
            The overall average number of connection changes made per active
            column per iteration, over the last N training iterations, where
            N is set by the constructor parameter printPeriodicStats. This
            gives an indication as to how much learning is still occurring.
            If printPeriodicStats is not turned on (== 0), then this is -1

    'numConnectionChangesMin':
            The minimum number of connection changes made to an active column
            per iteration, over the last N training iterations, where N is
            set by the constructor parameter printPeriodicStats. This gives
            an indication as to how much learning is still occurring.
            If printPeriodicStats is not turned on (== 0), then this is -1

    'numConnectionChangesMax':
            The maximum number of connection changes made to an active column
            per iteration, over the last N training iterations, where N is
            set by the constructor parameter printPeriodicStats. This gives
            an indication as to how much learning is still occurring.
            If printPeriodicStats is not turned on (== 0), then this is -1

    'rfSize':
            The average receptive field size of the columns.

    'inhibitionRadius':
            The average inhibition radius of the columns.
'targetDensityPct':
            The most recent target local area density used, as a percent
            (0 -> 100)

    'coincidenceSizeAvg':
            The average learned coincidence size

    'coincidenceSizeMin':
            The minimum learned coincidence size

    'coincidenceSizeMax':
            The maximum learned coincidence size

    'dcBeforeInhibitionAvg':
            The average of duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMin':
            The minimum duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMax':
            The maximum duty cycle before inhibition of all coincidences

    'dcAfterInhibitionAvg':
            The average of duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMin':
            The minimum duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMax':
            The maximum duty cycle after inhibition of all coincidences

    'firingBoostAvg':
            The average firing boost

    'firingBoostMin':
            The minimum firing boost

    'firingBoostMax':
            The maximum firing boost
    """
    # Fill in the stats that can be computed on the fly. The transient stats
    # that depend on printPeriodicStats being on, have already been stored
    self._learningStats['rfRadiusAvg'] = self._rfRadiusAvg
    self._learningStats['rfRadiusMin'] = self._rfRadiusMin
    self._learningStats['rfRadiusMax'] = self._rfRadiusMax

    # The inhibition object is allocated lazily on the first compute(), so it
    # may legitimately be absent if no compute has run yet.
    if self._inhibitionObj is not None:
      self._learningStats['inhibitionRadius'] = (
          self._inhibitionObj.getInhibitionRadius())
      self._learningStats['targetDensityPct'] = (
          100.0 * self._inhibitionObj.getLocalAreaDensity())
    else:
      print "Warning: No inhibitionObj found for getLearningStats"
      self._learningStats['inhibitionRadius'] = 0.0
      self._learningStats['targetDensityPct'] = 0.0

    self._learningStats['coincidenceSizeAvg'] = (
        self._masterConnectedCoincSizes.mean())
    self._learningStats['coincidenceSizeMin'] = (
        self._masterConnectedCoincSizes.min())
    self._learningStats['coincidenceSizeMax'] = (
        self._masterConnectedCoincSizes.max())
    # Duty cycles are only tracked while learning is still enabled; after
    # finishLearning they are deleted, so skip them then.
    if not self._doneLearning:
      self._learningStats['dcBeforeInhibitionAvg'] = (
          self._dutyCycleBeforeInh.mean())
self._learningStats['dcBeforeInhibitionMin'] = (
          self._dutyCycleBeforeInh.min())
      self._learningStats['dcBeforeInhibitionMax'] = (
          self._dutyCycleBeforeInh.max())
      self._learningStats['dcAfterInhibitionAvg'] = (
          self._dutyCycleAfterInh.mean())
      self._learningStats['dcAfterInhibitionMin'] = (
          self._dutyCycleAfterInh.min())
      self._learningStats['dcAfterInhibitionMax'] = (
          self._dutyCycleAfterInh.max())
    self._learningStats['firingBoostAvg'] = self._firingBoostFactors.mean()
    self._learningStats['firingBoostMin'] = self._firingBoostFactors.min()
    self._learningStats['firingBoostMax'] = self._firingBoostFactors.max()

    return self._learningStats

  def resetStats(self):
    """Reset the stats (periodic, ???). This will usually be called by user
    code at the start of each inference run (for a particular data set).

    TODO: which other stats need to be reset? Learning stats?
    """
    self._periodicStatsReset()

  def _seed(self, seed=-1):
    """
    Initialize the random seed

    Seeds the nupic, Python, and numpy random number generators together so
    behavior is fully reproducible. A seed of -1 (the default) leaves the
    generators unseeded (non-deterministic).
    """
    if seed != -1:
      self.random = NupicRandom(seed)
      random.seed(seed)
      numpy.random.seed(seed)
    else:
      self.random = NupicRandom()

  def _initialPermanence(self):
    """Create and return a 2D matrix filled with initial permanence values.

    The returned matrix will be of shape:
    (2*coincInputRadius + 1, 2*coincInputRadius + 1).

    The initial permanence values are set between 0 and 1.0, with enough
    chosen above synPermConnected to make it highly likely that a cell will
    pass stimulusThreshold, given the size of the potential RF, the input
    pool sampling percentage, and the expected density of the active inputs.

    If gaussianDist is True, the center of the matrix will contain the
    highest permanence values and lower values will be farther from the
    center. If gaussianDist is False, the highest permanence values will be
    evenly distributed throughout the potential RF.
    """
    # Figure out the target number of connected synapses.
We want about 2X # stimulusThreshold minOn = 2 * max(self.stimulusThreshold, 10) / self.coincInputPoolPct \ / self.inputDensity # Get the gaussian distribution, with max magnitude just slightly above # synPermConnected. Try to find a sigma that gives us about 2X # stimulusThreshold connected synapses after sub-sampling for # coincInputPoolPct. We will assume everything within +/- sigma will be # connected. This logic uses the fact that an x value of sigma generates a # magnitude of 0.6. if self.gaussianDist: # Only supported when we have 2D layouts if self._coincRFShape[0] != self._coincRFShape[1]: raise RuntimeError("Gaussian distibuted permanences are currently only" "supported for 2-D layouts") # The width and height of the center "blob" in inputs is the square root # of the area onAreaDim = numpy.sqrt(minOn) # Sigma is at the edge of the center blob sigma = onAreaDim/2 # Create the gaussian with a value of 1.0 at the center perms = self._gaussianMatrix(dim=max(self._coincRFShape), sigma=sigma) # The distance between the min and max values within the gaussian will # be given by 'grange'. In a gaussian, the value at sigma away from the # center is 0.6 * the value at the center. We want the values at sigma # to be synPermConnected maxValue = 1.0 / 0.6 * self.synPermConnected perms *= maxValue perms.shape = (-1,) # Now, let's clip off the low values to reduce the number of non-zeros # we have and reduce our memory requirements. We'll clip everything # farther away than 2 sigma to 0. The value of a gaussing at 2 sigma # is 0.135 * the value at the center perms[perms < (0.135 * maxValue)] = 0 # Evenly distribute the permanences through the RF else: # Create a random distribution from 0 to 1. perms = numpy.random.random(self._coincRFShape) perms = perms.astype(realDType) # Set the range of values to be between 0 and # synPermConnected+synPermInctiveDec. 
This ensures that a pattern # will always be learned in 1 iteration maxValue = min(1.0, self.synPermConnected + self.synPermInactiveDec) # What percentage do we want to be connected? connectPct = 0.50 # What value from the 0 to 1 distribution will map to synPermConnected? threshold = 1.0 - connectPct # Which will be the connected and unconnected synapses? connectedSyns = perms >= threshold unconnectedSyns = numpy.logical_not(connectedSyns) # Squeeze all values between threshold and 1.0 to be between # synPermConnected and synPermConnected + synPermActiveInc / 4 # This makes sure the firing coincidence perms matching input bit get # greater than synPermConnected and other unconnectedSyns get deconnected # in one firing learning iteration. srcOffset = threshold srcRange = 1.0 - threshold dstOffset = self.synPermConnected dstRange = maxValue - self.synPermConnected perms[connectedSyns] = (perms[connectedSyns] - srcOffset)/srcRange \ * dstRange / 4.0 + dstOffset # Squeeze all values between 0 and threshold to be between 0 and # synPermConnected srcRange = threshold - 0.0 dstRange = self.synPermConnected - 0.0 perms[unconnectedSyns] = perms[unconnectedSyns]/srcRange \ * dstRange # Now, let's clip off the low values to reduce the number of non-zeros # we have and reduce our memory requirements. We'll clip everything # below synPermActiveInc/2 to 0 perms[perms < (self.synPermActiveInc / 2.0)] = 0 perms.shape = (-1,) return perms def _gaussianMatrix(self, dim, sigma): """ Create and return a 2D matrix filled with a gaussian distribution. The returned matrix will be of shape (dim, dim). The mean of the gaussian will be in the center of the matrix and have a value of 1.0. 
""" gaussian = lambda x, sigma: numpy.exp(-(x**2) / (2*(sigma**2))) # Allocate the matrix m = numpy.empty((dim, dim), dtype=realDType) # Find the center center = (dim - 1) / 2.0 # TODO: Simplify using numpy.meshgrid # Fill it in for y in xrange(dim): for x in xrange(dim): dist = numpy.sqrt((x-center)**2 + (y-center)**2) m[y,x] = gaussian(dist, sigma) return m def _makeMasterCoincidences(self, numCloneMasters, coincRFShape, coincInputPoolPct, initialPermanence=None, nupicRandom=None): """Make the master coincidence matrices and mater input histograms. # TODO: Update this example >>> FDRCSpatial._makeMasterCoincidences(1, 2, 0.33) (array([[[ True, True, False, False, False], [False, True, False, False, True], [False, True, False, False, False], [False, False, False, True, False], [ True, False, False, False, False]]], dtype=bool), array([[[ 0.26982325, 0.19995725, 0. , 0. , 0. ], [ 0. , 0.94128972, 0. , 0. , 0.36316112], [ 0. , 0.06312726, 0. , 0. , 0. ], [ 0. , 0. , 0. , 0.29740077, 0. ], [ 0.81071907, 0. , 0. , 0. , 0. 
]]], dtype=float32)) """ if nupicRandom is None: nupicRandom = NupicRandom(42) if initialPermanence is None: initialPermanence = self._initialPermanence() coincRfArea = (coincRFShape[0] * coincRFShape[1]) coincInputPool = coincInputPoolPct * coincRfArea # We will generate a list of sparse matrices masterPotentialM = [] masterPermanenceM = [] toSample = numpy.arange(coincRfArea, dtype='uint32') toUse = numpy.empty(coincInputPool, dtype='uint32') denseM = numpy.zeros(coincRfArea, dtype=realDType) for i in xrange(numCloneMasters): nupicRandom.getUInt32Sample(toSample, toUse) # Put in 1's into the potential locations denseM.fill(0) denseM[toUse] = 1 masterPotentialM.append(SM_01_32_32(denseM.reshape(coincRFShape))) # Put in the initial permanences denseM *= initialPermanence masterPermanenceM.append(SM32(denseM.reshape(coincRFShape))) # If we are not using common initial permanences, create another # unique one for the next cell if not self.commonDistributions: initialPermanence = self._initialPermanence() return masterPotentialM, masterPermanenceM def _updateConnectedCoincidences(self, masters=None): """Update 'connected' version of the given coincidence. Each 'connected' coincidence is effectively a binary matrix (AKA boolean) matrix that is the same size as the input histogram matrices. They have a 1 wherever the inputHistogram is "above synPermConnected". """ # If no masterNum given, update all of them if masters is None: masters = xrange(self.numCloneMasters) nCellRows, nCellCols = self._coincRFShape cloningOn = (self.numCloneMasters != self._coincCount) for masterNum in masters: # Where are we connected? 
masterConnectedNZ = ( self._masterPermanenceM[masterNum].whereGreaterEqual( 0, nCellRows, 0, nCellCols, self.synPermConnected)) rowIdxs = masterConnectedNZ[:,0] colIdxs = masterConnectedNZ[:,1] self._masterConnectedM[masterNum].setAllNonZeros( nCellRows, nCellCols, rowIdxs, colIdxs) self._masterConnectedCoincSizes[masterNum] = len(rowIdxs) # Update the corresponding rows in the super, mondo connected matrix that # come from this master masterConnected = ( self._masterConnectedM[masterNum].toDense().astype('bool')) # 0.2s if cloningOn: cells = self._cellsForMaster[masterNum] else: cells = [masterNum] for cell in cells: inputSlice = self._inputSlices[cell] coincSlice = self._coincSlices[cell] masterSubset = masterConnected[coincSlice] sparseCols = self._inputLayout[inputSlice][masterSubset] self._allConnectedM.replaceSparseRow(cell, sparseCols) # 4s. def _setSlices(self): """Compute self._columnSlices and self._inputSlices self._inputSlices are used to index into the input (assuming it's been shaped to a 2D array) to get the receptive field of each column. There is one item in the list for each column. self._coincSlices are used to index into the coinc (assuming it's been shaped to a 2D array) to get the valid area of the column. There is one item in the list for each column. This function is called upon unpickling, since we can't pickle slices. """ self._columnCenters = numpy.array(self._computeCoincCenters( self.inputShape, self.coincidencesShape, self.inputBorder)) coincInputRadius = self.coincInputRadius coincHeight, coincWidth = self._coincRFShape inputShape = self.inputShape inputBorder = self.inputBorder # Compute the input slices for each cell. This is the slice of the entire # input which intersects with the cell's permanence matrix. 
if self._hasTopology: self._inputSlices = [ numpy.s_[max(0, cy-coincInputRadius): min(inputShape[0], cy+coincInputRadius + 1), max(0, cx-coincInputRadius): min(inputShape[1], cx+coincInputRadius + 1)] for (cy, cx) in self._columnCenters] else: self._inputSlices = [numpy.s_[0:inputShape[0], 0:inputShape[1]] for (cy, cx) in self._columnCenters] self._inputSlices2 = numpy.zeros(4 * len(self._inputSlices), dtype="uint32") k = 0 for i in range(len(self._inputSlices)): self._inputSlices2[k] = self._inputSlices[i][0].start self._inputSlices2[k + 1] = self._inputSlices[i][0].stop self._inputSlices2[k + 2] = self._inputSlices[i][1].start self._inputSlices2[k + 3] = self._inputSlices[i][1].stop k = k + 4 # Compute the coinc slices for each cell. This is which portion of the # cell's permanence matrix intersects with the input. if self._hasTopology: if self.inputShape[0] > 1: self._coincSlices = [ numpy.s_[max(0, coincInputRadius - cy): min(coincHeight, coincInputRadius + inputShape[0] - cy), max(0, coincInputRadius-cx): min(coincWidth, coincInputRadius + inputShape[1] - cx)] for (cy, cx) in self._columnCenters] else: self._coincSlices = [ numpy.s_[0:1, max(0, coincInputRadius-cx): min(coincWidth, coincInputRadius + inputShape[1] - cx)] for (cy, cx) in self._columnCenters] else: self._coincSlices = [numpy.s_[0:coincHeight, 0:coincWidth] for (cy, cx) in self._columnCenters] self._coincSlices2 = numpy.zeros((4*len(self._coincSlices)), dtype="uint32") k = 0 for i in range(len(self._coincSlices)): self._coincSlices2[k] = self._coincSlices[i][0].start self._coincSlices2[k + 1] = self._coincSlices[i][0].stop self._coincSlices2[k + 2] = self._coincSlices[i][1].start self._coincSlices2[k + 3] = self._coincSlices[i][1].stop k = k + 4 @staticmethod def _computeCoincCenters(inputShape, coincidencesShape, inputBorder): """Compute the centers of all coincidences, given parameters. 
This function is semi-public: tools may use it to generate good visualizations of what the FDRCSpatial node is doing. NOTE: It must be static or global function so that it can be called by the ColumnActivityTab inspector *before* the first compute (before the SP has been constructed). If the input shape is (7,20), shown below with * for each input. ******************** ******************** ******************** ******************** ******************** ******************** ******************** If inputBorder is 1, we distribute the coincidences evenly over the the area after removing the edges, @ shows the allowed input area below. ******************** *@@@@@@@@@@@@@@@@@@* *@@@@@@@@@@@@@@@@@@* *@@@@@@@@@@@@@@@@@@* *@@@@@@@@@@@@@@@@@@* *@@@@@@@@@@@@@@@@@@* ******************** Each coincidence is centered at the closest @ and looks at a area with coincInputRadius below it. This function call returns an iterator over the coincidence centers. Each element in iterator is a tuple: (y, x). The iterator returns elements in a fixed order. """ # Determine Y centers if inputShape[0] > 1: # 2-D layout startHeight = inputBorder stopHeight = inputShape[0] - inputBorder else: startHeight = stopHeight = 0 heightCenters = numpy.linspace(startHeight, stopHeight, coincidencesShape[0], endpoint=False).astype('int32') # Determine X centers startWidth = inputBorder stopWidth = inputShape[1] - inputBorder widthCenters = numpy.linspace(startWidth, stopWidth, coincidencesShape[1], endpoint=False).astype('int32') return list(cross(heightCenters, widthCenters)) def _updateInhibitionObj(self): """ Calculate the average inhibitionRadius to use and update the inhibition object accordingly. This looks at the size of the average connected receptive field and uses that to determine the inhibition radius. """ # Compute the inhibition radius. 
    # If using global inhibition, just set it to include the entire region
    if self.globalInhibition:
      avgRadius = max(self.coincidencesShape)

    # Else, set it based on the average size of the connected synapses area in
    # each cell.
    else:
      totalDim = 0

      # Get the dimensions of the connected receptive fields of each cell to
      # compute the average
      minDim = numpy.inf
      maxDim = 0
      for masterNum in xrange(self.numCloneMasters):
        masterConnected = self._masterConnectedM[masterNum]
        nzs = masterConnected.getAllNonZeros()
        rows, cols = zip(*nzs)
        rows = numpy.array(rows)
        cols = numpy.array(cols)
        # With fewer than 2 connected synapses along an axis, the extent of
        # the receptive field along that axis degenerates to 1.
        if len(rows) >= 2:
          height = rows.max() - rows.min() + 1
        else:
          height = 1
        if len(cols) >= 2:
          width = cols.max() - cols.min() + 1
        else:
          width = 1
        avgDim = (height + width) / 2.0
        minDim = min(minDim, avgDim)
        maxDim = max(maxDim, avgDim)
        totalDim += avgDim

      # Get average width/height in input space
      avgDim = totalDim / self.numCloneMasters
      self._rfRadiusAvg = (avgDim - 1.0) / 2.0
      self._rfRadiusMin = (minDim - 1.0) / 2.0
      self._rfRadiusMax = (maxDim - 1.0) / 2.0

      # How many columns in cell space does it correspond to?
      if self.inputShape[0] > 1:
        # 2-D layout
        coincsPerInputX = (float(self.coincidencesShape[1]) /
                           (self.inputShape[1] - 2 * self.inputBorder))
        coincsPerInputY = (float(self.coincidencesShape[0]) /
                           (self.inputShape[0] - 2 * self.inputBorder))
      else:
        coincsPerInputX = coincsPerInputY = (
            float(self.coincidencesShape[1] * self.coincidencesShape[0]) /
            (self.inputShape[1] - 2 * self.inputBorder))
      avgDim *= (coincsPerInputX + coincsPerInputY) / 2
      avgRadius = (avgDim - 1.0) / 2.0
      avgRadius = max(1.0, avgRadius)

    # Can't be greater than the overall width or height of the level
    maxDim = max(self.coincidencesShape)
    avgRadius = min(avgRadius, maxDim)
    avgRadius = int(round(avgRadius))

    # Is there a need to re-instantiate the inhibition object?
    # (Only rebuild when the radius actually changed, or on first use.)
    if (self._inhibitionObj is None or
        self._inhibitionObj.getInhibitionRadius() != avgRadius):
      # What is our target density?
      if self.localAreaDensity > 0:
        localAreaDensity = self.localAreaDensity
      else:
        numCellsPerInhArea = (avgRadius * 2.0 + 1.0) ** 2
        totalCells = self.coincidencesShape[0] * self.coincidencesShape[1]
        numCellsPerInhArea = min(numCellsPerInhArea, totalCells)
        localAreaDensity = float(self.numActivePerInhArea) / numCellsPerInhArea
        # Don't let it be greater than 0.50
        localAreaDensity = min(localAreaDensity, 0.50)

      if self.spVerbosity >= 2:
        print "Updating inhibition object:"
        print " avg. rfRadius:", self._rfRadiusAvg
        print " avg. inhRadius:", avgRadius
        print " Setting density to:", localAreaDensity
      self._inhibitionObj = Inhibition2(self.coincidencesShape[0], # height
                                        self.coincidencesShape[1], # width
                                        avgRadius,        # inhRadius
                                        localAreaDensity) # density

  def _updateMinDutyCycles(self, actDutyCycles, minPctDutyCycle,
                           minDutyCycles):
    """ Calculate and update the minimum acceptable duty cycle for each cell
    based on the duty cycles of the cells within its inhibition radius and
    the minPctDutyCycle.

    Parameters:
    -----------------------------------------------------------------------
    actDutyCycles:    The actual duty cycles of all cells
    minPctDutyCycle:  Each cell's minimum duty cycle will be set to
                      minPctDutyCycle times the duty cycle of the most active
                      cell within its inhibition radius
    minDutyCycles:    This array will be updated in place with the new
                      minimum acceptable duty cycles
    """
    # What is the inhibition radius?
    inhRadius = self._inhibitionObj.getInhibitionRadius()

    # Reshape the actDutyCycles to match the topology of the level
    cloningOn = (self.numCloneMasters != self._coincCount)
    if not cloningOn:
      actDutyCycles = actDutyCycles.reshape(self.coincidencesShape)
      minDutyCycles = minDutyCycles.reshape(self.coincidencesShape)

    # Special, faster handling when inhibition radius includes the entire
    # set of cells.
if cloningOn or inhRadius >= max(self.coincidencesShape): minDutyCycle = minPctDutyCycle * actDutyCycles.max() minDutyCycles.fill(minPctDutyCycle * actDutyCycles.max()) # Else, process each cell else: (numRows, numCols) = self.coincidencesShape for row in xrange(numRows): top = max(0, row - inhRadius) bottom = min(row + inhRadius + 1, numRows) for col in xrange(numCols): left = max(0, col - inhRadius) right = min(col + inhRadius + 1, numCols) maxDutyCycle = actDutyCycles[top:bottom, left:right].max() minDutyCycles[row, col] = maxDutyCycle * minPctDutyCycle if self.spVerbosity >= 2: print "Actual duty cycles:" print fdru.numpyStr(actDutyCycles, '%.4f') print "Recomputed min duty cycles, using inhRadius of", inhRadius print fdru.numpyStr(minDutyCycles, '%.4f') def _computeOverlapsPy(self, inputShaped, stimulusThreshold): """ Computes overlaps for every column for the current input in place. The overlaps less than stimulus threshold are set to zero here. For columns with input RF going off the edge of input field, only regions within the input field are considered. This is equivalent to padding the input field with zeros. Parameters: ------------------------------------------------------------------------ inputShaped: input at the current time step, shaped to the input topology stimulusThreshold: stimulusThreshold to use Member variables used/updated: ------------------------------------------------------------------------ _inputSlices: Index into the input (assuming it's been shaped to a 2D array) to get the receptive field of each column. _coincSlices: Index into the coinc (assuming it's been shaped to a 2D array) to get the valid region of each column. _overlaps: Result is placed into this array which holds the overlaps of each column with the input """ flatInput = inputShaped.reshape(-1) self._allConnectedM.rightVecSumAtNZ_fast(flatInput, self._overlaps) # Apply stimulusThreshold # TODO: Is there a faster numpy operation for this? 
self._overlaps[self._overlaps < stimulusThreshold] = 0 self._overlapsNoBoost = self._overlaps.copy() def _computeOverlapsCPP(self, inputShaped, stimulusThreshold): """ Same as _computeOverlapsPy, but using a C++ implementation. """ cpp_overlap(self._cloneMapFlat, self._inputSlices2, self._coincSlices2, inputShaped, self._masterConnectedM, stimulusThreshold, self._overlaps) def _computeOverlapsTest(self, inputShaped, stimulusThreshold): """ Same as _computeOverlapsPy, but compares the python and C++ implementations. """ # Py version self._computeOverlapsPy(inputShaped, stimulusThreshold) overlaps2 = copy.deepcopy(self._overlaps) # C++ version self._computeOverlapsCPP(inputShaped, stimulusThreshold) if (abs(self._overlaps - overlaps2) > 1e-6).any(): print self._overlaps, overlaps2, abs(self._overlaps - overlaps2) import pdb; pdb.set_trace() sys.exit(0) def _raiseAllPermanences(self, masterNum, minConnections=None, densePerm=None, densePotential=None): """ Raise all permanences of the given master. If minConnections is given, the permanences will be raised until at least minConnections of them are connected strength. If minConnections is left at None, all permanences will be raised by self._synPermBelowStimulusInc. After raising all permanences, we also "sparsify" the permanence matrix and set to 0 any permanences which are already very close to 0, this keeps the memory requirements of the sparse matrices used to store the permanences lower. Parameters: ---------------------------------------------------------------------------- masterNum: Which master to bump up minConnections: Desired number of connected synapses to have If None, then all permanences are simply bumped up by self._synPermBelowStimulusInc densePerm: The dense representation of the master's permanence matrix, if available. If not specified, we will create this from the stored sparse representation. Providing this will avoid some compute overhead. 
If provided, it is assumed that it is more recent than the stored sparse matrix. The stored sparse matrix will ALWAYS be updated from the densePerm if the densePerm is provided. densePotential: The dense representation of the master's potential synapses matrix, if available. If not specified, we will create this from the stored sparse potential matrix. Providing this will avoid some compute overhead. If provided, it is assumed that it is more recent than the stored sparse matrix. retval: (modified, numConnections) modified: True if any permanences were raised numConnections: Number of actual connected synapses (not computed if minConnections was None, so None is returned in that case.) """ # It's faster to perform this operation on the dense matrices and # then convert to sparse once we're done since we will be potentially # introducing and then later removing a bunch of non-zeros. # Get references to the sparse perms and potential syns for this master sparsePerm = self._masterPermanenceM[masterNum] sparsePotential = self._masterPotentialM[masterNum] # We will trim off all synapse permanences below this value to 0 in order # to keep the memory requirements of the SparseMatrix lower trimThreshold = self.synPermActiveInc / 2.0 # See if we already have the required number of connections. 
If we don't, # get the dense form of the permanences if we don't have them already if densePerm is None: # See if we already have enough connections, if so, we can avoid the # overhead of converting to dense if minConnections is not None: numConnected = sparsePerm.countWhereGreaterEqual( 0, self._coincRFShape[0], 0, self._coincRFShape[1], self.synPermConnected) if numConnected >= minConnections: return (False, numConnected) densePerm = self._masterPermanenceM[masterNum].toDense() elif minConnections is not None: numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected) if numConnected >= minConnections: sparsePerm.fromDense(densePerm) sparsePerm.threshold(trimThreshold) return (False, numConnected) # Get the dense form of the potential synapse locations if densePotential is None: densePotential = self._masterPotentialM[masterNum].toDense() # Form the array with the increments incrementM = densePotential.astype(realDType) incrementM *= self._synPermBelowStimulusInc # Increment until we reach our target number of connections assert (densePerm.dtype == realDType) while True: densePerm += incrementM if minConnections is None: numConnected = None break numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected) if numConnected >= minConnections: break # Convert back to sparse form and trim any values that are already # close to zero sparsePerm.fromDense(densePerm) sparsePerm.threshold(trimThreshold) return (True, numConnected) def _bumpUpWeakCoincidences(self): """ This bump-up ensures every coincidence have non-zero connections. We find all coincidences which have overlaps less than stimulus threshold. We add synPermActiveInc to all the synapses. This step when repeated over time leads to synapses crossing synPermConnected threshold. """ # Update each cell's connected threshold based on the duty cycle before # inhibition. 
The connected threshold is linearly interpolated # between the points (dutyCycle:0, thresh:0) and (dutyCycle:minDuty, # thresh:synPermConnected). This is a line defined as: y = mx + b # thresh = synPermConnected/minDuty * dutyCycle bumpUpList = ( self._dutyCycleBeforeInh < self._minDutyCycleBeforeInh).nonzero()[0] for master in bumpUpList: self._raiseAllPermanences(master) # Update the connected synapses for each master we touched. self._updateConnectedCoincidences(bumpUpList) if self.spVerbosity >= 2 and len(bumpUpList) > 0: print ("Bumping up permanences in following cells due to falling below" "minDutyCycleBeforeInh:"), bumpUpList def _updateBoostFactors(self): """ Update the boost factors. The boost factors is linearly interpolated between the points (dutyCycle:0, boost:maxFiringBoost) and (dutyCycle:minDuty, boost:1.0). This is a line defined as: y = mx + b boost = (1-maxFiringBoost)/minDuty * dutyCycle + maxFiringBoost Parameters: ------------------------------------------------------------------------ boostFactors: numpy array of boost factors, defined per master """ if self._minDutyCycleAfterInh.sum() > 0: self._firingBoostFactors = ( (1 - self.maxFiringBoost) / self._minDutyCycleAfterInh * self._dutyCycleAfterInh + self.maxFiringBoost) self._firingBoostFactors[self._dutyCycleAfterInh > self._minDutyCycleAfterInh] = 1.0 def _updateInputUse(self, onCellIndices): """ During learning (adapting permanence values), we need to be able to tell which inputs are going to 2 or more active cells at once. We step through each coinc and mark all the inputs it is connected to. The inputUse array acts as a counter for the number of connections to the coincs from each input. 
    Parameters:
    ------------------------------------------------------------------------
    inputUse:  numpy array of number of coincs connected to each input
    """
    allConnected = SM32(self._allConnectedM) # TODO: avoid this copy
    self._inputUse[:] = allConnected.addListOfRows(
        onCellIndices).reshape(self.inputShape)

  def _adaptSynapses(self, onCellIndices, orphanCellIndices, input):
    """
    This is the main function in learning of SP. The permanence values are
    changed based on the learning rules.

    Parameters:
    ------------------------------------------------------------------------
    onCellIndices:      columns which are turned on after inhibition. The
                        permanence values of these coincs are adapted based
                        on the input.
    orphanCellIndices:  columns which had very high overlap with the input,
                        but ended up being inhibited
    input:              Input, shaped to the input topology

    retval:             list of masterCellIndices that were actually updated,
                        or None if cloning is off
    """
    # Capturing learning stats?
    if self.printPeriodicStats > 0:
      self._stats['explainedInputsCurIteration'] = set()

    # Precompute the active, inactive, and dupe inputs up front for speed
    # TODO: put these into pre-allocated arrays for speed
    self._activeInput[:] = input

    # Create a matrix containing the default permanence deltas for each input
    self._permChanges.fill(-1 * self.synPermInactiveDec)
    self._permChanges[self._activeInput] = self.synPermActiveInc
    if self.synPermActiveSharedDec != 0:
      # "Dupe" inputs: active inputs feeding more than one active cell.
      numpy.logical_and(self._activeInput, self._inputUse>1, self._dupeInput)
      self._permChanges[self._dupeInput] -= self.synPermActiveSharedDec

    # Cloning? If so, scramble the onCells so that we pick a random one to
    # update for each master. We only update a master cell at most one time
    # per input presentation.
    cloningOn = (self.numCloneMasters != self._coincCount)
    if cloningOn:
      # Scramble the onCellIndices so that we pick a random one to update
      onCellIndices = list(onCellIndices)
      random.shuffle(onCellIndices)
      visitedMasters = set()

    # For the firing cells, update permanence values
    for columnNum in itertools.chain(onCellIndices, orphanCellIndices):
      # Get the master number
      masterNum = self._cloneMapFlat[columnNum]

      # If cloning, only visit each master once
      if cloningOn:
        if masterNum in visitedMasters:
          continue
        visitedMasters.add(masterNum)

      # Get the slices of input that overlap with the valid area of this
      # master
      inputSlice = self._inputSlices[columnNum]
      rfActiveInput = self._activeInput[inputSlice]
      rfPermChanges = self._permChanges[inputSlice]

      # Get the potential synapses, permanence values, and connected synapses
      # for this master
      masterPotential = self._masterPotentialM[masterNum].toDense()
      masterPermanence = self._masterPermanenceM[masterNum].toDense()
      masterConnected = (
          self._masterConnectedM[masterNum].toDense().astype('bool'))

      # Make changes only over the areas that overlap the input level. For
      # coincidences near the edge of the level for example, this excludes
      # the synapses outside the edge.
      coincSlice = self._coincSlices[columnNum]
      masterValidPermanence= masterPermanence[coincSlice]

      # Capturing learning stats?
      if self.printPeriodicStats > 0:
        masterValidConnected = masterConnected[coincSlice]
        explainedInputs = self._inputLayout[inputSlice][masterValidConnected]
        self._stats['explainedInputsCurIteration'].update(explainedInputs)

      if self.spVerbosity >= 3:
        print " adapting cell:%d [%d:%d] (master:%d)" % (columnNum,
            columnNum // self.coincidencesShape[1],
            columnNum % self.coincidencesShape[1], masterNum)
        print " initialConnected: %d" % \
            (self._masterConnectedM[masterNum].nNonZeros())
        print " firingLevel: %d" % (self._overlaps[columnNum])
        print " firingBoostFactor: %f" % (self._firingBoostFactors[masterNum])
        print " input slice: \n"
        self._printInputSlice(rfActiveInput, prefix=' ')

      # Update permanences given the active input (NOTE: The "FP" in this
      # function name stands for "Function Pointer").
      if columnNum in orphanCellIndices:
        # Decrease permanence of active inputs
        masterValidPermanence[rfActiveInput] -= self.synPermOrphanDec
      else:
        self._updatePermanenceGivenInputFP(columnNum, masterNum, input,
                                           self._inputUse, masterPermanence,
                                           masterValidPermanence,
                                           rfActiveInput, rfPermChanges)

      # Clip to absolute min and max permanence values
      numpy.clip(masterPermanence, self._synPermMin, self._synPermMax,
                 out=masterPermanence)

      # Keep only the potential syns for this cell
      numpy.multiply(masterPermanence, masterPotential, masterPermanence)

      # If we are tracking learning stats, prepare to see how many changes
      # were made to the cell connections
      if self.printPeriodicStats > 0:
        masterConnectedOrig = SM_01_32_32(self._masterConnectedM[masterNum])

      # If the number of connected synapses happens to fall below
      # stimulusThreshold, bump up all permanences a bit.
      # We could also just wait for the "duty cycle falls below
      # minDutyCycleBeforeInb" logic to catch it, but doing it here is
      # pre-emptive and much faster.
      #
      # The "duty cycle falls below minDutyCycleBeforeInb" logic will still
      # catch other possible situations, like:
      # * if the set of inputs a cell learned suddenly stop firing due to
      #   input statistic changes
      # * damage to the level below
      # * input is very sparse and we still don't pass stimulusThreshold even
      #   with stimulusThreshold conneted synapses.
      self._raiseAllPermanences(masterNum,
                                minConnections=self.stimulusThreshold,
                                densePerm=masterPermanence,
                                densePotential=masterPotential)

      # Update the matrices that contain the connected syns for this cell.
      self._updateConnectedCoincidences([masterNum])

      # If we are tracking learning stats, see how many changes were made to
      # this cell's connections
      if self.printPeriodicStats > 0:
        origNumConnections = masterConnectedOrig.nNonZeros()
        masterConnectedOrig.logicalAnd(self._masterConnectedM[masterNum])
        numUnchanged = masterConnectedOrig.nNonZeros()
        numChanges = origNumConnections - numUnchanged
        numChanges += (self._masterConnectedM[masterNum].nNonZeros() -
                       numUnchanged)
        self._stats['numChangedConnectionsSum'][masterNum] += numChanges
        self._stats['numLearns'][masterNum] += 1

      # Verbose?
      if self.spVerbosity >= 3:
        print " done cell:%d [%d:%d] (master:%d)" % (columnNum,
            columnNum // self.coincidencesShape[1],
            columnNum % self.coincidencesShape[1], masterNum)
        print " newConnected: %d" % \
            (self._masterConnectedM[masterNum].nNonZeros())
        self._printSyns(columnNum, prefix=' ',
                        showValues=(self.spVerbosity >= 4))
        print

    # Return list of updated masters
    if cloningOn:
      return list(visitedMasters)
    else:
      return onCellIndices

  def _updatePermanenceGivenInputPy(
      self, columnNum, masterNum, input, inputUse, permanence,
      permanenceSlice, activeInputSlice, permChangesSlice):
    """ Given the input to a master coincidence, update it's permanence
    values based on our learning rules.

    On Entry, we are given the slice of the permanence matrix that
    corresponds only to the area of the coincidence master that is within
    the borders of the entire input field.
Parameters: ------------------------------------------------------------------------ columnNum: The column number of this cell masterNum: The master coincidence that corresponds to this column input: The entire input, shaped appropriately inputUse: The same shape as input. Each entry is a count of the number of *currently active cells* that are connected to that input. permanence: The entire masterPermanence matrix for this master permanenceSlice: The slice of the masterPermanence matrix for this master that intersects the input field, i.e. does not overhang the outside edges of the input. activeInputSlice: The portion of 'input' that intersects permanenceSlice, set to True where input != 0 permChangesSlice: The portion of 'input' that intersects permanenceSlice, set to self.synPermActiveInc where input != 0 and self.synPermInactiveDec where the input == 0. This is used to optimally apply self.synPermActiveInc and self.synPermInactiveDec at the same time and can be used for any cell whose _synPermBoostFactor is set to 1.0. """ # TODO: This function does nothing. # Apply the baseline increment/decrements permanenceSlice += permChangesSlice # If this cell has permanence boost, apply the incremental def _updatePermanenceGivenInputCPP( self, columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice): """ Same as _updatePermanenceGivenInputPy, but using a C++ implementation. """ inputNCols = self.inputShape[1] masterNCols = self._masterPotentialM[masterNum].shape[1] # TODO: synPermBoostFactors has been removed. CPP implementation has not # been updated for this. 
adjustMasterValidPermanence(columnNum, masterNum, inputNCols, masterNCols, self.synPermActiveInc, self.synPermInactiveDec, self.synPermActiveSharedDec, input, inputUse, self._inputSlices2, self._coincSlices2, self._synPermBoostFactors, permanence) def _updatePermanenceGivenInputTest( self, columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice): """ Same as _updatePermanenceGivenInputPy, but compares the python and C++ implementations. """ mp2 = copy.deepcopy(permanence) mvp2 = copy.deepcopy(permanenceSlice) # Py version import pdb; pdb.set_trace() self._updatePermanenceGivenInputPy(columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice) # C++ version self._updatePermanenceGivenInputCPP(columnNum, masterNum, input, inputUse, mp2, mvp2, activeInputSlice, permChangesSlice) if abs(mp2 - permanence).max() > 1e-6: print abs(mp2 - permanence).max() import pdb; pdb.set_trace() sys.exit(0) def _periodicStatsCreate(self): """ Allocate the periodic stats structure """ self._stats = dict() self._stats['numChangedConnectionsSum'] = numpy.zeros( self.numCloneMasters, dtype=realDType) self._stats['numLearns'] = numpy.zeros( self.numCloneMasters, dtype=realDType) # These keep track of the min and max boost factor seen for each # column during each training period self._stats['minBoostFactor'] = numpy.zeros(self.numCloneMasters, dtype=realDType) self._stats['maxBoostFactor'] = numpy.zeros(self.numCloneMasters, dtype=realDType) # This dict maintains mappings of specific input patterns to specific # output patterns. It is used to detect "thrashing" of cells. We measure # how similar the output presentation of a specific input is to the # last time we saw it. self._stats['inputPatterns'] = dict() self._stats['inputPatternsLimit'] = 5000 self._periodicStatsReset() def _periodicStatsReset(self): """ Reset the periodic stats this is done every N iterations before capturing a new set of stats. 
""" self._stats['numSamples'] = 0 self._stats['numOnSum'] = 0 self._stats['underCoveragePctSum'] = 0 self._stats['overCoveragePctSum'] = 0 self._stats['cellOverlapSums'] = 0 self._stats['cellPctOverlapSums'] = 0 self._stats['explainedInputsCurIteration'] = set() self._stats['startTime'] = time.time() # These keep a count of the # of changed connections per update # for each master self._stats['numChangedConnectionsSum'].fill(0) self._stats['numLearns'].fill(0) # These keep track of the min and max boost factor seen for each # column during each training period self._stats['minBoostFactor'].fill(self.maxFiringBoost) self._stats['maxBoostFactor'].fill(0) # This keeps track of the average distance between the SP output of # a specific input pattern now and the last time we saw it. self._stats['outputPatternDistanceSum'] = 0 self._stats['outputPatternSamples'] = 0 def _periodicStatsComputeEnd(self, activeCells, activeInputs): """ Called at the end of compute. This increments the number of computes and also summarizes the under and over coverage and whatever other periodic stats we need. 
If the period is up, it then prints the accumuated stats and resets them for the next period Parameters: ------------------------------------------------------------------ activeCells: list of the active cells activeInputs: list of the active inputs """ # Update number of samples self._stats['numSamples'] += 1 # Compute under and over coverage numOn = len(activeCells) self._stats['numOnSum'] += numOn expInput = self._stats['explainedInputsCurIteration'] inputLen = len(activeInputs) underCoverage = len(set(activeInputs).difference(expInput)) self._stats['underCoveragePctSum'] += float(underCoverage) / inputLen expInput.difference_update(activeInputs) overCoverage = len(expInput) self._stats['overCoveragePctSum'] += float(overCoverage) / inputLen # Keep track of the min and max boost factor seen for each column numpy.minimum(self._firingBoostFactors, self._stats['minBoostFactor'], self._stats['minBoostFactor']) numpy.maximum(self._firingBoostFactors, self._stats['maxBoostFactor'], self._stats['maxBoostFactor']) # Calculate the distance in the SP output between this input now # and the last time we saw it. inputPattern = str(sorted(activeInputs)) outputNZ, sampleIdx = self._stats['inputPatterns'].get(inputPattern, (None, None)) activeCellSet = set(activeCells) if outputNZ is not None: distance = (len(activeCellSet.difference(outputNZ)) + len(outputNZ.difference(activeCellSet))) self._stats['inputPatterns'][inputPattern] = (activeCellSet, sampleIdx) self._stats['outputPatternDistanceSum'] += distance self._stats['outputPatternSamples'] += 1 # Add this sample to our dict, if it's not too large already elif len(self._stats['inputPatterns']) < self._stats['inputPatternsLimit']: self._stats['inputPatterns'][inputPattern] = (activeCellSet, self._iterNum) # If it's not time to print them out, return now. 
if (self._iterNum % self.printPeriodicStats) != 0: return numSamples = float(self._stats['numSamples']) # Calculate number of changes made per master masterTouched = numpy.where(self._stats['numLearns'] > 0) if len(masterTouched[0]) == 0: numMasterChanges = numpy.zeros(1) else: numMasterChanges = self._stats['numChangedConnectionsSum'][masterTouched] numMasterChanges /= self._stats['numLearns'][masterTouched] # This fills in the static learning stats into self._learningStats self.getLearningStats() # Calculate and copy the transient learning stats into the # self._learningStats dict, for possible retrieval later by # the getLearningStats() method. self._learningStats['elapsedTime'] = time.time() - self._stats['startTime'] self._learningStats['activeCountAvg'] = (self._stats['numOnSum'] / numSamples) self._learningStats['underCoveragePct'] = ( 100.0 * self._stats['underCoveragePctSum'] / numSamples) self._learningStats['overCoveragePct'] = ( (100.0 * self._stats['overCoveragePctSum'] / numSamples)) self._learningStats['numConnectionChangesAvg'] = numMasterChanges.mean() self._learningStats['numConnectionChangesMin'] = numMasterChanges.min() self._learningStats['numConnectionChangesMax'] = numMasterChanges.max() self._learningStats['avgCellOverlap'] = ( (float(self._stats['cellOverlapSums']) / max(1, self._stats['numOnSum']))) self._learningStats['avgCellPctOverlap'] = ( (100.0 * self._stats['cellPctOverlapSums'] / max(1, self._stats['numOnSum']))) self._learningStats['firingBoostMaxChangePct'] = ( 100.0 * (self._stats['maxBoostFactor'] / self._stats['minBoostFactor']).max() - 100.0) self._learningStats['outputRepresentationChangeAvg'] = ( float(self._stats['outputPatternDistanceSum']) / max(1, self._stats['outputPatternSamples'])) self._learningStats['outputRepresentationChangePctAvg'] = ( 100.0 * self._learningStats['outputRepresentationChangeAvg'] / max(1,self._learningStats['activeCountAvg'])) self._learningStats['numUniqueInputsSeen'] = ( 
len(self._stats['inputPatterns'])) if (self._learningStats['numUniqueInputsSeen'] >= self._stats['inputPatternsLimit']): self._learningStats['numUniqueInputsSeen'] = -1 # Print all stats captured print "Learning stats for the last %d iterations:" % (numSamples) print " iteration #: %d" % (self._iterNum) print " inference iteration #: %d" % (self._inferenceIterNum) print " elapsed time: %.2f" % ( self._learningStats['elapsedTime']) print " avg activeCount: %.1f" % ( self._learningStats['activeCountAvg']) print " avg under/overCoverage: %-6.1f / %-6.1f %%" % ( self._learningStats['underCoveragePct'], self._learningStats['overCoveragePct']) print " avg cell overlap: %-6.1f / %-6.1f %%" % ( self._learningStats['avgCellOverlap'], self._learningStats['avgCellPctOverlap']) print " avg/min/max RF radius: %-6.1f / %-6.1f / %-6.1f" % ( self._learningStats['rfRadiusAvg'], self._learningStats['rfRadiusMin'], self._learningStats['rfRadiusMax']) print " inhibition radius: %d" % ( self._learningStats['inhibitionRadius']) print " target density: %.5f %%" % ( self._learningStats['targetDensityPct']) print " avg/min/max coinc. size: %-6.1f / %-6d / %-6d" % ( self._learningStats['coincidenceSizeAvg'], self._learningStats['coincidenceSizeMin'], self._learningStats['coincidenceSizeMax']) print " avg/min/max DC before inh: %-6.4f / %-6.4f / %-6.4f" % ( self._learningStats['dcBeforeInhibitionAvg'], self._learningStats['dcBeforeInhibitionMin'], self._learningStats['dcBeforeInhibitionMax']) print " avg/min/max DC after inh: %-6.4f / %-6.4f / %-6.4f" % ( self._learningStats['dcAfterInhibitionAvg'], self._learningStats['dcAfterInhibitionMin'], self._learningStats['dcAfterInhibitionMax']) print " avg/min/max boost: %-6.4f / %-6.4f / %-6.4f" % ( self._learningStats['firingBoostAvg'], self._learningStats['firingBoostMin'], self._learningStats['firingBoostMax']) print " avg/min/max # conn. 
changes: %-6.4f / %-6.4f / %-6.4f" % (
      self._learningStats['numConnectionChangesAvg'],
      self._learningStats['numConnectionChangesMin'],
      self._learningStats['numConnectionChangesMax'])
    print " max change in boost: %.1f %%" % (
      self._learningStats['firingBoostMaxChangePct'])
    print " avg change in output repr.: %-6.1f / %-6.1f %%" % (
      self._learningStats['outputRepresentationChangeAvg'],
      100.0 * self._learningStats['outputRepresentationChangeAvg'] /
      max(1,self._learningStats['activeCountAvg']))
    print " # of unique input pats seen: %d" % (
      self._learningStats['numUniqueInputsSeen'])

    # Reset the stats for the next period.
    self._periodicStatsReset()

  def _printInputSlice(self, inputSlice, prefix=''):
    """Print the given input slice in a nice human readable format:
    a '*' for each non-zero input, a ' ' for each zero input.

    Parameters:
    ---------------------------------------------------------------------
    inputSlice:   The 2-D slice of input to print (any non-zero element is
                  treated as active)
    prefix:       This is printed at the start of each row of the slice
    """
    # Shape of each coincidence
    # NOTE(review): rfHeight/rfWidth are unpacked but never used below;
    # the loop iterates on syns.shape[0] directly.
    rfHeight, rfWidth = inputSlice.shape
    syns = inputSlice != 0

    # One character per input: '*' if active, ' ' if not.
    def _synStr(x):
      if not x:
        return ' '
      else:
        return '*'

    # Print them out, one text row per input row.
    for row in xrange(syns.shape[0]):
      items = map(_synStr, syns[row])
      print prefix, ''.join(items)

  def _printSyns(self, cell, prefix='', showValues=False):
    """Print the synapse permanence values for the given cell in a nice,
    human, readable format.

    The permanences are read from the cell's clone master (looked up via
    self._cloneMapFlat), so all cells sharing a master print identically.

    Parameters:
    ---------------------------------------------------------------------
    cell:         which cell to print
    prefix:       This is printed at the start of each row of the coincidence
    showValues:   If True, print the values of each permanence ('#' marks a
                  connected synapse). If False, just print a ' ' if not
                  connected and a '*' if connected
    """
    # Shape of each coincidence
    # NOTE(review): rfHeight/rfWidth are unpacked from self.inputShape but
    # never used below; the loop iterates on syns.shape[0] directly.
    (rfHeight, rfWidth) = self.inputShape

    # Get the synapse permanences from this cell's clone master, as a dense
    # numpy array.
    masterNum = self._cloneMapFlat[cell]
    syns = self._masterPermanenceM[masterNum].toDense()

    # Choose the per-synapse formatter based on showValues.
    if showValues:
      def _synStr(x):
        if x == 0:
          return ' -- '
        elif x < 0.001:
          return ' 0 '
        elif x >= self.synPermConnected:
          # '#' flags a connected synapse (permanence >= synPermConnected)
          return '#%3.2f' % x
        else:
          return ' %3.2f' % x
    else:
      def _synStr(x):
        if x < self.synPermConnected:
          return ' '
        else:
          return '*'

    # Print them out, one text row per receptive-field row.
    for row in xrange(syns.shape[0]):
      items = map(_synStr, syns[row])
      if showValues:
        print prefix, ' '.join(items)
      else:
        print prefix, ''.join(items)

  def _printMemberSizes(self, over=100):
    """Print the size of each member.

    Each member is sized by pickling it (element by element for sequences)
    and measuring the pickle length; only members larger than 'over' bytes
    are printed, sorted largest first, followed by the total.

    Parameters:
    ---------------------------------------------------------------------
    over:         Only members whose pickled size exceeds this many bytes
                  are listed individually
    """
    members = self.__dict__.keys()
    sizeNamePairs = []
    totalSize = 0
    for member in members:
      item = self.__dict__[member]
      # Skip bound methods and the like.
      if hasattr(item, '__func__'):
        continue
      try:
        if hasattr(item, '__len__'):
          # Sum the pickled size of each element so unpicklable containers
          # of picklable items still report something useful.
          size = 0
          for i in xrange(len(item)):
            size += len(cPickle.dumps(item[i]))
        else:
          size = len(cPickle.dumps(item))
      # NOTE(review): bare 'except' also swallows KeyboardInterrupt and
      # SystemExit; 'except Exception' would be safer here.
      except:
        print "WARNING: Can't pickle %s" % (member)
        size = 0
      sizeNamePairs.append((size, member))
      totalSize += size

    # Print them out from highest to lowest
    sizeNamePairs.sort(reverse=True)
    for (size, name) in sizeNamePairs:
      if size > over:
        print "%10d (%10.3fMb) %s" % (size, size/1000000.0, name)
    print "\nTOTAL: %10d (%10.3fMB) " % (totalSize, totalSize/1000000.0)

  def printParams(self):
    """Print the main creation parameters associated with this instance."""
    print "FDRCSpatial2 creation parameters: "
    print "inputShape =", self.inputShape
    print "inputBorder =", self.inputBorder
    print "inputDensity =", self.inputDensity
    print "coincidencesShape =", self.coincidencesShape
    print "coincInputRadius =", self.coincInputRadius
    print "coincInputPoolPct =", self.coincInputPoolPct
    print "gaussianDist =", self.gaussianDist
    print "commonDistributions =", self.commonDistributions
    print "localAreaDensity =", self.localAreaDensity
    print "numActivePerInhArea =", self.numActivePerInhArea
    print "stimulusThreshold =", self.stimulusThreshold
    print "synPermInactiveDec =", self.synPermInactiveDec
    print "synPermActiveInc =", self.synPermActiveInc
    print "synPermActiveSharedDec =", self.synPermActiveSharedDec
    print "synPermOrphanDec =", self.synPermOrphanDec
    print "synPermConnected =", self.synPermConnected
    print "minPctDutyCycleBeforeInh =", self.minPctDutyCycleBeforeInh
    print "minPctDutyCycleAfterInh =", self.minPctDutyCycleAfterInh
    print "dutyCyclePeriod =", self.dutyCyclePeriod
    print "maxFiringBoost =", self.maxFiringBoost
    print "maxSSFiringBoost =", self.maxSSFiringBoost
    print "maxSynPermBoost =", self.maxSynPermBoost
    print "minDistance =", self.minDistance
    print "spVerbosity =", self.spVerbosity
    print "printPeriodicStats =", self.printPeriodicStats
    print "testMode =", self.testMode
    print "numCloneMasters =", self.numCloneMasters

Update printParams in old SP

# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

"""Spatial pooler implementation.

TODO: Change print statements to use the logging module.
""" import copy import cPickle import inspect import itertools import math import numpy import numpy.random from operator import itemgetter import os import random import struct import sys import time from nupic.bindings.algorithms import (adjustMasterValidPermanence, cpp_overlap, cpp_overlap_sbm, Inhibition2) from nupic.bindings.math import (count_gte, GetNTAReal, Random as NupicRandom, SM_01_32_32, SM32) from nupic.math.cross import cross from nupic.research import fdrutilities as fdru realDType = GetNTAReal() gPylabInitialized = False # kDutyCycleFactor add dutyCycleAfterInh to overlap in Inhibition step to be a # tie breaker kDutyCycleFactor = 0.01 def _extractCallingMethodArgs(): """ Returns args dictionary from the calling method """ callingFrame = inspect.stack()[1][0] argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame) argNames.remove("self") args = copy.copy(frameLocalVarDict) for varName in frameLocalVarDict: if varName not in argNames: args.pop(varName) return args class FDRCSpatial2(object): """ Class for spatial pooling based on fixed random distributed representation (FDR). This version of FDRCSpatial inlcudes adaptive receptive fields, no-dupe rules and gradual boosting. It supports 1-D and 2-D topologies with cloning. 
  """

  def __init__(self,
               inputShape=(32, 32),
               inputBorder=8,
               inputDensity=1.0,
               coincidencesShape=(48, 48),
               coincInputRadius=16,
               coincInputPoolPct=1.0,
               gaussianDist=False,
               commonDistributions=False,
               localAreaDensity=-1.0,
               numActivePerInhArea=10.0,
               stimulusThreshold=0,
               synPermInactiveDec=0.01,
               synPermActiveInc=0.1,
               synPermActiveSharedDec=0.0,
               synPermOrphanDec=0.0,
               synPermConnected=0.10,
               minPctDutyCycleBeforeInh=0.001,
               minPctDutyCycleAfterInh=0.001,
               dutyCyclePeriod=1000,
               maxFiringBoost=10.0,
               maxSSFiringBoost=2.0,
               maxSynPermBoost=10.0,
               minDistance=0.0,
               cloneMap=None,
               numCloneMasters=-1,
               seed=-1,
               spVerbosity=0,
               printPeriodicStats=0,
               testMode=False,
               globalInhibition=False,
               spReconstructionParam="unweighted_mean",
               useHighTier=True,
               randomSP=False,
               ):
    """
    Parameters:
    ----------------------------
    inputShape:           Dimensions of the input vector as (height, width),
                          e.g. (24, 72). Input from a sensor is interpreted as
                          having a 2-D topology of 24 pixels high and 72 wide.
    inputBorder:          The first column from an edge will be centered over
                          an input which is 'inputBorder' inputs from the edge.
    inputDensity:         Density of the input; used only to size the initial
                          number of connected synapses per column. The lower
                          the density, the more initial connections each
                          column is given.
    coincidencesShape:    Dimensions of the column layout as (height, width),
                          e.g. (80, 100) means 80*100 = 8000 columns arranged
                          in a 2-D topology of 80 rows and 100 columns.
    coincInputRadius:     Max radius of each column's receptive field; bounds
                          memory and processing time. Defines a square RF of
                          side 2 * coincInputRadius + 1.
    coincInputPoolPct:    What percent of a column's receptive field is
                          available for potential synapses. At init time,
                          coincInputPoolPct * (2*coincInputRadius + 1)^2
                          potential synapses are chosen from the RF.
    gaussianDist:         If True, initial permanences have a gaussian
                          distribution favoring inputs directly below the
                          column; if False, they are randomly distributed
                          across the column's entire potential RF.
    commonDistributions:  If True (faster startup), every column gets the same
                          initial permanence values. Set False (slower
                          startup) if you will use the network untrained.
    localAreaDensity:     Desired density of active columns within a local
                          inhibition area (whose size comes from the
                          internally-computed inhibitionRadius). Inhibition
                          keeps at most localAreaDensity * (columns in area)
                          columns ON per area. Mutually exclusive with
                          numActivePerInhArea (the other must be -1).
    numActivePerInhArea:  Alternate density control: at most this many columns
                          remain ON within a local inhibition area. As columns
                          learn and their RFs grow, inhibitionRadius grows and
                          net active density *decreases* — unlike
                          localAreaDensity, which keeps density constant.
    stimulusThreshold:    Minimum number of active synapses required for a
                          column to turn ON; guards against noise input.
    synPermInactiveDec:   How much an inactive synapse is decremented, as a
                          percent of a fully grown synapse.
    synPermActiveInc:     How much an active synapse is incremented, as a
                          percent of a fully grown synapse.
    synPermActiveSharedDec: Decrement applied to an active synapse that is
                          connected to another simultaneously-active column,
                          as a percent of a fully grown synapse.
    synPermOrphanDec:     Decrement applied to active synapses of an "orphan"
                          column — one with high input overlap that was
                          nevertheless inhibited.
    synPermConnected:     Default connected threshold: any synapse with
                          permanence above it is "connected" and can
                          contribute to firing. Typically 0.10. Cells whose
                          pre-inhibition activity falls below
                          minDutyCycleBeforeInh get a lower internal
                          threshold.
    minPctDutyCycleBeforeInh: Number in [0, 1] setting a floor on how often a
                          column should have >= stimulusThreshold active
                          inputs. Each column periodically sets its own
                          minimum to minPctDutyCycleBeforeInh * max(duty
                          cycles of columns within its inhibition radius);
                          columns below it get all permanences boosted by
                          synPermActiveInc, letting a cell search for new
                          inputs when its learned inputs go silent or are
                          "hijacked" by the no-dupe rule.
    minPctDutyCycleAfterInh: Number in [0, 1] setting a floor on how often a
                          column should turn ON after inhibition; computed the
                          same way as above. Columns below it get their boost
                          factor increased.
    dutyCyclePeriod:      Period used to compute duty cycles. Higher values
                          respond more slowly to changes in boost or
                          synPerConnectedCell; lower values are less stable
                          and may oscillate.
    maxFiringBoost:       Maximum firing-level boost factor. Raw firing
                          strength is multiplied by a boost in [1.0,
                          maxFiringBoost]: 1.0 when duty cycle >=
                          minDutyCycle, maxFiringBoost at duty cycle 0, and
                          linear in between.
    maxSSFiringBoost:     Once a column turns ON, its boost immediately drops
                          to maxSSFiringBoost if it was above it (by raising
                          its computed duty cycle), preventing a highly
                          boosted cell from winning on too many diverse
                          inputs in a row.
    maxSynPermBoost:      Maximum synPermActiveInc boost factor, in [1.0,
                          maxSynPermBoost], interpolated from duty cycle the
                          same way as maxFiringBoost.
    minDistance:          Controls input quantization, in [0, 1]. 0 means
                          every unique input gets a unique output (up to the
                          number of columns); higher values group similar
                          inputs into one output. Only columns overlapping
                          the input less than 100*(1.0-minDistance) percent
                          can lose to a boosted, 'bored' cell.
    cloneMap:             Array (numColumnsHigh, numColumnsWide) giving the
                          clone index for each column.
    numCloneMasters:      Number of distinct clones in the map, i.e.
                          outputCloningWidth * outputCloningHeight.
    seed:                 Seed for our own pseudo-random number generator.
    spVerbosity:          Verbosity level: 0, 1, 2, or 3.
    printPeriodicStats:   If > 0, every 'printPeriodicStats' iterations print
                          learning statistics (avg under/over-coverage, avg
                          active column count, etc.) for the last period.
    testMode:             If True, run both the C++ and python
                          implementations of all dual-implementation internal
                          functions and verify they produce the same result.
    globalInhibition:     If True, enforce localAreaDensity/numActivePerInhArea
                          over the entire region, ignoring the dynamically
                          calculated inhibitionRadius (equivalent to an
                          inhibition radius covering the whole region).
    spReconstructionParam: Which SP reconstruction optimization to use. Firing
                          strength is weighted by percent overlap, permanence
                          or duty cycle for 'pctOverlap', 'permanence', or
                          'dutycycle' respectively; 'maximum_firingstrength'
                          uses the max permanence-weighted firing strength
                          instead of the weighted sum.
    useHighTier:          "High tier" handling for sparse input spaces: if
                          over (1-minDistance) percent of a column's connected
                          synapses are active it automatically wins. If False,
                          columns activate on absolute overlap and boosting is
                          disabled to prevent pattern oscillation.
    randomSP:             If True, the SP never updates its permanences and
                          uses its initial configuration for all inferences.
    """
    # Save our __init__ args for debugging
    self._initArgsDict = _extractCallingMethodArgs()

    # Handle people instantiating us directly that don't pass in a cloneMap...
    # This creates a clone map without any cloning
    if cloneMap is None:
      cloneMap, numCloneMasters = fdru.makeCloneMap(
          columnsShape=coincidencesShape,
          outputCloningWidth=coincidencesShape[1],
          outputCloningHeight=coincidencesShape[0]
          )
    self.numCloneMasters = numCloneMasters
    self._cloneMapFlat = cloneMap.reshape((-1,))

    # Save creation parameters
    self.inputShape = int(inputShape[0]), int(inputShape[1])
    self.inputBorder = inputBorder
    self.inputDensity = inputDensity
    self.coincidencesShape = coincidencesShape
    self.coincInputRadius = coincInputRadius
    self.coincInputPoolPct = coincInputPoolPct
    self.gaussianDist = gaussianDist
    self.commonDistributions = commonDistributions
    self.localAreaDensity = localAreaDensity
    self.numActivePerInhArea = numActivePerInhArea
    self.stimulusThreshold = stimulusThreshold
    self.synPermInactiveDec = synPermInactiveDec
    self.synPermActiveInc = synPermActiveInc
    self.synPermActiveSharedDec = synPermActiveSharedDec
    self.synPermOrphanDec = synPermOrphanDec
    self.synPermConnected = synPermConnected
    self.minPctDutyCycleBeforeInh = minPctDutyCycleBeforeInh
    self.minPctDutyCycleAfterInh = minPctDutyCycleAfterInh
    self.dutyCyclePeriod = dutyCyclePeriod
    self.maxFiringBoost = maxFiringBoost
    self.maxSSFiringBoost = maxSSFiringBoost
    self.maxSynPermBoost = maxSynPermBoost
    self.minDistance = minDistance
    self.spVerbosity = spVerbosity
    self.printPeriodicStats = printPeriodicStats
    self.testMode = testMode
    self.globalInhibition = globalInhibition
    self.spReconstructionParam = spReconstructionParam
    # Normalize possibly-integer flags to booleans.
    self.useHighTier = useHighTier != 0
    self.randomSP = randomSP != 0
    # Without the high tier, the after-inhibition duty cycle floor (and the
    # boosting it drives) is disabled.
    if not self.useHighTier:
      self.minPctDutyCycleAfterInh = 0
    self.fileCount = 0
    self._runIter = 0

    # Start at iteration #0
    self._iterNum = 0            # Number of learning iterations
    self._inferenceIterNum = 0   # Number of inference iterations

    # Print creation parameters
    if spVerbosity >= 2:
      self.printParams()
      print "seed =", seed

    # Check for errors
    assert (self.numActivePerInhArea == -1 or self.localAreaDensity == -1)
    assert (self.inputShape[1] > 2 * self.inputBorder)
    # 1D layouts have inputShape[0] == 1
    if self.inputShape[0] > 1:
      assert self.inputShape[0] > 2 * self.inputBorder

    # Calculate other member variables
    self._coincCount = int(self.coincidencesShape[0] *
                           self.coincidencesShape[1])
    self._inputCount = int(self.inputShape[0] * self.inputShape[1])
    self._synPermMin = 0.0
    self._synPermMax = 1.0
    self._pylabInitialized = False
    # The rate at which we bump up all synapses in response to not passing
    # stimulusThreshold
    self._synPermBelowStimulusInc = self.synPermConnected / 10.0
    self._hasTopology = True
    if self.inputShape[0] == 1:  # 1-D layout
      self._coincRFShape = (1, (2 * coincInputRadius + 1))
      # If we only have 1 column of coincidences, then assume the user wants
      # each coincidence to cover the entire input
      if self.coincidencesShape[1] == 1:
        assert self.inputBorder >= (self.inputShape[1] - 1) // 2
        assert coincInputRadius >= (self.inputShape[1] - 1) // 2
        self._coincRFShape = (1, self.inputShape[1])
        self._hasTopology = False
    else:  # 2-D layout
      self._coincRFShape = ((2*coincInputRadius + 1),
                            (2*coincInputRadius + 1))

    # This gets set to True in finishLearning. Once set, we don't allow
    # learning anymore and delete all member variables needed only for
    # learning.
    self._doneLearning = False

    # Init random seed
    self._seed(seed)

    # Hard-coded in the current case
    self.randomTieBreakingFraction = 0.5

    # The permanence values used to initialize the master coincs are from
    # this initial permanence array
    # The initial permanence is gaussian shaped with mean at center and
    # variance carefully chosen to have connected synapses
    initialPermanence = self._initialPermanence()

    # masterPotentialM, masterPermanenceM and masterConnectedM are numpy
    # arrays of dimensions (coincCount, coincRfShape[0], coincRFShape[1])
    #
    # masterPotentialM:  Keeps track of the potential synapses of each
    #                    master. Potential synapses are marked as True
    # masterPermanenceM: Holds the permanence values of the potential
    #                    synapses. The values can range from 0.0 to 1.0
    # masterConnectedM:  Keeps track of the connected synapses of each
    #                    master. Connected synapses are the potential
    #                    synapses with permanence values greater than
    #                    synPermConnected.
    self._masterPotentialM, self._masterPermanenceM = (
        self._makeMasterCoincidences(self.numCloneMasters, self._coincRFShape,
                                     self.coincInputPoolPct,
                                     initialPermanence, self.random))

    # Update connected coincidences, the connected synapses have permanence
    # values greater than synPermConnected.
    self._masterConnectedM = []
    dense = numpy.zeros(self._coincRFShape)
    for i in xrange(self.numCloneMasters):
      self._masterConnectedM.append(SM_01_32_32(dense))

    # coinc sizes are used in normalizing the raw overlaps
    self._masterConnectedCoincSizes = numpy.empty(self.numCloneMasters,
                                                  'uint32')

    # Make one mondo coincidence matrix for all cells at once. It has one row
    # per cell. The width of each row is the entire input width. There will
    # be ones in each row where that cell has connections. When we have
    # cloning, and we modify the connections for a clone master, we will
    # update all cells that share that clone master with the new connections.
    self._allConnectedM = SM_01_32_32(self._inputCount)
    self._allConnectedM.resize(self._coincCount, self._inputCount)

    # Initialize the dutyCycles and boost factors per clone master
    self._dutyCycleBeforeInh = numpy.zeros(self.numCloneMasters,
                                           dtype=realDType)
    self._minDutyCycleBeforeInh = numpy.zeros(self.numCloneMasters,
                                              dtype=realDType)
    self._dutyCycleAfterInh = numpy.zeros(self.numCloneMasters,
                                          dtype=realDType)
    self._minDutyCycleAfterInh = numpy.zeros(self.numCloneMasters,
                                             dtype=realDType)
    # TODO: We don't need to store _boostFactors, can be calculated from duty
    # cycle
    self._firingBoostFactors = numpy.ones(self.numCloneMasters,
                                          dtype=realDType)
    if self.useHighTier:
      self._firingBoostFactors *= maxFiringBoost

    # Selectively turn on/off C++ for various methods
    # TODO: Can we remove the conditional?
    if self.testMode:
      self._computeOverlapsImp = "py"             # "py" or "cpp" or "test"
      self._updatePermanenceGivenInputImp = "py"  # "py" or "cpp" or "test"
    else:
      self._computeOverlapsImp = "py"             # "py" or "cpp" or "test"
      self._updatePermanenceGivenInputImp = "py"  # "py" or "cpp" or "test"

    # This is used to hold our learning stats (via getLearningStats())
    self._learningStats = dict()

    # These will hold our random state, which we return from __getstate__ and
    # reseed our random number generators from in __setstate__ so that
    # a saved/restored SP produces the exact same behavior as one that
    # continues. This behavior allows us to write unit tests that verify
    # that the behavior of an SP does not change due to saving/loading from a
    # checkpoint
    self._randomState = None
    self._numpyRandomState = None
    self._nupicRandomState = None

    # Init ephemeral members
    # This also calculates the slices and global inhibitionRadius and
    # allocates the inhibitionObj
    self._initEphemerals()

    # If we have no cloning, make sure no column has potential or connected
    # synapses outside the input area
    if self.numCloneMasters == self._coincCount:
      validMask = numpy.zeros(self._coincRFShape, dtype=realDType)
      for masterNum in xrange(self._coincCount):
        coincSlice = self._coincSlices[masterNum]
        validMask.fill(0)
        validMask[coincSlice] = 1
        self._masterPotentialM[masterNum].logicalAnd(SM_01_32_32(validMask))
        self._masterPermanenceM[masterNum].elementMultiply(validMask)

        # Raise all permanences up until the number of connected is above
        # our desired target,
        self._raiseAllPermanences(masterNum,
            minConnections = self.stimulusThreshold / self.inputDensity)

    # Calculate the number of connected synapses in each master coincidence
    # now
    self._updateConnectedCoincidences()

  def _getEphemeralMembers(self):
    """
    List of our member variables that we don't need to be saved
    """
    return ['_inputLayout',
            '_cellsForMaster',
            '_columnCenters',
            #'_cellRFClipped',
            '_inputSlices',
            '_coincSlices',
            '_activeInput',
            '_permChanges',
            '_dupeInput',
            '_onCells',
            '_masterOnCells',
            '_onCellIndices',
            '_inhibitionObj',
            '_denseOutput',
            '_overlaps',
            '_anomalyScores',
            '_inputUse',
            '_updatePermanenceGivenInputFP',
            '_computeOverlapsFP',
            '_stats',
            '_rfRadiusAvg',
            '_rfRadiusMin',
            '_rfRadiusMax',
            '_topDownOut',
            '_topDownParentCounts',
            ]

  def _initEphemerals(self):
    """
    Initialize all ephemeral members after being restored to a pickled state.
    """
    # Used by functions which refers to inputs in absolute space
    # getLearnedCM, cm,....
    self._inputLayout = numpy.arange(self._inputCount,
                                     dtype=numpy.uint32).reshape(self.inputShape)

    # This array returns the list of cell indices that correspond to each
    # master
    cloningOn = (self.numCloneMasters != self._coincCount)
    if cloningOn:
      self._cellsForMaster = []
      for masterNum in xrange(self.numCloneMasters):
        self._cellsForMaster.append(
            numpy.where(self._cloneMapFlat == masterNum)[0])
    else:
      self._cellsForMaster = None

    # TODO: slices are not required for the C++ helper functions
    # Figure out the slices of shaped input that each column sees...
    # Figure out the valid region of each column
    # The reason these slices are in initEphemerals is because numpy slices
    # can't be pickled
    self._setSlices()

    # This holds the output of the inhibition computation - which cells are
    # on after inhibition
    self._onCells = numpy.zeros(self._coincCount, dtype=realDType)
    self._masterOnCells = numpy.zeros(self.numCloneMasters, dtype=realDType)
    self._onCellIndices = numpy.zeros(self._coincCount, dtype='uint32')

    # The inhibition object gets allocated by _updateInhibitionObj() during
    # the first compute and re-allocated periodically during learning
    self._inhibitionObj = None
    self._rfRadiusAvg = 0   # Also calculated by _updateInhibitionObj
    self._rfRadiusMin = 0
    self._rfRadiusMax = 0

    # Used by the caller to optionally cache the dense output
    self._denseOutput = None

    # This holds the overlaps (in absolute number of connected synapses) of
    # each coinc with input.
    self._overlaps = numpy.zeros(self._coincCount, dtype=realDType)

    # This holds the percent overlaps (number of active inputs / number of
    # connected synapses) of each coinc with input.
    self._pctOverlaps = numpy.zeros(self._coincCount, dtype=realDType)

    # This is the value of the anomaly score for each column (after
    # inhibition).
    self._anomalyScores = numpy.zeros_like(self._overlaps)

    # This holds the overlaps before stimulus threshold - used for verbose
    # messages only.
    self._overlapsBST = numpy.zeros(self._coincCount, dtype=realDType)

    # This holds the number of coincs connected to an input.
    if not self._doneLearning:
      self._inputUse = numpy.zeros(self.inputShape, dtype=realDType)

    # These are boolean matrices, the same shape as the input.
    if not self._doneLearning:
      self._activeInput = numpy.zeros(self.inputShape, dtype='bool')
      self._dupeInput = numpy.zeros(self.inputShape, dtype='bool')

    # This is used to hold self.synPermActiveInc where the input is on
    # and -self.synPermInctiveDec where the input is off
    if not self._doneLearning:
      self._permChanges = numpy.zeros(self.inputShape, dtype=realDType)

    # These are used to compute and hold the output from topDownCompute
    # self._topDownOut = numpy.zeros(self.inputShape, dtype=realDType)
    # self._topDownParentCounts = numpy.zeros(self.inputShape, dtype='int')

    # Fill in the updatePermanenceGivenInput method pointer, which depends on
    # chosen language.
    if self._updatePermanenceGivenInputImp == "py":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputPy
    elif self._updatePermanenceGivenInputImp == "cpp":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputCPP
    elif self._updatePermanenceGivenInputImp == "test":
      self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputTest
    else:
      assert False

    # Fill in the computeOverlaps method pointer, which depends on
    # chosen language.
    if self._computeOverlapsImp == "py":
      self._computeOverlapsFP = self._computeOverlapsPy
    elif self._computeOverlapsImp == "cpp":
      self._computeOverlapsFP = self._computeOverlapsCPP
    elif self._computeOverlapsImp == "test":
      self._computeOverlapsFP = self._computeOverlapsTest
    else:
      assert False

    # These variables are used for keeping track of learning statistics (when
    # self.printPeriodicStats is used).
    self._periodicStatsCreate()

  def compute(self, flatInput, learn=False, infer=True, computeAnomaly=False):
    """Compute with the current input vector.
Parameters: ---------------------------- input : the input vector (numpy array) learn : if True, adapt the input histogram based on this input infer : whether to do inference or not """ # If we are using a random SP, ignore the learn parameter if self.randomSP: learn = False # If finishLearning has been called, don't allow learning anymore if learn and self._doneLearning: raise RuntimeError("Learning can not be performed once finishLearning" " has been called.") assert (learn or infer) assert (flatInput.ndim == 1) and (flatInput.shape[0] == self._inputCount) assert (flatInput.dtype == realDType) input = flatInput.reshape(self.inputShape) # Make sure we've allocated the inhibition object lazily if self._inhibitionObj is None: self._updateInhibitionObj() # Reset first timer if self.printPeriodicStats > 0 and self._iterNum == 0: self._periodicStatsReset() # Using cloning? cloningOn = (self.numCloneMasters != self._coincCount) # If we have high verbosity, save the overlaps before stimulus threshold # so we can print them out at the end if self.spVerbosity >= 2: print "===============================================================" print "Iter:%d" % self._iterNum, "inferenceIter:%d" % \ self._inferenceIterNum self._computeOverlapsFP(input, stimulusThreshold=0) self._overlapsBST[:] = self._overlaps connectedCountsOnEntry = self._masterConnectedCoincSizes.copy() if self.spVerbosity >= 3: inputNZ = flatInput.nonzero()[0] print "active inputs: (%d)" % len(inputNZ), inputNZ # TODO: Port to C++, arguments may be different - t1YXArr, # coincInputRadius,... 
# Calculate the raw overlap of each cell # Overlaps less than stimulus threshold are set to zero in # _calculateOverlaps # This places the result into self._overlaps self._computeOverlapsFP(input, stimulusThreshold=self.stimulusThreshold) # Save the original overlap values, before boosting, for the purpose of # anomaly detection if computeAnomaly: self._anomalyScores[:] = self._overlaps[:] if learn: # Update each cell's duty cycle before inhibition # Only cells with overlaps greater stimulus threshold are considered as # active. # Stimulus threshold has already been applied # TODO: Port to C++? Loops over all coincs # Only updating is carried out here, bump up happens later onCellIndices = numpy.where(self._overlaps > 0) if cloningOn: onMasterIndices = self._cloneMapFlat[onCellIndices] self._masterOnCells.fill(0) self._masterOnCells[onMasterIndices] = 1 denseOn = self._masterOnCells else: self._onCells.fill(0) self._onCells[onCellIndices] = 1 denseOn = self._onCells # dutyCyclePeriod = self._iterNum + 1 let _dutyCycleBeforeInh # and _dutyCycleAfterInh represent real firing percentage at the # beginning of learning. This will effect boosting and let unlearned # coincidences have high boostFactor at beginning. self.dutyCyclePeriod = min(self._iterNum + 1, 1000) self._dutyCycleBeforeInh = ( ((self.dutyCyclePeriod - 1) * self._dutyCycleBeforeInh + denseOn) / self.dutyCyclePeriod) # Compute firing levels based on boost factor and raw overlap. Update # self._overlaps in place, replacing it with the boosted overlap. 
We also # computes percent overlap of each column and store that into # self._pctOverlaps if cloningOn: self._pctOverlaps[:] = self._overlaps self._pctOverlaps /= self._masterConnectedCoincSizes[self._cloneMapFlat] boostFactors = self._firingBoostFactors[self._cloneMapFlat] else: self._pctOverlaps[:] = self._overlaps potentials = self._masterConnectedCoincSizes self._pctOverlaps /= numpy.maximum(1, potentials) boostFactors = self._firingBoostFactors # To process minDistance, we do the following: # 1.) All cells which do not overlap the input "highly" (less than # minDistance), are considered to be in the "low tier" and get their # overlap multiplied by their respective boost factor. # 2.) All other cells, which DO overlap the input highly, get a "high tier # offset" added to their overlaps, and boost is not applied. The # "high tier offset" is computed as the max of all the boosted # overlaps from step #1. This insures that a cell in this high tier # will never lose to a cell from the low tier. if self.useHighTier: highTier = numpy.where(self._pctOverlaps >= (1.0 - self.minDistance))[0] else: highTier = [] someInHighTier = len(highTier) > 0 if someInHighTier: boostFactors = numpy.array(boostFactors) boostFactors[highTier] = 1.0 # Apply boostFactors only in learning phase not in inference phase. if learn: self._overlaps *= boostFactors if someInHighTier: highTierOffset = self._overlaps.max() + 1.0 self._overlaps[highTier] += highTierOffset # Cache the dense output for debugging. if self._denseOutput is not None: self._denseOutput = self._overlaps.copy() # Incorporate inhibition and see who is firing after inhibition. # We don't need this method to process stimulusThreshold because we # already processed it. # Also, we pass in a small 'addToWinners' amount which gets added to the # winning elements as we go along. This prevents us from choosing more than # topN winners per inhibition region when more than topN elements all have # the same max high score. 
learnedCellsOverlaps = numpy.array(self._overlaps) if infer and not learn: # Cells that have never learnt are not allowed to win during inhibition if not self.randomSP: learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = 0 else: # Boost the unlearned cells to 1000 so that the winning columns are # picked randomly. From the set of unlearned columns. Boost columns that # havent been learned with uniformly to 1000 so that inhibition picks # randomly from them. if self.useHighTier: learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = ( learnedCellsOverlaps.max() + 1) # Boost columns that are in highTier (ie. they match the input very # well). learnedCellsOverlaps[highTier] += learnedCellsOverlaps.max() + 1 # Small random tiebreaker for columns with equal overlap tieBreaker = numpy.random.rand(*learnedCellsOverlaps.shape).astype( realDType) learnedCellsOverlaps += 0.1 * tieBreaker numOn = self._inhibitionObj.compute( learnedCellsOverlaps, self._onCellIndices, 0.0, # stimulusThreshold max(learnedCellsOverlaps)/1000.0, # addToWinners ) self._onCells.fill(0) if numOn > 0: onCellIndices = self._onCellIndices[0:numOn] self._onCells[onCellIndices] = 1 else: onCellIndices = [] # Compute the anomaly scores only for the winning columns. if computeAnomaly: self._anomalyScores *= self._onCells self._anomalyScores *= self._dutyCycleAfterInh if self.spVerbosity >= 2: print "inhRadius", self._inhibitionObj.getInhibitionRadius() print "inhLocalAreaDensity", self._inhibitionObj.getLocalAreaDensity() print "numFiring", numOn # Capturing learning stats? 
If so, capture the cell overlap statistics if self.printPeriodicStats > 0: activePctOverlaps = self._pctOverlaps[onCellIndices] self._stats['cellPctOverlapSums'] += activePctOverlaps.sum() if cloningOn: onMasterIndices = self._cloneMapFlat[onCellIndices] else: onMasterIndices = onCellIndices self._stats['cellOverlapSums'] += ( activePctOverlaps * self._masterConnectedCoincSizes[onMasterIndices]).sum() # Compute which cells had very high overlap, but were still # inhibited. These we are calling our "orphan cells", because they are # representing an input which is already better represented by another # cell. if self.synPermOrphanDec > 0: orphanCellIndices = set(numpy.where(self._pctOverlaps >= 1.0)[0]) orphanCellIndices.difference_update(onCellIndices) else: orphanCellIndices = [] if learn: # Update the number of coinc connections per input # During learning (adapting permanence values), we need to be able to # recognize dupe inputs - inputs that go two 2 or more active cells if self.synPermActiveSharedDec != 0: self._updateInputUse(onCellIndices) # For the firing cells, update permanence values. onMasterIndices = self._adaptSynapses(onCellIndices, orphanCellIndices, input) # Increase the permanence values of columns which haven't passed # stimulus threshold of overlap with at least a minimum frequency self._bumpUpWeakCoincidences() # Update each cell's after-inhibition duty cycle # TODO: As the on-cells are sparse after inhibition, we can have # a different updateDutyCycles function taking advantage of the sparsity if cloningOn: self._masterOnCells.fill(0) self._masterOnCells[onMasterIndices] = 1 denseOn = self._masterOnCells else: denseOn = self._onCells self._dutyCycleAfterInh = (( (self.dutyCyclePeriod - 1) * self._dutyCycleAfterInh + denseOn) / self.dutyCyclePeriod) # Update the boost factors based on firings rate after inhibition. self._updateBoostFactors() # Increment iteration number and perform our periodic tasks if it's time. 
if (self._iterNum + 1) % 50 == 0: self._updateInhibitionObj() self._updateMinDutyCycles( self._dutyCycleBeforeInh, self.minPctDutyCycleBeforeInh, self._minDutyCycleBeforeInh) self._updateMinDutyCycles( self._dutyCycleAfterInh, self.minPctDutyCycleAfterInh, self._minDutyCycleAfterInh) # Next iteration if learn: self._iterNum += 1 if infer: self._inferenceIterNum += 1 if learn: # Capture and possibly print the periodic stats if self.printPeriodicStats > 0: self._periodicStatsComputeEnd(onCellIndices, flatInput.nonzero()[0]) # Verbose print other stats if self.spVerbosity >= 2: cloning = (self.numCloneMasters != self._coincCount) print " #connected on entry: ", fdru.numpyStr( connectedCountsOnEntry, '%d ', includeIndices=True) print " #connected on exit: ", fdru.numpyStr( self._masterConnectedCoincSizes, '%d ', includeIndices=True) if self.spVerbosity >= 3 or not cloning: print " overlaps: ", fdru.numpyStr(self._overlapsBST, '%d ', includeIndices=True, includeZeros=False) print " firing levels: ", fdru.numpyStr(self._overlaps, '%.4f ', includeIndices=True, includeZeros=False) print " on after inhibition: ", onCellIndices if not self._doneLearning: print " minDutyCycleBeforeInh:", fdru.numpyStr( self._minDutyCycleBeforeInh, '%.4f ', includeIndices=True) print " dutyCycleBeforeInh: ", fdru.numpyStr(self._dutyCycleBeforeInh, '%.4f ', includeIndices=True) print " belowMinBeforeInh: " % numpy.nonzero( self._dutyCycleBeforeInh \ < self._minDutyCycleBeforeInh)[0] print " minDutyCycleAfterInh: ", fdru.numpyStr( self._minDutyCycleAfterInh, '%.4f ', includeIndices=True) print " dutyCycleAfterInh: ", fdru.numpyStr(self._dutyCycleAfterInh, '%.4f ', includeIndices=True) print " belowMinAfterInh: " % numpy.nonzero( self._dutyCycleAfterInh \ < self._minDutyCycleAfterInh)[0] print " firingBoosts: ", fdru.numpyStr(self._firingBoostFactors, '%.4f ', includeIndices=True) print elif self.spVerbosity >= 1: print "SP: learn: ", learn print "SP: active outputs(%d): " % (len(onCellIndices)), 
onCellIndices

    self._runIter += 1

    # Return inference result
    return self._onCells


  def __getstate__(self):
    """Pickle support: snapshot all three RNG states, then return a copy of
    __dict__ with the ephemeral (unpicklable / rebuildable) members removed.
    """
    # Update our random states
    self._randomState = random.getstate()
    self._numpyRandomState = numpy.random.get_state()
    self._nupicRandomState = self.random.getState()

    state = self.__dict__.copy()
    # Delete ephemeral members that we don't want pickled
    for ephemeralMemberName in self._getEphemeralMembers():
      if ephemeralMemberName in state:
        del state[ephemeralMemberName]
    return state


  def __setstate__(self, state):
    """Unpickle support: restore __dict__, restore the RNG states saved by
    __getstate__, and rebuild the ephemeral members.
    """
    self.__dict__.update(state)

    # Support older checkpoints
    # These fields were added on 2010-10-05 and _iterNum was preserved
    if not hasattr(self, '_randomState'):
      self._randomState = random.getstate()
      self._numpyRandomState = numpy.random.get_state()
      self._nupicRandomState = self.random.getState()
      self._iterNum = 0

    # Init our random number generators
    random.setstate(self._randomState)
    numpy.random.set_state(self._numpyRandomState)
    self.random.setState(self._nupicRandomState)

    # Load things that couldn't be pickled...
    self._initEphemerals()


  def getAnomalyScore(self):
    """Get the aggregate anomaly score for this input pattern

    Returns: A single scalar value for the anomaly score
    """
    # NOTE(review): numNonzero is computed but never used in the returned
    # value; the score depends only on the sum of the per-column anomaly
    # scores. Confirm whether this is intentional before removing it.
    numNonzero = len(numpy.nonzero(self._anomalyScores)[0])
    return 1.0 / (numpy.sum(self._anomalyScores) + 1)


  def getLearningStats(self):
    """Return a dictionary containing a set of statistics related to learning.

    Here is a list of what is returned:

    'activeCountAvg':
        The average number of active columns seen over the last N training
        iterations, where N is set by the constructor parameter
        printPeriodicStats. If printPeriodicStats is not turned on (== 0),
        then this is -1

    'underCoveragePct':
        The average under-coverage of the input as seen over the last N
        training iterations, where N is set by the constructor parameter
        printPeriodicStats. If printPeriodicStats is not turned on (== 0),
        then this is -1

    'overCoveragePct':
        The average over-coverage of the input as seen over the last N
        training iterations, where N is set by the constructor parameter
        printPeriodicStats. If printPeriodicStats is not turned on (== 0),
        then this is -1

    'numConnectionChangesAvg':
        The overall average number of connection changes made per active
        column per iteration, over the last N training iterations, where N is
        set by the constructor parameter printPeriodicStats. This gives an
        indication as to how much learning is still occurring. If
        printPeriodicStats is not turned on (== 0), then this is -1

    'numConnectionChangesMin':
        The minimum number of connection changes made to an active column per
        iteration, over the last N training iterations, where N is set by the
        constructor parameter printPeriodicStats. This gives an indication as
        to how much learning is still occurring. If printPeriodicStats is not
        turned on (== 0), then this is -1

    'numConnectionChangesMax':
        The maximum number of connection changes made to an active column per
        iteration, over the last N training iterations, where N is set by the
        constructor parameter printPeriodicStats. This gives an indication as
        to how much learning is still occurring. If printPeriodicStats is not
        turned on (== 0), then this is -1

    'rfSize':
        The average receptive field size of the columns.

    'inhibitionRadius':
        The average inhibition radius of the columns.

    'targetDensityPct':
        The most recent target local area density used, as a percent
        (0 -> 100)

    'coincidenceSizeAvg':
        The average learned coincidence size

    'coincidenceSizeMin':
        The minimum learned coincidence size

    'coincidenceSizeMax':
        The maximum learned coincidence size

    'dcBeforeInhibitionAvg':
        The average of duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMin':
        The minimum duty cycle before inhibition of all coincidences

    'dcBeforeInhibitionMax':
        The maximum duty cycle before inhibition of all coincidences

    'dcAfterInhibitionAvg':
        The average of duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMin':
        The minimum duty cycle after inhibition of all coincidences

    'dcAfterInhibitionMax':
        The maximum duty cycle after inhibition of all coincidences

    'firingBoostAvg':
        The average firing boost

    'firingBoostMin':
        The minimum firing boost

    'firingBoostMax':
        The maximum firing boost
    """
    # Fill in the stats that can be computed on the fly. The transient stats
    # that depend on printPeriodicStats being on, have already been stored
    self._learningStats['rfRadiusAvg'] = self._rfRadiusAvg
    self._learningStats['rfRadiusMin'] = self._rfRadiusMin
    self._learningStats['rfRadiusMax'] = self._rfRadiusMax

    if self._inhibitionObj is not None:
      self._learningStats['inhibitionRadius'] = (
          self._inhibitionObj.getInhibitionRadius())
      self._learningStats['targetDensityPct'] = (
          100.0 * self._inhibitionObj.getLocalAreaDensity())
    else:
      print "Warning: No inhibitionObj found for getLearningStats"
      self._learningStats['inhibitionRadius'] = 0.0
      self._learningStats['targetDensityPct'] = 0.0

    self._learningStats['coincidenceSizeAvg'] = (
        self._masterConnectedCoincSizes.mean())
    self._learningStats['coincidenceSizeMin'] = (
        self._masterConnectedCoincSizes.min())
    self._learningStats['coincidenceSizeMax'] = (
        self._masterConnectedCoincSizes.max())

    # Duty cycle arrays are only maintained while learning is in progress.
    if not self._doneLearning:
      self._learningStats['dcBeforeInhibitionAvg'] = (
          self._dutyCycleBeforeInh.mean())
      self._learningStats['dcBeforeInhibitionMin'] = (
          self._dutyCycleBeforeInh.min())
      self._learningStats['dcBeforeInhibitionMax'] = (
          self._dutyCycleBeforeInh.max())
      self._learningStats['dcAfterInhibitionAvg'] = (
          self._dutyCycleAfterInh.mean())
      self._learningStats['dcAfterInhibitionMin'] = (
          self._dutyCycleAfterInh.min())
      self._learningStats['dcAfterInhibitionMax'] = (
          self._dutyCycleAfterInh.max())

    self._learningStats['firingBoostAvg'] = self._firingBoostFactors.mean()
    self._learningStats['firingBoostMin'] = self._firingBoostFactors.min()
    self._learningStats['firingBoostMax'] = self._firingBoostFactors.max()

    return self._learningStats


  def resetStats(self):
    """Reset the stats (periodic, ???). This will usually be called by user
    code at the start of each inference run (for a particular data set).

    TODO: which other stats need to be reset?  Learning stats?
    """
    self._periodicStatsReset()


  def _seed(self, seed=-1):
    """ Initialize the random seed

    Seeds all three RNGs used by this class (NupicRandom, the stdlib random
    module, and numpy.random) from the same seed; -1 means "don't seed".
    """
    if seed != -1:
      self.random = NupicRandom(seed)
      random.seed(seed)
      numpy.random.seed(seed)
    else:
      self.random = NupicRandom()


  def _initialPermanence(self):
    """Create and return a 2D matrix filled with initial permanence values.

    The returned matrix will be of shape:
      (2*coincInputRadius + 1, 2*coincInputRadius + 1).

    The initial permanence values are set between 0 and 1.0, with enough
    chosen above synPermConnected to make it highly likely that a cell will
    pass stimulusThreshold, given the size of the potential RF, the input
    pool sampling percentage, and the expected density of the active inputs.

    If gaussianDist is True, the center of the matrix will contain the
    highest permanence values and lower values will be farther from the
    center.

    If gaussianDist is False, the highest permanence values will be evenly
    distributed throughout the potential RF.
    """
    # Figure out the target number of connected synapses.
We want about 2X
    # stimulusThreshold
    minOn = 2 * max(self.stimulusThreshold, 10) / self.coincInputPoolPct \
            / self.inputDensity

    # Get the gaussian distribution, with max magnitude just slightly above
    # synPermConnected. Try to find a sigma that gives us about 2X
    # stimulusThreshold connected synapses after sub-sampling for
    # coincInputPoolPct. We will assume everything within +/- sigma will be
    # connected. This logic uses the fact that an x value of sigma generates a
    # magnitude of 0.6.
    if self.gaussianDist:
      # Only supported when we have 2D layouts
      if self._coincRFShape[0] != self._coincRFShape[1]:
        raise RuntimeError("Gaussian distibuted permanences are currently only"
                           "supported for 2-D layouts")

      # The width and height of the center "blob" in inputs is the square root
      # of the area
      onAreaDim = numpy.sqrt(minOn)

      # Sigma is at the edge of the center blob
      sigma = onAreaDim/2

      # Create the gaussian with a value of 1.0 at the center
      perms = self._gaussianMatrix(dim=max(self._coincRFShape), sigma=sigma)

      # The distance between the min and max values within the gaussian will
      # be given by 'grange'. In a gaussian, the value at sigma away from the
      # center is 0.6 * the value at the center. We want the values at sigma
      # to be synPermConnected
      maxValue = 1.0 / 0.6 * self.synPermConnected
      perms *= maxValue
      perms.shape = (-1,)

      # Now, let's clip off the low values to reduce the number of non-zeros
      # we have and reduce our memory requirements. We'll clip everything
      # farther away than 2 sigma to 0. The value of a gaussian at 2 sigma
      # is 0.135 * the value at the center
      perms[perms < (0.135 * maxValue)] = 0

    # Evenly distribute the permanences through the RF
    else:
      # Create a random distribution from 0 to 1.
      perms = numpy.random.random(self._coincRFShape)
      perms = perms.astype(realDType)

      # Set the range of values to be between 0 and
      # synPermConnected+synPermInactiveDec. This ensures that a pattern
      # will always be learned in 1 iteration
      maxValue = min(1.0, self.synPermConnected + self.synPermInactiveDec)

      # What percentage do we want to be connected?
      connectPct = 0.50

      # What value from the 0 to 1 distribution will map to synPermConnected?
      threshold = 1.0 - connectPct

      # Which will be the connected and unconnected synapses?
      connectedSyns = perms >= threshold
      unconnectedSyns = numpy.logical_not(connectedSyns)

      # Squeeze all values between threshold and 1.0 to be between
      # synPermConnected and synPermConnected + synPermActiveInc / 4
      # This makes sure the firing coincidence perms matching input bit get
      # greater than synPermConnected and other unconnectedSyns get
      # disconnected in one firing learning iteration.
      srcOffset = threshold
      srcRange = 1.0 - threshold
      dstOffset = self.synPermConnected
      dstRange = maxValue - self.synPermConnected
      perms[connectedSyns] = (perms[connectedSyns] - srcOffset)/srcRange \
                             * dstRange / 4.0 + dstOffset

      # Squeeze all values between 0 and threshold to be between 0 and
      # synPermConnected
      srcRange = threshold - 0.0
      dstRange = self.synPermConnected - 0.0
      perms[unconnectedSyns] = perms[unconnectedSyns]/srcRange \
                               * dstRange

      # Now, let's clip off the low values to reduce the number of non-zeros
      # we have and reduce our memory requirements. We'll clip everything
      # below synPermActiveInc/2 to 0
      perms[perms < (self.synPermActiveInc / 2.0)] = 0

      perms.shape = (-1,)

    return perms


  def _gaussianMatrix(self, dim, sigma):
    """ Create and return a 2D matrix filled with a gaussian distribution.

    The returned matrix will be of shape (dim, dim). The mean of the gaussian
    will be in the center of the matrix and have a value of 1.0.
    """
    gaussian = lambda x, sigma: numpy.exp(-(x**2) / (2*(sigma**2)))

    # Allocate the matrix
    m = numpy.empty((dim, dim), dtype=realDType)

    # Find the center
    center = (dim - 1) / 2.0

    # TODO: Simplify using numpy.meshgrid

    # Fill it in
    for y in xrange(dim):
      for x in xrange(dim):
        dist = numpy.sqrt((x-center)**2 + (y-center)**2)
        m[y,x] = gaussian(dist, sigma)

    return m


  def _makeMasterCoincidences(self, numCloneMasters, coincRFShape,
                              coincInputPoolPct, initialPermanence=None,
                              nupicRandom=None):
    """Make the master coincidence matrices and master input histograms.

    # TODO: Update this example
    >>> FDRCSpatial._makeMasterCoincidences(1, 2, 0.33)
    (array([[[ True,  True, False, False, False],
            [False,  True, False, False,  True],
            [False,  True, False, False, False],
            [False, False, False,  True, False],
            [ True, False, False, False, False]]], dtype=bool),
     array([[[ 0.26982325,  0.19995725,  0.        ,  0.        ,  0.        ],
            [ 0.        ,  0.94128972,  0.        ,  0.        ,  0.36316112],
            [ 0.        ,  0.06312726,  0.        ,  0.        ,  0.        ],
            [ 0.        ,  0.        ,  0.        ,  0.29740077,  0.        ],
            [ 0.81071907,  0.        ,  0.        ,  0.        ,  0.
            ]]], dtype=float32))
    """
    if nupicRandom is None:
      nupicRandom = NupicRandom(42)

    if initialPermanence is None:
      initialPermanence = self._initialPermanence()

    coincRfArea = (coincRFShape[0] * coincRFShape[1])
    # NOTE(review): coincInputPool is a float here and is passed as a size to
    # numpy.empty below; this relies on implicit truncation — confirm.
    coincInputPool = coincInputPoolPct * coincRfArea

    # We will generate a list of sparse matrices
    masterPotentialM = []
    masterPermanenceM = []

    toSample = numpy.arange(coincRfArea, dtype='uint32')
    toUse = numpy.empty(coincInputPool, dtype='uint32')
    denseM = numpy.zeros(coincRfArea, dtype=realDType)
    for i in xrange(numCloneMasters):
      nupicRandom.getUInt32Sample(toSample, toUse)

      # Put in 1's into the potential locations
      denseM.fill(0)
      denseM[toUse] = 1
      masterPotentialM.append(SM_01_32_32(denseM.reshape(coincRFShape)))

      # Put in the initial permanences
      denseM *= initialPermanence
      masterPermanenceM.append(SM32(denseM.reshape(coincRFShape)))

      # If we are not using common initial permanences, create another
      # unique one for the next cell
      if not self.commonDistributions:
        initialPermanence = self._initialPermanence()

    return masterPotentialM, masterPermanenceM


  def _updateConnectedCoincidences(self, masters=None):
    """Update 'connected' version of the given coincidence.

    Each 'connected' coincidence is effectively a binary matrix (AKA boolean)
    matrix that is the same size as the input histogram matrices. They have a
    1 wherever the inputHistogram is "above synPermConnected".
    """
    # If no masterNum given, update all of them
    if masters is None:
      masters = xrange(self.numCloneMasters)

    nCellRows, nCellCols = self._coincRFShape
    cloningOn = (self.numCloneMasters != self._coincCount)
    for masterNum in masters:
      # Where are we connected?
masterConnectedNZ = (
          self._masterPermanenceM[masterNum].whereGreaterEqual(
              0, nCellRows, 0, nCellCols, self.synPermConnected))
      rowIdxs = masterConnectedNZ[:,0]
      colIdxs = masterConnectedNZ[:,1]
      self._masterConnectedM[masterNum].setAllNonZeros(
          nCellRows, nCellCols, rowIdxs, colIdxs)
      self._masterConnectedCoincSizes[masterNum] = len(rowIdxs)

      # Update the corresponding rows in the super, mondo connected matrix
      # that come from this master
      masterConnected = (
          self._masterConnectedM[masterNum].toDense().astype('bool'))  # 0.2s
      if cloningOn:
        cells = self._cellsForMaster[masterNum]
      else:
        cells = [masterNum]
      for cell in cells:
        inputSlice = self._inputSlices[cell]
        coincSlice = self._coincSlices[cell]
        masterSubset = masterConnected[coincSlice]
        sparseCols = self._inputLayout[inputSlice][masterSubset]
        self._allConnectedM.replaceSparseRow(cell, sparseCols)  # 4s.


  def _setSlices(self):
    """Compute self._columnSlices and self._inputSlices

    self._inputSlices are used to index into the input (assuming it's been
    shaped to a 2D array) to get the receptive field of each column. There is
    one item in the list for each column.

    self._coincSlices are used to index into the coinc (assuming it's been
    shaped to a 2D array) to get the valid area of the column. There is one
    item in the list for each column.

    This function is called upon unpickling, since we can't pickle slices.
    """
    self._columnCenters = numpy.array(self._computeCoincCenters(
        self.inputShape, self.coincidencesShape, self.inputBorder))
    coincInputRadius = self.coincInputRadius
    coincHeight, coincWidth = self._coincRFShape
    inputShape = self.inputShape
    inputBorder = self.inputBorder

    # Compute the input slices for each cell. This is the slice of the entire
    # input which intersects with the cell's permanence matrix.
    if self._hasTopology:
      self._inputSlices = [
          numpy.s_[max(0, cy-coincInputRadius):
                   min(inputShape[0], cy+coincInputRadius + 1),
                   max(0, cx-coincInputRadius):
                   min(inputShape[1], cx+coincInputRadius + 1)]
          for (cy, cx) in self._columnCenters]
    else:
      self._inputSlices = [numpy.s_[0:inputShape[0], 0:inputShape[1]]
                           for (cy, cx) in self._columnCenters]

    # Flattened (start0, stop0, start1, stop1) per cell, for the C++ path.
    self._inputSlices2 = numpy.zeros(4 * len(self._inputSlices),
                                     dtype="uint32")
    k = 0
    for i in range(len(self._inputSlices)):
      self._inputSlices2[k] = self._inputSlices[i][0].start
      self._inputSlices2[k + 1] = self._inputSlices[i][0].stop
      self._inputSlices2[k + 2] = self._inputSlices[i][1].start
      self._inputSlices2[k + 3] = self._inputSlices[i][1].stop
      k = k + 4

    # Compute the coinc slices for each cell. This is which portion of the
    # cell's permanence matrix intersects with the input.
    if self._hasTopology:
      if self.inputShape[0] > 1:
        self._coincSlices = [
            numpy.s_[max(0, coincInputRadius - cy):
                     min(coincHeight, coincInputRadius + inputShape[0] - cy),
                     max(0, coincInputRadius-cx):
                     min(coincWidth, coincInputRadius + inputShape[1] - cx)]
            for (cy, cx) in self._columnCenters]
      else:
        self._coincSlices = [
            numpy.s_[0:1,
                     max(0, coincInputRadius-cx):
                     min(coincWidth, coincInputRadius + inputShape[1] - cx)]
            for (cy, cx) in self._columnCenters]
    else:
      self._coincSlices = [numpy.s_[0:coincHeight, 0:coincWidth]
                           for (cy, cx) in self._columnCenters]

    # Flattened form of the coinc slices, mirroring _inputSlices2.
    self._coincSlices2 = numpy.zeros((4*len(self._coincSlices)),
                                     dtype="uint32")
    k = 0
    for i in range(len(self._coincSlices)):
      self._coincSlices2[k] = self._coincSlices[i][0].start
      self._coincSlices2[k + 1] = self._coincSlices[i][0].stop
      self._coincSlices2[k + 2] = self._coincSlices[i][1].start
      self._coincSlices2[k + 3] = self._coincSlices[i][1].stop
      k = k + 4


  @staticmethod
  def _computeCoincCenters(inputShape, coincidencesShape, inputBorder):
    """Compute the centers of all coincidences, given parameters.

    This function is semi-public: tools may use it to generate good
    visualizations of what the FDRCSpatial node is doing.

    NOTE: It must be static or global function so that it can be called by
    the ColumnActivityTab inspector *before* the first compute (before the
    SP has been constructed).

    If the input shape is (7,20), shown below with * for each input.
    ********************
    ********************
    ********************
    ********************
    ********************
    ********************
    ********************

    If inputBorder is 1, we distribute the coincidences evenly over the
    the area after removing the edges, @ shows the allowed input area below.
    ********************
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    *@@@@@@@@@@@@@@@@@@*
    ********************

    Each coincidence is centered at the closest @ and looks at a area with
    coincInputRadius below it.

    This function call returns an iterator over the coincidence centers.
    Each element in iterator is a tuple: (y, x). The iterator returns
    elements in a fixed order.
    """
    # Determine Y centers
    if inputShape[0] > 1:
      # 2-D layout
      startHeight = inputBorder
      stopHeight = inputShape[0] - inputBorder
    else:
      startHeight = stopHeight = 0
    heightCenters = numpy.linspace(startHeight,
                                   stopHeight,
                                   coincidencesShape[0],
                                   endpoint=False).astype('int32')

    # Determine X centers
    startWidth = inputBorder
    stopWidth = inputShape[1] - inputBorder
    widthCenters = numpy.linspace(startWidth,
                                  stopWidth,
                                  coincidencesShape[1],
                                  endpoint=False).astype('int32')

    return list(cross(heightCenters, widthCenters))


  def _updateInhibitionObj(self):
    """ Calculate the average inhibitionRadius to use and update the
    inhibition object accordingly.

    This looks at the size of the average connected receptive field and uses
    that to determine the inhibition radius.
    """
    # Compute the inhibition radius.
# If using global inhibition, just set it to include the entire region
    if self.globalInhibition:
      avgRadius = max(self.coincidencesShape)

    # Else, set it based on the average size of the connected synapses area in
    # each cell.
    else:
      totalDim = 0

      # Get the dimensions of the connected receptive fields of each cell to
      # compute the average
      minDim = numpy.inf
      maxDim = 0
      for masterNum in xrange(self.numCloneMasters):
        masterConnected = self._masterConnectedM[masterNum]
        nzs = masterConnected.getAllNonZeros()
        rows, cols = zip(*nzs)
        rows = numpy.array(rows)
        cols = numpy.array(cols)
        if len(rows) >= 2:
          height = rows.max() - rows.min() + 1
        else:
          height = 1
        if len(cols) >= 2:
          width = cols.max() - cols.min() + 1
        else:
          width = 1
        avgDim = (height + width) / 2.0
        minDim = min(minDim, avgDim)
        maxDim = max(maxDim, avgDim)
        totalDim += avgDim

      # Get average width/height in input space
      avgDim = totalDim / self.numCloneMasters
      self._rfRadiusAvg = (avgDim - 1.0) / 2.0
      self._rfRadiusMin = (minDim - 1.0) / 2.0
      self._rfRadiusMax = (maxDim - 1.0) / 2.0

      # How many columns in cell space does it correspond to?
      if self.inputShape[0] > 1:
        # 2-D layout
        coincsPerInputX = (float(self.coincidencesShape[1]) /
                           (self.inputShape[1] - 2 * self.inputBorder))
        coincsPerInputY = (float(self.coincidencesShape[0]) /
                           (self.inputShape[0] - 2 * self.inputBorder))
      else:
        coincsPerInputX = coincsPerInputY = (
            float(self.coincidencesShape[1] * self.coincidencesShape[0]) /
            (self.inputShape[1] - 2 * self.inputBorder))
      avgDim *= (coincsPerInputX + coincsPerInputY) / 2
      avgRadius = (avgDim - 1.0) / 2.0
      avgRadius = max(1.0, avgRadius)

    # Can't be greater than the overall width or height of the level
    maxDim = max(self.coincidencesShape)
    avgRadius = min(avgRadius, maxDim)
    avgRadius = int(round(avgRadius))

    # Is there a need to re-instantiate the inhibition object?
    if (self._inhibitionObj is None or
        self._inhibitionObj.getInhibitionRadius() != avgRadius):
      # What is our target density?
      if self.localAreaDensity > 0:
        localAreaDensity = self.localAreaDensity
      else:
        numCellsPerInhArea = (avgRadius * 2.0 + 1.0) ** 2
        totalCells = self.coincidencesShape[0] * self.coincidencesShape[1]
        numCellsPerInhArea = min(numCellsPerInhArea, totalCells)
        localAreaDensity = float(self.numActivePerInhArea) / numCellsPerInhArea
        # Don't let it be greater than 0.50
        localAreaDensity = min(localAreaDensity, 0.50)

      if self.spVerbosity >= 2:
        print "Updating inhibition object:"
        print " avg. rfRadius:", self._rfRadiusAvg
        print " avg. inhRadius:", avgRadius
        print " Setting density to:", localAreaDensity
      self._inhibitionObj = Inhibition2(self.coincidencesShape[0],  # height
                                        self.coincidencesShape[1],  # width
                                        avgRadius,                  # inhRadius
                                        localAreaDensity)           # density


  def _updateMinDutyCycles(self, actDutyCycles, minPctDutyCycle,
                           minDutyCycles):
    """ Calculate and update the minimum acceptable duty cycle for each cell
    based on the duty cycles of the cells within its inhibition radius and
    the minPctDutyCycle.

    Parameters:
    -----------------------------------------------------------------------
    actDutyCycles:    The actual duty cycles of all cells
    minPctDutyCycle:  Each cell's minimum duty cycle will be set to
                      minPctDutyCycle times the duty cycle of the most active
                      cell within its inhibition radius
    minDutyCycles:    This array will be updated in place with the new
                      minimum acceptable duty cycles
    """
    # What is the inhibition radius?
    inhRadius = self._inhibitionObj.getInhibitionRadius()

    # Reshape the actDutyCycles to match the topology of the level
    cloningOn = (self.numCloneMasters != self._coincCount)
    if not cloningOn:
      actDutyCycles = actDutyCycles.reshape(self.coincidencesShape)
      minDutyCycles = minDutyCycles.reshape(self.coincidencesShape)

    # Special, faster handling when inhibition radius includes the entire
    # set of cells.
    if cloningOn or inhRadius >= max(self.coincidencesShape):
      # NOTE(review): minDutyCycle is assigned but unused; fill() below
      # recomputes the same value. Confirm before removing.
      minDutyCycle = minPctDutyCycle * actDutyCycles.max()
      minDutyCycles.fill(minPctDutyCycle * actDutyCycles.max())

    # Else, process each cell
    else:
      (numRows, numCols) = self.coincidencesShape
      for row in xrange(numRows):
        top = max(0, row - inhRadius)
        bottom = min(row + inhRadius + 1, numRows)
        for col in xrange(numCols):
          left = max(0, col - inhRadius)
          right = min(col + inhRadius + 1, numCols)
          maxDutyCycle = actDutyCycles[top:bottom, left:right].max()
          minDutyCycles[row, col] = maxDutyCycle * minPctDutyCycle

    if self.spVerbosity >= 2:
      print "Actual duty cycles:"
      print fdru.numpyStr(actDutyCycles, '%.4f')
      print "Recomputed min duty cycles, using inhRadius of", inhRadius
      print fdru.numpyStr(minDutyCycles, '%.4f')


  def _computeOverlapsPy(self, inputShaped, stimulusThreshold):
    """ Computes overlaps for every column for the current input in place.
    The overlaps less than stimulus threshold are set to zero here.

    For columns with input RF going off the edge of input field, only regions
    within the input field are considered. This is equivalent to padding the
    input field with zeros.

    Parameters:
    ------------------------------------------------------------------------
    inputShaped:        input at the current time step, shaped to the input
                        topology
    stimulusThreshold:  stimulusThreshold to use

    Member variables used/updated:
    ------------------------------------------------------------------------
    _inputSlices:   Index into the input (assuming it's been shaped to a 2D
                    array) to get the receptive field of each column.
    _coincSlices:   Index into the coinc (assuming it's been shaped to a 2D
                    array) to get the valid region of each column.
    _overlaps:      Result is placed into this array which holds the overlaps
                    of each column with the input
    """
    flatInput = inputShaped.reshape(-1)
    self._allConnectedM.rightVecSumAtNZ_fast(flatInput, self._overlaps)

    # Apply stimulusThreshold
    # TODO: Is there a faster numpy operation for this?
self._overlaps[self._overlaps < stimulusThreshold] = 0
    # Keep a copy of the pre-boost overlaps for later inspection.
    self._overlapsNoBoost = self._overlaps.copy()


  def _computeOverlapsCPP(self, inputShaped, stimulusThreshold):
    """ Same as _computeOverlapsPy, but using a C++ implementation.
    """
    cpp_overlap(self._cloneMapFlat, self._inputSlices2, self._coincSlices2,
                inputShaped, self._masterConnectedM, stimulusThreshold,
                self._overlaps)


  def _computeOverlapsTest(self, inputShaped, stimulusThreshold):
    """ Same as _computeOverlapsPy, but compares the python and C++
    implementations.
    """
    # Py version
    self._computeOverlapsPy(inputShaped, stimulusThreshold)
    overlaps2 = copy.deepcopy(self._overlaps)

    # C++ version
    self._computeOverlapsCPP(inputShaped, stimulusThreshold)

    # Debug-only guard: drops into pdb and exits if the two implementations
    # diverge beyond floating-point tolerance.
    if (abs(self._overlaps - overlaps2) > 1e-6).any():
      print self._overlaps, overlaps2, abs(self._overlaps - overlaps2)
      import pdb; pdb.set_trace()
      sys.exit(0)


  def _raiseAllPermanences(self, masterNum, minConnections=None,
                           densePerm=None, densePotential=None):
    """ Raise all permanences of the given master. If minConnections is
    given, the permanences will be raised until at least minConnections of
    them are connected strength. If minConnections is left at None, all
    permanences will be raised by self._synPermBelowStimulusInc.

    After raising all permanences, we also "sparsify" the permanence matrix
    and set to 0 any permanences which are already very close to 0, this
    keeps the memory requirements of the sparse matrices used to store the
    permanences lower.

    Parameters:
    ----------------------------------------------------------------------------
    masterNum:        Which master to bump up
    minConnections:   Desired number of connected synapses to have
                      If None, then all permanences are simply bumped up by
                      self._synPermBelowStimulusInc
    densePerm:        The dense representation of the master's permanence
                      matrix, if available. If not specified, we will create
                      this from the stored sparse representation. Providing
                      this will avoid some compute overhead. If provided, it
                      is assumed that it is more recent than the stored
                      sparse matrix. The stored sparse matrix will ALWAYS be
                      updated from the densePerm if the densePerm is
                      provided.
    densePotential:   The dense representation of the master's potential
                      synapses matrix, if available. If not specified, we
                      will create this from the stored sparse potential
                      matrix. Providing this will avoid some compute
                      overhead. If provided, it is assumed that it is more
                      recent than the stored sparse matrix.

    retval:           (modified, numConnections)
                      modified: True if any permanences were raised
                      numConnections: Number of actual connected synapses
                      (not computed if minConnections was None, so None is
                      returned in that case.)
    """
    # It's faster to perform this operation on the dense matrices and
    # then convert to sparse once we're done since we will be potentially
    # introducing and then later removing a bunch of non-zeros.

    # Get references to the sparse perms and potential syns for this master
    sparsePerm = self._masterPermanenceM[masterNum]
    sparsePotential = self._masterPotentialM[masterNum]

    # We will trim off all synapse permanences below this value to 0 in order
    # to keep the memory requirements of the SparseMatrix lower
    trimThreshold = self.synPermActiveInc / 2.0

    # See if we already have the required number of connections. If we don't,
    # get the dense form of the permanences if we don't have them already
    if densePerm is None:
      # See if we already have enough connections, if so, we can avoid the
      # overhead of converting to dense
      if minConnections is not None:
        numConnected = sparsePerm.countWhereGreaterEqual(
            0, self._coincRFShape[0], 0, self._coincRFShape[1],
            self.synPermConnected)
        if numConnected >= minConnections:
          return (False, numConnected)
      densePerm = self._masterPermanenceM[masterNum].toDense()

    elif minConnections is not None:
      numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected)
      if numConnected >= minConnections:
        # Caller's densePerm is authoritative: sync it back to sparse form.
        sparsePerm.fromDense(densePerm)
        sparsePerm.threshold(trimThreshold)
        return (False, numConnected)

    # Get the dense form of the potential synapse locations
    if densePotential is None:
      densePotential = self._masterPotentialM[masterNum].toDense()

    # Form the array with the increments
    incrementM = densePotential.astype(realDType)
    incrementM *= self._synPermBelowStimulusInc

    # Increment until we reach our target number of connections
    assert (densePerm.dtype == realDType)
    while True:
      densePerm += incrementM
      if minConnections is None:
        numConnected = None
        break
      numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected)
      if numConnected >= minConnections:
        break

    # Convert back to sparse form and trim any values that are already
    # close to zero
    sparsePerm.fromDense(densePerm)
    sparsePerm.threshold(trimThreshold)

    return (True, numConnected)


  def _bumpUpWeakCoincidences(self):
    """
    This bump-up ensures every coincidence have non-zero connections. We find
    all coincidences which have overlaps less than stimulus threshold. We add
    synPermActiveInc to all the synapses. This step when repeated over time
    leads to synapses crossing synPermConnected threshold.
    """
    # Update each cell's connected threshold based on the duty cycle before
    # inhibition.
The connected threshold is linearly interpolated # between the points (dutyCycle:0, thresh:0) and (dutyCycle:minDuty, # thresh:synPermConnected). This is a line defined as: y = mx + b # thresh = synPermConnected/minDuty * dutyCycle bumpUpList = ( self._dutyCycleBeforeInh < self._minDutyCycleBeforeInh).nonzero()[0] for master in bumpUpList: self._raiseAllPermanences(master) # Update the connected synapses for each master we touched. self._updateConnectedCoincidences(bumpUpList) if self.spVerbosity >= 2 and len(bumpUpList) > 0: print ("Bumping up permanences in following cells due to falling below" "minDutyCycleBeforeInh:"), bumpUpList def _updateBoostFactors(self): """ Update the boost factors. The boost factors is linearly interpolated between the points (dutyCycle:0, boost:maxFiringBoost) and (dutyCycle:minDuty, boost:1.0). This is a line defined as: y = mx + b boost = (1-maxFiringBoost)/minDuty * dutyCycle + maxFiringBoost Parameters: ------------------------------------------------------------------------ boostFactors: numpy array of boost factors, defined per master """ if self._minDutyCycleAfterInh.sum() > 0: self._firingBoostFactors = ( (1 - self.maxFiringBoost) / self._minDutyCycleAfterInh * self._dutyCycleAfterInh + self.maxFiringBoost) self._firingBoostFactors[self._dutyCycleAfterInh > self._minDutyCycleAfterInh] = 1.0 def _updateInputUse(self, onCellIndices): """ During learning (adapting permanence values), we need to be able to tell which inputs are going to 2 or more active cells at once. We step through each coinc and mark all the inputs it is connected to. The inputUse array acts as a counter for the number of connections to the coincs from each input. 
Parameters: ------------------------------------------------------------------------ inputUse: numpy array of number of coincs connected to each input """ allConnected = SM32(self._allConnectedM) # TODO: avoid this copy self._inputUse[:] = allConnected.addListOfRows( onCellIndices).reshape(self.inputShape) def _adaptSynapses(self, onCellIndices, orphanCellIndices, input): """ This is the main function in learning of SP. The permanence values are changed based on the learning rules. Parameters: ------------------------------------------------------------------------ onCellIndices: columns which are turned on after inhibition. The permanence values of these coincs are adapted based on the input. orphanCellIndices: columns which had very high overlap with the input, but ended up being inhibited input: Input, shaped to the input topology retval: list of masterCellIndices that were actually updated, or None if cloning is off """ # Capturing learning stats? if self.printPeriodicStats > 0: self._stats['explainedInputsCurIteration'] = set() # Precompute the active, inactive, and dupe inputs up front for speed # TODO: put these into pre-allocated arrays for speed self._activeInput[:] = input # Create a matrix containing the default permanence deltas for each input self._permChanges.fill(-1 * self.synPermInactiveDec) self._permChanges[self._activeInput] = self.synPermActiveInc if self.synPermActiveSharedDec != 0: numpy.logical_and(self._activeInput, self._inputUse>1, self._dupeInput) self._permChanges[self._dupeInput] -= self.synPermActiveSharedDec # Cloning? If so, scramble the onCells so that we pick a random one to # update for each master. We only update a master cell at most one time # per input presentation. 
cloningOn = (self.numCloneMasters != self._coincCount) if cloningOn: # Scramble the onCellIndices so that we pick a random one to update onCellIndices = list(onCellIndices) random.shuffle(onCellIndices) visitedMasters = set() # For the firing cells, update permanence values for columnNum in itertools.chain(onCellIndices, orphanCellIndices): # Get the master number masterNum = self._cloneMapFlat[columnNum] # If cloning, only visit each master once if cloningOn: if masterNum in visitedMasters: continue visitedMasters.add(masterNum) # Get the slices of input that overlap with the valid area of this master inputSlice = self._inputSlices[columnNum] rfActiveInput = self._activeInput[inputSlice] rfPermChanges = self._permChanges[inputSlice] # Get the potential synapses, permanence values, and connected synapses # for this master masterPotential = self._masterPotentialM[masterNum].toDense() masterPermanence = self._masterPermanenceM[masterNum].toDense() masterConnected = ( self._masterConnectedM[masterNum].toDense().astype('bool')) # Make changes only over the areas that overlap the input level. For # coincidences near the edge of the level for example, this excludes the # synapses outside the edge. coincSlice = self._coincSlices[columnNum] masterValidPermanence= masterPermanence[coincSlice] # Capturing learning stats? 
if self.printPeriodicStats > 0: masterValidConnected = masterConnected[coincSlice] explainedInputs = self._inputLayout[inputSlice][masterValidConnected] self._stats['explainedInputsCurIteration'].update(explainedInputs) if self.spVerbosity >= 3: print " adapting cell:%d [%d:%d] (master:%d)" % (columnNum, columnNum // self.coincidencesShape[1], columnNum % self.coincidencesShape[1], masterNum) print " initialConnected: %d" % \ (self._masterConnectedM[masterNum].nNonZeros()) print " firingLevel: %d" % (self._overlaps[columnNum]) print " firingBoostFactor: %f" % (self._firingBoostFactors[masterNum]) print " input slice: \n" self._printInputSlice(rfActiveInput, prefix=' ') # Update permanences given the active input (NOTE: The "FP" in this # function name stands for "Function Pointer"). if columnNum in orphanCellIndices: # Decrease permanence of active inputs masterValidPermanence[rfActiveInput] -= self.synPermOrphanDec else: self._updatePermanenceGivenInputFP(columnNum, masterNum, input, self._inputUse, masterPermanence, masterValidPermanence, rfActiveInput, rfPermChanges) # Clip to absolute min and max permanence values numpy.clip(masterPermanence, self._synPermMin, self._synPermMax, out=masterPermanence) # Keep only the potential syns for this cell numpy.multiply(masterPermanence, masterPotential, masterPermanence) # If we are tracking learning stats, prepare to see how many changes # were made to the cell connections if self.printPeriodicStats > 0: masterConnectedOrig = SM_01_32_32(self._masterConnectedM[masterNum]) # If the number of connected synapses happens to fall below # stimulusThreshold, bump up all permanences a bit. # We could also just wait for the "duty cycle falls below # minDutyCycleBeforeInb" logic to catch it, but doing it here is # pre-emptive and much faster. 
# # The "duty cycle falls below minDutyCycleBeforeInb" logic will still # catch other possible situations, like: # * if the set of inputs a cell learned suddenly stop firing due to # input statistic changes # * damage to the level below # * input is very sparse and we still don't pass stimulusThreshold even # with stimulusThreshold conneted synapses. self._raiseAllPermanences(masterNum, minConnections=self.stimulusThreshold, densePerm=masterPermanence, densePotential=masterPotential) # Update the matrices that contain the connected syns for this cell. self._updateConnectedCoincidences([masterNum]) # If we are tracking learning stats, see how many changes were made to # this cell's connections if self.printPeriodicStats > 0: origNumConnections = masterConnectedOrig.nNonZeros() masterConnectedOrig.logicalAnd(self._masterConnectedM[masterNum]) numUnchanged = masterConnectedOrig.nNonZeros() numChanges = origNumConnections - numUnchanged numChanges += (self._masterConnectedM[masterNum].nNonZeros() - numUnchanged) self._stats['numChangedConnectionsSum'][masterNum] += numChanges self._stats['numLearns'][masterNum] += 1 # Verbose? if self.spVerbosity >= 3: print " done cell:%d [%d:%d] (master:%d)" % (columnNum, columnNum // self.coincidencesShape[1], columnNum % self.coincidencesShape[1], masterNum) print " newConnected: %d" % \ (self._masterConnectedM[masterNum].nNonZeros()) self._printSyns(columnNum, prefix=' ', showValues=(self.spVerbosity >= 4)) print # Return list of updated masters if cloningOn: return list(visitedMasters) else: return onCellIndices def _updatePermanenceGivenInputPy( self, columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice): """ Given the input to a master coincidence, update it's permanence values based on our learning rules. On Entry, we are given the slice of the permanence matrix that corresponds only to the area of the coincidence master that is within the borders of the entire input field. 
Parameters: ------------------------------------------------------------------------ columnNum: The column number of this cell masterNum: The master coincidence that corresponds to this column input: The entire input, shaped appropriately inputUse: The same shape as input. Each entry is a count of the number of *currently active cells* that are connected to that input. permanence: The entire masterPermanence matrix for this master permanenceSlice: The slice of the masterPermanence matrix for this master that intersects the input field, i.e. does not overhang the outside edges of the input. activeInputSlice: The portion of 'input' that intersects permanenceSlice, set to True where input != 0 permChangesSlice: The portion of 'input' that intersects permanenceSlice, set to self.synPermActiveInc where input != 0 and self.synPermInactiveDec where the input == 0. This is used to optimally apply self.synPermActiveInc and self.synPermInactiveDec at the same time and can be used for any cell whose _synPermBoostFactor is set to 1.0. """ # TODO: This function does nothing. # Apply the baseline increment/decrements permanenceSlice += permChangesSlice # If this cell has permanence boost, apply the incremental def _updatePermanenceGivenInputCPP( self, columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice): """ Same as _updatePermanenceGivenInputPy, but using a C++ implementation. """ inputNCols = self.inputShape[1] masterNCols = self._masterPotentialM[masterNum].shape[1] # TODO: synPermBoostFactors has been removed. CPP implementation has not # been updated for this. 
adjustMasterValidPermanence(columnNum, masterNum, inputNCols, masterNCols, self.synPermActiveInc, self.synPermInactiveDec, self.synPermActiveSharedDec, input, inputUse, self._inputSlices2, self._coincSlices2, self._synPermBoostFactors, permanence) def _updatePermanenceGivenInputTest( self, columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice): """ Same as _updatePermanenceGivenInputPy, but compares the python and C++ implementations. """ mp2 = copy.deepcopy(permanence) mvp2 = copy.deepcopy(permanenceSlice) # Py version import pdb; pdb.set_trace() self._updatePermanenceGivenInputPy(columnNum, masterNum, input, inputUse, permanence, permanenceSlice, activeInputSlice, permChangesSlice) # C++ version self._updatePermanenceGivenInputCPP(columnNum, masterNum, input, inputUse, mp2, mvp2, activeInputSlice, permChangesSlice) if abs(mp2 - permanence).max() > 1e-6: print abs(mp2 - permanence).max() import pdb; pdb.set_trace() sys.exit(0) def _periodicStatsCreate(self): """ Allocate the periodic stats structure """ self._stats = dict() self._stats['numChangedConnectionsSum'] = numpy.zeros( self.numCloneMasters, dtype=realDType) self._stats['numLearns'] = numpy.zeros( self.numCloneMasters, dtype=realDType) # These keep track of the min and max boost factor seen for each # column during each training period self._stats['minBoostFactor'] = numpy.zeros(self.numCloneMasters, dtype=realDType) self._stats['maxBoostFactor'] = numpy.zeros(self.numCloneMasters, dtype=realDType) # This dict maintains mappings of specific input patterns to specific # output patterns. It is used to detect "thrashing" of cells. We measure # how similar the output presentation of a specific input is to the # last time we saw it. self._stats['inputPatterns'] = dict() self._stats['inputPatternsLimit'] = 5000 self._periodicStatsReset() def _periodicStatsReset(self): """ Reset the periodic stats this is done every N iterations before capturing a new set of stats. 
""" self._stats['numSamples'] = 0 self._stats['numOnSum'] = 0 self._stats['underCoveragePctSum'] = 0 self._stats['overCoveragePctSum'] = 0 self._stats['cellOverlapSums'] = 0 self._stats['cellPctOverlapSums'] = 0 self._stats['explainedInputsCurIteration'] = set() self._stats['startTime'] = time.time() # These keep a count of the # of changed connections per update # for each master self._stats['numChangedConnectionsSum'].fill(0) self._stats['numLearns'].fill(0) # These keep track of the min and max boost factor seen for each # column during each training period self._stats['minBoostFactor'].fill(self.maxFiringBoost) self._stats['maxBoostFactor'].fill(0) # This keeps track of the average distance between the SP output of # a specific input pattern now and the last time we saw it. self._stats['outputPatternDistanceSum'] = 0 self._stats['outputPatternSamples'] = 0 def _periodicStatsComputeEnd(self, activeCells, activeInputs): """ Called at the end of compute. This increments the number of computes and also summarizes the under and over coverage and whatever other periodic stats we need. 
If the period is up, it then prints the accumuated stats and resets them for the next period Parameters: ------------------------------------------------------------------ activeCells: list of the active cells activeInputs: list of the active inputs """ # Update number of samples self._stats['numSamples'] += 1 # Compute under and over coverage numOn = len(activeCells) self._stats['numOnSum'] += numOn expInput = self._stats['explainedInputsCurIteration'] inputLen = len(activeInputs) underCoverage = len(set(activeInputs).difference(expInput)) self._stats['underCoveragePctSum'] += float(underCoverage) / inputLen expInput.difference_update(activeInputs) overCoverage = len(expInput) self._stats['overCoveragePctSum'] += float(overCoverage) / inputLen # Keep track of the min and max boost factor seen for each column numpy.minimum(self._firingBoostFactors, self._stats['minBoostFactor'], self._stats['minBoostFactor']) numpy.maximum(self._firingBoostFactors, self._stats['maxBoostFactor'], self._stats['maxBoostFactor']) # Calculate the distance in the SP output between this input now # and the last time we saw it. inputPattern = str(sorted(activeInputs)) outputNZ, sampleIdx = self._stats['inputPatterns'].get(inputPattern, (None, None)) activeCellSet = set(activeCells) if outputNZ is not None: distance = (len(activeCellSet.difference(outputNZ)) + len(outputNZ.difference(activeCellSet))) self._stats['inputPatterns'][inputPattern] = (activeCellSet, sampleIdx) self._stats['outputPatternDistanceSum'] += distance self._stats['outputPatternSamples'] += 1 # Add this sample to our dict, if it's not too large already elif len(self._stats['inputPatterns']) < self._stats['inputPatternsLimit']: self._stats['inputPatterns'][inputPattern] = (activeCellSet, self._iterNum) # If it's not time to print them out, return now. 
if (self._iterNum % self.printPeriodicStats) != 0: return numSamples = float(self._stats['numSamples']) # Calculate number of changes made per master masterTouched = numpy.where(self._stats['numLearns'] > 0) if len(masterTouched[0]) == 0: numMasterChanges = numpy.zeros(1) else: numMasterChanges = self._stats['numChangedConnectionsSum'][masterTouched] numMasterChanges /= self._stats['numLearns'][masterTouched] # This fills in the static learning stats into self._learningStats self.getLearningStats() # Calculate and copy the transient learning stats into the # self._learningStats dict, for possible retrieval later by # the getLearningStats() method. self._learningStats['elapsedTime'] = time.time() - self._stats['startTime'] self._learningStats['activeCountAvg'] = (self._stats['numOnSum'] / numSamples) self._learningStats['underCoveragePct'] = ( 100.0 * self._stats['underCoveragePctSum'] / numSamples) self._learningStats['overCoveragePct'] = ( (100.0 * self._stats['overCoveragePctSum'] / numSamples)) self._learningStats['numConnectionChangesAvg'] = numMasterChanges.mean() self._learningStats['numConnectionChangesMin'] = numMasterChanges.min() self._learningStats['numConnectionChangesMax'] = numMasterChanges.max() self._learningStats['avgCellOverlap'] = ( (float(self._stats['cellOverlapSums']) / max(1, self._stats['numOnSum']))) self._learningStats['avgCellPctOverlap'] = ( (100.0 * self._stats['cellPctOverlapSums'] / max(1, self._stats['numOnSum']))) self._learningStats['firingBoostMaxChangePct'] = ( 100.0 * (self._stats['maxBoostFactor'] / self._stats['minBoostFactor']).max() - 100.0) self._learningStats['outputRepresentationChangeAvg'] = ( float(self._stats['outputPatternDistanceSum']) / max(1, self._stats['outputPatternSamples'])) self._learningStats['outputRepresentationChangePctAvg'] = ( 100.0 * self._learningStats['outputRepresentationChangeAvg'] / max(1,self._learningStats['activeCountAvg'])) self._learningStats['numUniqueInputsSeen'] = ( 
len(self._stats['inputPatterns'])) if (self._learningStats['numUniqueInputsSeen'] >= self._stats['inputPatternsLimit']): self._learningStats['numUniqueInputsSeen'] = -1 # Print all stats captured print "Learning stats for the last %d iterations:" % (numSamples) print " iteration #: %d" % (self._iterNum) print " inference iteration #: %d" % (self._inferenceIterNum) print " elapsed time: %.2f" % ( self._learningStats['elapsedTime']) print " avg activeCount: %.1f" % ( self._learningStats['activeCountAvg']) print " avg under/overCoverage: %-6.1f / %-6.1f %%" % ( self._learningStats['underCoveragePct'], self._learningStats['overCoveragePct']) print " avg cell overlap: %-6.1f / %-6.1f %%" % ( self._learningStats['avgCellOverlap'], self._learningStats['avgCellPctOverlap']) print " avg/min/max RF radius: %-6.1f / %-6.1f / %-6.1f" % ( self._learningStats['rfRadiusAvg'], self._learningStats['rfRadiusMin'], self._learningStats['rfRadiusMax']) print " inhibition radius: %d" % ( self._learningStats['inhibitionRadius']) print " target density: %.5f %%" % ( self._learningStats['targetDensityPct']) print " avg/min/max coinc. size: %-6.1f / %-6d / %-6d" % ( self._learningStats['coincidenceSizeAvg'], self._learningStats['coincidenceSizeMin'], self._learningStats['coincidenceSizeMax']) print " avg/min/max DC before inh: %-6.4f / %-6.4f / %-6.4f" % ( self._learningStats['dcBeforeInhibitionAvg'], self._learningStats['dcBeforeInhibitionMin'], self._learningStats['dcBeforeInhibitionMax']) print " avg/min/max DC after inh: %-6.4f / %-6.4f / %-6.4f" % ( self._learningStats['dcAfterInhibitionAvg'], self._learningStats['dcAfterInhibitionMin'], self._learningStats['dcAfterInhibitionMax']) print " avg/min/max boost: %-6.4f / %-6.4f / %-6.4f" % ( self._learningStats['firingBoostAvg'], self._learningStats['firingBoostMin'], self._learningStats['firingBoostMax']) print " avg/min/max # conn. 
changes: %-6.4f / %-6.4f / %-6.4f" % ( self._learningStats['numConnectionChangesAvg'], self._learningStats['numConnectionChangesMin'], self._learningStats['numConnectionChangesMax']) print " max change in boost: %.1f %%" % ( self._learningStats['firingBoostMaxChangePct']) print " avg change in output repr.: %-6.1f / %-6.1f %%" % ( self._learningStats['outputRepresentationChangeAvg'], 100.0 * self._learningStats['outputRepresentationChangeAvg'] / max(1,self._learningStats['activeCountAvg'])) print " # of unique input pats seen: %d" % ( self._learningStats['numUniqueInputsSeen']) # Reset the stats for the next period. self._periodicStatsReset() def _printInputSlice(self, inputSlice, prefix=''): """Print the given input slice in a nice human readable format. Parameters: --------------------------------------------------------------------- cell: The slice of input to print prefix: This is printed at the start of each row of the coincidence """ # Shape of each coincidence rfHeight, rfWidth = inputSlice.shape syns = inputSlice != 0 def _synStr(x): if not x: return ' ' else: return '*' # Print them out for row in xrange(syns.shape[0]): items = map(_synStr, syns[row]) print prefix, ''.join(items) def _printSyns(self, cell, prefix='', showValues=False): """Print the synapse permanence values for the given cell in a nice, human, readable format. Parameters: --------------------------------------------------------------------- cell: which cell to print prefix: This is printed at the start of each row of the coincidence showValues: If True, print the values of each permanence. If False, just print a ' ' if not connected and a '*' if connected """ # Shape of each coincidence (rfHeight, rfWidth) = self.inputShape # Get the synapse permanences. 
masterNum = self._cloneMapFlat[cell] syns = self._masterPermanenceM[masterNum].toDense() if showValues: def _synStr(x): if x == 0: return ' -- ' elif x < 0.001: return ' 0 ' elif x >= self.synPermConnected: return '#%3.2f' % x else: return ' %3.2f' % x else: def _synStr(x): if x < self.synPermConnected: return ' ' else: return '*' # Print them out for row in xrange(syns.shape[0]): items = map(_synStr, syns[row]) if showValues: print prefix, ' '.join(items) else: print prefix, ''.join(items) def _printMemberSizes(self, over=100): """Print the size of each member.""" members = self.__dict__.keys() sizeNamePairs = [] totalSize = 0 for member in members: item = self.__dict__[member] if hasattr(item, '__func__'): continue try: if hasattr(item, '__len__'): size = 0 for i in xrange(len(item)): size += len(cPickle.dumps(item[i])) else: size = len(cPickle.dumps(item)) except: print "WARNING: Can't pickle %s" % (member) size = 0 sizeNamePairs.append((size, member)) totalSize += size # Print them out from highest to lowest sizeNamePairs.sort(reverse=True) for (size, name) in sizeNamePairs: if size > over: print "%10d (%10.3fMb) %s" % (size, size/1000000.0, name) print "\nTOTAL: %10d (%10.3fMB) " % (totalSize, totalSize/1000000.0) def printParams(self): """Print the main creation parameters associated with this instance.""" print "FDRCSpatial2 creation parameters: " print "inputShape =", self.inputShape print "inputBorder =", self.inputBorder print "inputDensity =", self.inputDensity print "coincidencesShape =", self.coincidencesShape print "coincInputRadius =", self.coincInputRadius print "coincInputPoolPct =", self.coincInputPoolPct print "gaussianDist =", self.gaussianDist print "commonDistributions =", self.commonDistributions print "localAreaDensity =", self.localAreaDensity print "numActivePerInhArea =", self.numActivePerInhArea print "stimulusThreshold =", self.stimulusThreshold print "synPermInactiveDec =", self.synPermInactiveDec print "synPermActiveInc =", 
self.synPermActiveInc print "synPermActiveSharedDec =", self.synPermActiveSharedDec print "synPermOrphanDec =", self.synPermOrphanDec print "synPermConnected =", self.synPermConnected print "minPctDutyCycleBeforeInh =", self.minPctDutyCycleBeforeInh print "minPctDutyCycleAfterInh =", self.minPctDutyCycleAfterInh print "dutyCyclePeriod =", self.dutyCyclePeriod print "maxFiringBoost =", self.maxFiringBoost print "maxSSFiringBoost =", self.maxSSFiringBoost print "maxSynPermBoost =", self.maxSynPermBoost print "useHighTier =",self.useHighTier print "minDistance =", self.minDistance print "spVerbosity =", self.spVerbosity print "printPeriodicStats =", self.printPeriodicStats print "testMode =", self.testMode print "numCloneMasters =", self.numCloneMasters
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Libtree(CMakePackage): """ldd as a tree with an option to bundle dependencies into a single folder""" homepage = "https://github.com/haampie/libtree" url = "https://github.com/haampie/libtree/releases/download/v1.0.3/sources.tar.gz" maintainers = ['haampie'] version('1.2.2', sha256='4ccf09227609869b85a170550b636defcf0b0674ecb0785063b81785b1c29bdd') version('1.2.1', sha256='26791c0f418b93d502879db0e1fd2fd3081b885ad87326611d992a5f8977a9b0') version('1.2.0', sha256='3e74655f22b1dcc19e8a1b9e7796b8ad44bc37f29e9a99134119e8521e28be97') version('1.1.4', sha256='38648f67c8fa72c3a4a3af2bb254b5fd6989c0f1362387ab298176db5cbbcc4e') version('1.1.3', sha256='4c681d7b67ef3d62f95450fb7eb84e33ff10a3b9db1f7e195b965b2c3c58226b') version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374') version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4') version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536') version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45') version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b') libtree: add v1.2.3 (#25270) # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Libtree(CMakePackage): """ldd as a tree with an option to bundle dependencies into a single folder""" homepage = "https://github.com/haampie/libtree" url = "https://github.com/haampie/libtree/releases/download/v1.0.3/sources.tar.gz" maintainers = ['haampie'] version('1.2.3', sha256='4a912cf97109219fe931942a30579336b6ab9865395447bd157bbfa74bf4e8cf') version('1.2.2', sha256='4ccf09227609869b85a170550b636defcf0b0674ecb0785063b81785b1c29bdd') version('1.2.1', sha256='26791c0f418b93d502879db0e1fd2fd3081b885ad87326611d992a5f8977a9b0') version('1.2.0', sha256='3e74655f22b1dcc19e8a1b9e7796b8ad44bc37f29e9a99134119e8521e28be97') version('1.1.4', sha256='38648f67c8fa72c3a4a3af2bb254b5fd6989c0f1362387ab298176db5cbbcc4e') version('1.1.3', sha256='4c681d7b67ef3d62f95450fb7eb84e33ff10a3b9db1f7e195b965b2c3c58226b') version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374') version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4') version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536') version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45') version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b')
from unittest import TestCase

from os.path import (expanduser, join, isdir, isfile)
from os import (remove, makedirs)
from shutil import rmtree
from glob import glob
from niftynet.utilities.niftynet_global_config import NiftyNetGlobalConfig


class NiftyNetGlobalConfigTest(TestCase):
    """For reliably testing the global config file, the tests are grouped
    and ordered by including a number.

    https://docs.python.org/2/library/unittest.html says:
    "Note that the order in which the various test cases will be run is
    determined by sorting the test function names with respect to the
    built-in ordering for strings."
    """

    @classmethod
    def typify(cls, file_path):
        """Append file type extension to passed file path."""
        return '.'.join([file_path, cls.file_type])

    @classmethod
    def remove_path(cls, path):
        """Remove passed item, whether it's a file or directory."""
        if isdir(path):
            rmtree(path)
        elif isfile(path):
            remove(path)

    @classmethod
    def setUpClass(cls):
        cls.config_home = join(expanduser('~'), '.niftynet')
        cls.file_type = 'ini'
        cls.config_file = join(cls.config_home, cls.typify('config'))
        cls.header = '[global]'
        cls.default_config_opts = {
            'home': '~/niftynet'
        }

    @classmethod
    def tearDownClass(cls):
        # TODO
        pass

    def setUp(self):
        NiftyNetGlobalConfigTest.remove_path(NiftyNetGlobalConfigTest.config_home)
        # 'home' is stored with '~' unexpanded: expand it before removal,
        # otherwise a literal relative './~/niftynet' path would be targeted.
        NiftyNetGlobalConfigTest.remove_path(
            expanduser(NiftyNetGlobalConfigTest.default_config_opts['home'])
        )

    def tearDown(self):
        self.setUp()

    def test_000_global_config_singleton(self):
        global_config_1 = NiftyNetGlobalConfig()
        global_config_2 = NiftyNetGlobalConfig()
        self.assertEqual(global_config_1, global_config_2)
        self.assertTrue(global_config_1 is global_config_2)

    def test_010_non_existing_config_file_created(self):
        self.assertFalse(isfile(NiftyNetGlobalConfigTest.config_file))
        global_config = NiftyNetGlobalConfig()
        self.assertTrue(isfile(NiftyNetGlobalConfigTest.config_file))
        self.assertEqual(global_config.get_niftynet_config_folder(),
                         NiftyNetGlobalConfigTest.config_home)

    def test_011_existing_config_file_loaded(self):
        # create a config file with a custom NiftyNet home
        makedirs(NiftyNetGlobalConfigTest.config_home)
        custom_niftynet_home = '~/customniftynethome'
        custom_niftynet_home_abs = expanduser(custom_niftynet_home)
        config = ''.join(['home = ', custom_niftynet_home])
        with open(NiftyNetGlobalConfigTest.config_file, 'w') as config_file:
            config_file.write('\n'.join(
                [NiftyNetGlobalConfigTest.header, config]))

        global_config = NiftyNetGlobalConfig()
        self.assertEqual(global_config.get_niftynet_home_folder(),
                         custom_niftynet_home_abs)

    def test_012_incorrect_config_file_backed_up(self):
        # create an incorrect config file at the correct location
        makedirs(NiftyNetGlobalConfigTest.config_home)
        incorrect_config = 'invalid_home_tag = ~/niftynet'
        with open(NiftyNetGlobalConfigTest.config_file, 'w') as config_file:
            config_file.write('\n'.join(
                [NiftyNetGlobalConfigTest.header, incorrect_config]))

        # the following should back it up and replace it with default config
        global_config = NiftyNetGlobalConfig()
        self.assertTrue(isfile(NiftyNetGlobalConfigTest.config_file))
        self.assertEqual(global_config.get_niftynet_config_folder(),
                         NiftyNetGlobalConfigTest.config_home)

        # check if incorrect file was backed up
        found_files = glob(join(NiftyNetGlobalConfigTest.config_home,
                                NiftyNetGlobalConfigTest.typify('config-backup-*')))
        self.assertTrue(len(found_files) == 1)
        with open(found_files[0], 'r') as backup_file:
            self.assertEqual(backup_file.read(), incorrect_config)

        # cleanup: remove the backup *path*, not the closed file object --
        # remove_path() stats its argument, which requires a path string
        NiftyNetGlobalConfigTest.remove_path(found_files[0])
#!/usr/bin/env python
'''
Usage: git-bin [-v] [--debug] <command> [--] <file>...
       git-bin init
       git-bin (-h|--help|--version)

Commands:
    add         store file in binstore and add it's link to the index
    edit        retrieve a file from the binstore for local edit
    checkout    restore the link to the last added version of the file
    init

Options:
    --help -h       print this help
    --version       print version and exit
    --verbose -v    enable verbose printing
    --debug         debug mode
'''
import os.path
import stat
import filecmp
import pkg_resources
from docopt import docopt

import utils
import commands as cmd
import git


class Binstore(object):
    """Abstract interface to a store of binary file contents."""

    def __init__(self):
        pass

    def init(self):
        """ Initialize git-bin for this git repository."""
        raise NotImplementedError

    def add_file(self, filename):
        """ Add the specified file to the binstore. """
        raise NotImplementedError

    def edit_file(self, filename):
        """ Retrieve the specified file for editing. """
        raise NotImplementedError

    def reset_file(self, filename):
        """ Reset the specified file. """

    def __contains__(self, item):
        """ Test whether a given item is in this binstore. The item may be a
            hash or a symlink in the repo """
        raise NotImplementedError

    def available(self):
        """ Test to see whether the binstore can be reached. """
        raise NotImplementedError


class SSHFSBinstore(Binstore):
    pass


class BinstoreException(Exception):
    pass


class FilesystemBinstore(Binstore):
    """Binstore backed by a directory reachable through the filesystem."""

    def __init__(self, gitrepo):
        Binstore.__init__(self)
        self.gitrepo = gitrepo
        # retrieve the binstore path from the .git/config
        # first look for the binstore base in the git config tree.
        binstore_base = self.gitrepo.config.get("git-bin", "binstorebase", None)
        # if that fails, try the environment variable
        binstore_base = binstore_base or os.environ.get("BINSTORE_BASE", binstore_base)
        if not binstore_base:
            raise BinstoreException(
                "No git-bin.binstorebase is specified. "
                "You probably want to add this to your ~/.gitconfig")
        self.init(binstore_base)

    def init(self, binstore_base):
        """Create (if needed) the .git/binstore symlink to the real store."""
        self.localpath = os.path.join(self.gitrepo.path, ".git", "binstore")
        self.path = os.path.join(binstore_base, self.gitrepo.reponame)
        if not os.path.exists(self.localpath):
            commands = cmd.CompoundCommand(
                cmd.MakeDirectoryCommand(self.path),
                cmd.LinkToFileCommand(self.localpath, self.path),
            )
            commands.execute()
        if not os.path.exists(self.path):
            raise BinstoreException(
                "A binstore.path is set (%s), but it doesn't exist. Weird." % self.path)

    def get_binstore_filename(self, filename):
        """ get the real filename of a given file in the binstore. """
        # Note: this function assumes that the filename is in the binstore.
        # You probably want to check that first.
        if os.path.islink(filename):
            return os.path.realpath(filename)
        digest = utils.md5_file(filename)
        return os.path.join(self.localpath, digest)

    def has(self, filename):
        """ check whether a particular file is in the binstore or not. """
        if os.path.islink(filename):
            link_target = os.path.realpath(filename)
            if os.path.dirname(link_target) != os.path.realpath(self.localpath):
                return False
        return os.path.exists(self.get_binstore_filename(filename))

    def add_file(self, filename):
        """Move a file's contents into the binstore and leave a symlink."""
        binstore_filename = self.get_binstore_filename(filename)
        # TODO: make hash algorithm configurable
        # relative link is needed, here, so it points from the file directly
        # to the .git directory
        relative_link = os.path.relpath(binstore_filename,
                                        os.path.dirname(filename))
        # create only a link if file already exists in binstore
        if os.path.exists(binstore_filename):
            print('WARNING: File with that hash already exists in binstore.')
            if filecmp.cmp(filename, binstore_filename):
                print('         Creating a link to existing file')
                commands = cmd.CompoundCommand(
                    cmd.SafeRemoveCommand(filename),
                    cmd.LinkToFileCommand(filename, relative_link),
                    cmd.GitAddCommand(self.gitrepo, filename),
                )
            else:
                raise ValueError('hash collision found between %s and %s',
                                 filename, binstore_filename)
        else:
            commands = cmd.CompoundCommand(
                cmd.SafeMoveFileCommand(filename, binstore_filename),
                cmd.LinkToFileCommand(filename, relative_link),
                cmd.ChmodCommand(stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH,
                                 binstore_filename),
                cmd.GitAddCommand(self.gitrepo, filename),
            )
        commands.execute()

    def edit_file(self, filename):
        """Replace the binstore symlink with a writable copy of the file."""
        print("edit_file(%s)" % filename)
        print("binstore_filename: %s" % self.get_binstore_filename(filename))
        temp_filename = os.path.join(os.path.dirname(filename),
                                     ".tmp_%s" % os.path.basename(filename))
        print("temp_filename: %s" % temp_filename)
        commands = cmd.CompoundCommand(
            cmd.CopyFileCommand(self.get_binstore_filename(filename),
                                temp_filename),
            cmd.SafeMoveFileCommand(temp_filename, filename),
        )
        commands.execute()

    def is_binstore_link(self, filename):
        """Return True iff filename is a symlink pointing into this binstore."""
        if not os.path.islink(filename):
            return False
        print(os.readlink(filename))
        print(self.localpath)
        if (os.readlink(filename).startswith(self.localpath)
                and self.has(os.readlink(filename))):
            return True
        return False


class CompatabilityFilesystemBinstore(FilesystemBinstore):
    """Binstore layout used by legacy git-bin (no .git/binstore link)."""

    def __init__(self, gitrepo):
        FilesystemBinstore.__init__(self, gitrepo)

    def init(self, binstore_base):
        self.path = os.path.join(binstore_base, self.gitrepo.reponame)
        self.localpath = self.path
        if not os.path.exists(self.path):
            raise BinstoreException(
                "In compatibility mode, but binstore doesn't exist. "
                "What exactly are you trying to pull?")


class UnknownCommandException(Exception):
    pass


class GitBin(object):
    """Command dispatcher tying a git repository to a binstore."""

    def __init__(self, gitrepo, binstore):
        self.gitrepo = gitrepo
        self.binstore = binstore

    def dispatch_command(self, name, arguments):
        """Expand the <file> arguments and invoke the named subcommand."""
        if not hasattr(self, name):
            raise UnknownCommandException(
                "The command '%s' is not known to git-bin" % name)
        filenames = utils.expand_filenames(arguments['<file>'])
        getattr(self, name)(filenames)

    def add(self, filenames):
        """ Add a list of files, specified by their full paths, to the
        binstore. """
        print("GitBin.add(%s)" % filenames)
        for filename in filenames:
            print("\t%s" % filename)
            # we want to add broken symlinks as well
            if not os.path.lexists(filename):
                print("'%s' did not match any files" % filename)
                continue
            # if the file is a link, but the target is not in the binstore
            # (i.e. this was a real symlink originally), we can just add it.
            # This check is before the check for dirs so that we don't
            # traverse symlinked dirs.
            if os.path.islink(filename):
                if not self.binstore.is_binstore_link(filename):
                    # a symlink, but not into the binstore. Just add the link
                    # itself:
                    self.gitrepo.add(filename)
                # whether it's a binstore link or not, we can just continue
                continue
            if not utils.is_file_binary(filename):
                self.gitrepo.add(filename)
                continue
            # TODO: maybe create an empty file with some marking
            # now we just skip it
            if utils.is_file_pipe(filename):
                continue
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print("\trecursing into %s" % filename)
                for root, dirs, files in os.walk(filename):
                    # first add all directories recursively
                    len(dirs) and self.add(
                        [os.path.join(root, dn) for dn in dirs])
                    # now add all the files
                    len(files) and self.add(
                        [os.path.join(root, fn) for fn in files])
                continue
            # at this point, we're only dealing with a file, so let's add it
            # to the binstore
            self.binstore.add_file(filename)

    def init(self, args):
        pass

    # normal git reset works like this:
    # 1. if the file is staged, it is unstaged. The file itself is untouched.
    # 2. if the file is unstaged, nothing happens.
    # To revert local changes in a modified file, you need to perform a
    # `checkout --`.
    # 1. if the file is staged, nothing happens.
    # 2. if the file is tracked and unstaged, it's contents are reset to the
    #    value at head.
    # 3. if the file is untracked, an error occurs.
    # (see: http://git-scm.com/book/en/Git-Basics-Undoing-Things)
    #
    # essentially we need two distinct operations:
    # - unstage (reset): just get it out of the index, but don't touch the
    #   file itself. For a binary file that has just been git-bin-add-ed, but
    #   was not previously tracked, we will want to revert to the original
    #   file contents.
    # - restore (checkout --): change back to the contents at HEAD. For a
    #   binstore file this would mean switching back to the symlink. If there
    #   was actually a modification, we also want to save a 'just-in-case'
    #   file.

    def reset(self, filenames):
        """ Unstage a list of files """
        print("GitBin.reset(%s)" % filenames)
        for filename in filenames:
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print("\trecursing into %s" % filename)
                for root, dirs, files in os.walk(filename):
                    # first reset all directories recursively
                    len(dirs) and self.reset(
                        [os.path.join(root, dn) for dn in dirs])
                    # now reset all the files
                    len(files) and self.reset(
                        [os.path.join(root, fn) for fn in files])
                continue

            status = self.gitrepo.status(filename)
            if not status & git.STATUS_STAGED_MASK == git.STATUS_STAGED:
                # not staged, skip it.
                print("you probably meant to do: git bin checkout -- %s" % filename)
                continue

            # unstage the file:
            self.gitrepo.unstage(filename)

            # key: F=real file; S=symlink; T=typechange; M=modified; s=staged
            # {1} ([F] -> GBAdded[Ss]) -> Untracked[S]
            # {2} ([S] -> GBEdit[TF] -> Modified[TF] -> GBAdded[MSs])
            #       -> Modified[MS]
            new_status = self.gitrepo.status(filename)
            if self.binstore.has(filename) and (
                    new_status & git.STATUS_UNTRACKED or
                    new_status & git.STATUS_MODIFIED):
                # TODO: in case {1} it's possible that we might be leaving an
                # orphan unreferenced file in the binstore. We might want to
                # deal with this.
                commands = cmd.CompoundCommand(
                    cmd.CopyFileCommand(
                        self.binstore.get_binstore_filename(filename),
                        filename),
                )
                commands.execute()

    def checkout(self, filenames):
        """ Revert local modifications to a list of files """
        print("GitBin.checkout(%s)" % filenames)
        for filename in filenames:
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print("\trecursing into %s" % filename)
                for root, dirs, files in os.walk(filename):
                    # first checkout_dashdash all directories recursively
                    len(dirs) and self.checkout(
                        [os.path.join(root, dn) for dn in dirs])
                    # now checkout_dashdash all the files
                    len(files) and self.checkout(
                        [os.path.join(root, fn) for fn in files])
                continue

            status = self.gitrepo.status(filename)
            if (status & git.STATUS_STAGED_MASK) == git.STATUS_STAGED:
                # staged, skip it.
                print("you probably meant to do: git bin reset %s" % filename)
                continue

            if not (status & git.STATUS_CHANGED_MASK):
                # the file hasn't changed, skip it.
                continue

            # The first two cases can just be passed through to regular git
            # checkout --.
            # {1} (GBAdded[MSs] -> Reset[MS])
            # {2} (GBEdit[TF])
            # In the third case, there is some local modification that we
            # should save 'just in case' first.
            # {3} (GBEdit[TF] -> Modified[TF]) (*)
            if (status & git.STATUS_TYPECHANGED) and not self.binstore.has(filename):
                justincase_filename = os.path.join(
                    "/tmp",
                    "%s.%s.justincase" % (filename, utils.md5_file(filename)))
                # Save the locally *modified* file before restore clobbers
                # it. (Copying the binstore path here would be wrong: when
                # the hash is not in the binstore that path does not exist.)
                commands = cmd.CompoundCommand(
                    cmd.CopyFileCommand(filename, justincase_filename),
                )
                commands.execute()
            self.gitrepo.restore(filename)

    def edit(self, filenames):
        """ Retrieve file contents for editing """
        print("GitBin.edit(%s)" % filenames)
        for filename in filenames:
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print("\trecursing into %s" % filename)
                for root, dirs, files in os.walk(filename):
                    # first edit all directories recursively
                    len(dirs) and self.edit(
                        [os.path.join(root, dn) for dn in dirs])
                    # now edit all the files
                    len(files) and self.edit(
                        [os.path.join(root, fn) for fn in files])
                continue

            if os.path.islink(filename) and self.binstore.has(filename):
                self.binstore.edit_file(filename)


# TODO:
# - implement git operations
# - implement binstore
#   - use symlink in .git/ folder
#   - reverse lookups
# - implement offline/online commands
# - use a .gitbin file to store parameters
#   - init command?
#   - if file doesn't exist, suggest creating it on first use
#   - this file should be committed
# - detect online binstore available. if so, and was offline, suggest going
#   online.


def print_exception(prefix, exception, verbose=False):
    """Print a one-line error; with verbose=True also dump the traceback."""
    print("%s: %s" % (prefix, exception))
    if verbose:
        import traceback
        traceback.print_exc()


def get_binstore(repo):
    """Factory for the binstore implementation used by this repository."""
    return FilesystemBinstore(repo)


def _main(args):
    """Run the parsed docopt arguments against a GitBin instance."""
    try:
        gitrepo = git.GitRepo()
        binstore = get_binstore(gitrepo)
        gitbin = GitBin(gitrepo, binstore)

        # note: named `command`, not `cmd`, to avoid shadowing the
        # `commands as cmd` module import.
        command = args['<command>']
        if args['init']:
            gitbin.dispatch_command('init', args)
        elif command is not None:
            gitbin.dispatch_command(command, args)
    except git.GitException as e:
        print_exception("git", e, args['--debug'])
        print(__doc__)
        exit(1)
    except BinstoreException as e:
        print_exception("binstore", e, args['--debug'])
        exit(1)
    except UnknownCommandException:
        print(__doc__)
        exit(1)


def main():
    version = pkg_resources.require("gitbin")[0].version
    args = docopt(__doc__, version=version, options_first=True)
    if args:
        _main(args)


if __name__ == '__main__':
    main()
git-bin init git-bin (-h|--help|--version) Commands: add store file in binstore and add it's link to the index edit retrieve a file from the binstore for local edit checkout restore the link to the last added version of the file init Options: --help -h print this help --version print version and exit --verbose -v enable verbose printing --debug debug mode ''' # ''' # Usage: # git-bin (add|edit|reset|checkout --) <file>... # git-bin (-h|--help|--version) # ''' import os.path import stat import filecmp import pkg_resources from docopt import docopt import utils import commands as cmd import git class Binstore(object): def __init__(self): pass def init(self): """ Initialize git-bin for this git repository.""" raise NotImplementedError def add_file(self, filename): """ Add the specified file to the binstore. """ raise NotImplementedError def edit_file(self, filename): """ Retrieve the specified file for editing. """ raise NotImplementedError def reset_file(self, filename): """ Reset the specified file. """ def __contains__(self, item): """ Test whether a given item is in this binstore. The item may be a hash or a symlink in the repo """ raise NotImplementedError def available(self): """ Test to see whether the binstore can be reached. """ raise NotImplementedError class SSHFSBinstore(Binstore): pass class BinstoreException(Exception): pass class FilesystemBinstore(Binstore): def __init__(self, gitrepo): Binstore.__init__(self) self.gitrepo = gitrepo # retrieve the binstore path from the .git/config # first look for the binstore base in the git config tree. binstore_base = self.gitrepo.config.get("git-bin", "binstorebase", None) # if that fails, try the environment variable binstore_base = binstore_base or os.environ.get("BINSTORE_BASE", binstore_base) if not binstore_base: raise BinstoreException("No git-bin.binstorebase is specified. 
You probably want to add this to your ~/.gitconfig") self.init(binstore_base) def init(self, binstore_base): self.localpath = os.path.join(self.gitrepo.path, ".git", "binstore") #self.path = self.gitrepo.config.get("binstore", "path", None) self.path = os.path.join(binstore_base, self.gitrepo.reponame) if not os.path.exists(self.localpath): commands = cmd.CompoundCommand( cmd.MakeDirectoryCommand(self.path), cmd.LinkToFileCommand(self.localpath, self.path), ) commands.execute() #self.gitrepo.config.set("binstore", "path", self.path) if not os.path.exists(self.path): raise BinstoreException("A binstore.path is set (%s), but it doesn't exist. Weird." % self.path) def get_binstore_filename(self, filename): """ get the real filename of a given file in the binstore. """ # Note: this function assumes that the filename is in the binstore. You # probably want to check that first. if os.path.islink(filename): #return os.readlink(filename) return os.path.realpath(filename) digest = utils.md5_file(filename) return os.path.join(self.localpath, digest) def has(self, filename): """ check whether a particular file is in the binstore or not. 
""" if os.path.islink(filename): link_target = os.path.realpath(filename) if os.path.dirname(link_target) != os.path.realpath(self.localpath): return False return os.path.exists(self.get_binstore_filename(filename)) def add_file(self, filename): binstore_filename = self.get_binstore_filename(filename) # TODO: make hash algorithm configurable # relative link is needed, here, so it points from the file directly to # the .git directory relative_link = os.path.relpath(binstore_filename, os.path.dirname(filename)) # create only a link if file already exists in binstore if os.path.exists(binstore_filename): print('WARNING: File with that hash already exists in binstore.') if filecmp.cmp(filename, binstore_filename): print(' Creating a link to existing file') commands = cmd.CompoundCommand( cmd.SafeRemoveCommand(filename), cmd.LinkToFileCommand(filename, relative_link), cmd.GitAddCommand(self.gitrepo, filename), ) else: raise ValueError('hash collision found between %s and %s', filename, binstore_filename) else: commands = cmd.CompoundCommand( cmd.SafeMoveFileCommand(filename, binstore_filename), cmd.LinkToFileCommand(filename, relative_link), cmd.ChmodCommand(stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH, binstore_filename), cmd.GitAddCommand(self.gitrepo, filename), ) commands.execute() def edit_file(self, filename): print "edit_file(%s)" % filename print "binstore_filename: %s" % self.get_binstore_filename(filename) temp_filename = os.path.join(os.path.dirname(filename), ".tmp_%s" % os.path.basename(filename)) print "temp_filename: %s" % temp_filename commands = cmd.CompoundCommand( cmd.CopyFileCommand(self.get_binstore_filename(filename), temp_filename), cmd.SafeMoveFileCommand(temp_filename, filename), ) commands.execute() def is_binstore_link(self, filename): if not os.path.islink(filename): return False print os.readlink(filename) print self.localpath if (os.readlink(filename).startswith(self.localpath) and self.has(os.readlink(filename))): return True return False 
class CompatabilityFilesystemBinstore(FilesystemBinstore):
    """Binstore for repositories using the legacy layout: the 'local' path is
    the shared path itself (no .git/binstore symlink), and nothing is created."""

    def __init__(self, gitrepo):
        FilesystemBinstore.__init__(self, gitrepo)

    def init(self, binstore_base):
        # In compatibility mode the shared store must already exist.
        self.path = os.path.join(binstore_base, self.gitrepo.reponame)
        self.localpath = self.path
        if not os.path.exists(self.path):
            raise BinstoreException("In compatibility mode, but binstore doesn't exist. What exactly are you trying to pull?")


class UnknownCommandException(Exception):
    """Raised when the CLI asks for a subcommand GitBin doesn't implement."""
    pass


class GitBin(object):
    """Dispatcher for git-bin subcommands (add/reset/checkout/edit/...),
    operating on a git repository plus its binstore."""

    def __init__(self, gitrepo, binstore):
        self.gitrepo = gitrepo
        self.binstore = binstore

    def dispatch_command(self, name, arguments):
        # Subcommands are plain methods on this class; look them up by name.
        if not hasattr(self, name):
            raise UnknownCommandException(
                "The command '%s' is not known to git-bin" % name)
        # `arguments` comes from docopt; '<file>' holds the positional paths.
        filenames = utils.expand_filenames(arguments['<file>'])
        getattr(self, name)(filenames)

    def add(self, filenames):
        """ Add a list of files, specified by their full paths, to the binstore. """
        print "GitBin.add(%s)" % filenames
        for filename in filenames:
            print "\t%s" % filename
            # we want to add broken symlinks as well
            if not os.path.lexists(filename):
                print "'%s' did not match any files" % filename
                continue
            # if the file is a link, but the target is not in the binstore (i.e.
            # this was a real symlink originally), we can just add it. This
            # check is before the check for dirs so that we don't traverse
            # symlinked dirs.
            if os.path.islink(filename):
                if not self.binstore.is_binstore_link(filename):
                    # a symlink, but not into the binstore. Just add the link
                    # itself:
                    self.gitrepo.add(filename)
                # whether it's a binstore link or not, we can just continue
                continue
            # text files bypass the binstore and go through plain `git add`
            if not utils.is_file_binary(filename):
                self.gitrepo.add(filename)
                continue
            # TODO: maybe create an empty file with some marking
            # now we just skip it
            if utils.is_file_pipe(filename):
                continue
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print "\trecursing into %s" % filename
                for root, dirs, files in os.walk(filename):
                    # first add all directories recursively
                    # (`len(xs) and f(...)` is a guard: only recurse when non-empty)
                    len(dirs) and self.add([os.path.join(root, dn) for dn in dirs])
                    # now add all the files
                    len(files) and self.add([os.path.join(root, fn) for fn in files])
                continue
            # at this point, we're only dealing with a file, so let's add it to
            # the binstore
            self.binstore.add_file(filename)

    def init(self, args):
        # placeholder: repository/binstore setup happens in the Binstore class
        pass

    # normal git reset works like this:
    # 1. if the file is staged, it is unstaged. The file itself is untouched.
    # 2. if the file is unstaged, nothing happens.
    # To revert local changes in a modified file, you need to perform a
    # `checkout --`.
    # 1. if the file is staged, nothing happens.
    # 2. if the file is tracked and unstaged, it's contents are reset to the
    #    value at head.
    # 3. if the file is untracked, an error occurs.
    # (see: http://git-scm.com/book/en/Git-Basics-Undoing-Things)
    #
    # legacy git-bin implemented the following logic:
    # 1. if the file is not binary (note that staged/unstaged is not
    #    differentiated):
    # 1.1 if the file is added, a `git reset HEAD` is performed.
    # 1.2 if the file is modified, a `git checkout --` is performed.
    # 2. if the file is a binary file:
    # 2.1 if the file is added, the file is copied back from the binstore and
    #     a `git reset HEAD` is performed.
    # 2.2 if the file is modified
    # 2.2.1 and its hash is in the binstore: a `git checkout --` is performed.
    # 2.2.1 but its hash is not in the binstore and there is a typechange, a
    #       copy of the file is saved in /tmp and then the `git checkout --` is
    #       performed.
    #
    # essentially we need two distinct operations:
    # - unstage: just get it out of the index, but don't touch the file
    #   itself.
    #   For a binary file that has just been git-bin-add-ed, but was not
    #   previously tracked, we will want to revert to the original file contents.
    # This more closely resembles the intention of the regular unstage operation
    # - restore: change back to the contents at HEAD.
    #   For a binstore file this would mean switching back to the
    #   symlink. If there was actually a modification, we also want to save a
    #   'just-in-case' file.
    # if we use the standard git nomenclature:
    # - unstage -> reset
    # - restore -> checkout --
    # let's implement these operations separately. We might implement a
    # compatibility mode.

    def reset(self, filenames):
        """ Unstage a list of files """
        print "GitBin.reset(%s)" % filenames
        for filename in filenames:
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print "\trecursing into %s" % filename
                for root, dirs, files in os.walk(filename):
                    # first reset all directories recursively
                    len(dirs) and self.reset([os.path.join(root, dn) for dn in dirs])
                    # now reset all the files
                    len(files) and self.reset([os.path.join(root, fn) for fn in files])
                continue

            status = self.gitrepo.status(filename)
            if not status & git.STATUS_STAGED_MASK == git.STATUS_STAGED:
                # not staged, skip it.
                print "you probably meant to do: git bin checkout -- %s" % filename
                continue

            # unstage the file:
            self.gitrepo.unstage(filename)

            # key: F=real file; S=symlink; T=typechange; M=modified; s=staged
            # {1} ([F] -> GBAdded[Ss]) -> Untracked[S]
            # {2} ([S] -> GBEdit[TF] -> Modified[TF] -> GBAdded[MSs])
            #     -> Modified[MS]
            # After unstaging, a file whose content lives in the binstore is
            # copied back so the working tree shows the original contents.
            new_status = self.gitrepo.status(filename)
            if self.binstore.has(filename) and (
                    new_status & git.STATUS_UNTRACKED or
                    new_status & git.STATUS_MODIFIED):
                # TODO: in case {1} it's possible that we might be leaving an
                # orphan unreferenced file in the binstore. We might want to
                # deal with this.
                commands = cmd.CompoundCommand(
                    cmd.CopyFileCommand(
                        self.binstore.get_binstore_filename(filename),
                        filename),
                )
                commands.execute()

    def checkout(self, filenames):
        """ Revert local modifications to a list of files """
        print "GitBin.checkout(%s)" % filenames
        for filename in filenames:
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print "\trecursing into %s" % filename
                for root, dirs, files in os.walk(filename):
                    # first checkout_dashdash all directories recursively
                    len(dirs) and self.checkout([os.path.join(root, dn) for dn in dirs])
                    # now checkout_dashdash all the files
                    len(files) and self.checkout([os.path.join(root, fn) for fn in files])
                continue

            status = self.gitrepo.status(filename)
            if (status & git.STATUS_STAGED_MASK) == git.STATUS_STAGED:
                # staged, skip it.
                print "you probably meant to do: git bin reset %s" % filename
                continue

            if not (status & git.STATUS_CHANGED_MASK):
                # the file hasn't changed, skip it.
                continue

            # The first two cases can just be passed through to regular git
            # checkout --.
            # {1} (GBAdded[MSs] -> Reset[MS])
            # {2} (GBEdit[TF])
            # In the third case, there is some local modification that we should
            # save 'just in case' first.
            # {3} (GBEdit[TF] -> Modified[TF]) (*)
            if (status & git.STATUS_TYPECHANGED) and not self.binstore.has(filename):
                justincase_filename = os.path.join(
                    "/tmp", "%s.%s.justincase" % (filename, utils.md5_file(filename)))
                commands = cmd.CompoundCommand(
                    cmd.CopyFileCommand(
                        # self.binstore.get_binstore_filename(filename),
                        filename,
                        justincase_filename),
                )
                commands.execute()
            self.gitrepo.restore(filename)

    def edit(self, filenames):
        """ Retrieve file contents for editing """
        print "GitBin.edit(%s)" % filenames
        for filename in filenames:
            # if the filename is a directory, recurse into it.
            # TODO: maybe make recursive directory crawls optional/configurable
            if os.path.isdir(filename):
                print "\trecursing into %s" % filename
                for root, dirs, files in os.walk(filename):
                    # first edit all directories recursively
                    len(dirs) and self.edit([os.path.join(root, dn) for dn in dirs])
                    # now edit all the files
                    len(files) and self.edit([os.path.join(root, fn) for fn in files])
                continue
            # only binstore symlinks can be "edited" (materialized as copies)
            if os.path.islink(filename) and self.binstore.has(filename):
                self.binstore.edit_file(filename)

# TODO:
#   - implement git operations
#   - implement binstore
#       - use symlink in .git/ folder
#   - reverse lookups
#   - implement offline/online commands
#       - use a .gitbin file to store parameters
#           - init command?
#           - if file doesn't exist, suggest creating it on first use
#           - this file should be committed
#   - detect online binstore available. if so, and was offline, suggest going online.


def print_exception(prefix, exception, verbose=False):
    """Print a one-line error report; full traceback only in verbose mode."""
    print "%s: %s" % (prefix, exception)
    if verbose:
        import traceback
        traceback.print_exc()


def get_binstore(repo):
    # factory hook: the single place where the binstore implementation is chosen
    return FilesystemBinstore(repo)


def _main(args):
    """Wire up repo + binstore and dispatch the docopt-parsed command."""
    try:
        gitrepo = git.GitRepo()
        binstore = get_binstore(gitrepo)
        gitbin = GitBin(gitrepo, binstore)

        # NOTE(review): this local name shadows the module-level `cmd` library
        # within this function.
        cmd = args['<command>']
        if args['init']:
            gitbin.dispatch_command('init', args)
        elif cmd is not None:
            gitbin.dispatch_command(cmd, args)
    except git.GitException, e:
        print_exception("git", e, args['--debug'])
        print(__doc__)
        exit(1)
    except BinstoreException, e:
        print_exception("binstore", e, args['--debug'])
        exit(1)
    except UnknownCommandException, e:
        # unknown subcommand: show usage
        print(__doc__)
        exit(1)


def main():
    #args = build_options_parser().parse_args()
    import sys
    version = pkg_resources.require("gitbin")[0].version
    args = docopt(__doc__, version=version, options_first=True)
    if args:
        _main(args)

if __name__ == '__main__':
    main()
""" **hep_ml.losses** contains different loss functions to use in gradient boosting. Apart from standard classification losses, **hep_ml** contains losses for uniform classification (see :class:`BinFlatnessLossFunction`, :class:`KnnFlatnessLossFunction`, :class:`KnnAdaLossFunction`) and for ranking (see :class:`RankBoostLossFunction`) **Interface** Loss functions inside **hep_ml** are stateful estimators and require initial fitting, which is done automatically inside gradient boosting. All loss function should be derived from AbstractLossFunction and implement this interface. Examples ________ Training gradient boosting, optimizing LogLoss and using all features >>> from hep_ml.gradientboosting import UGradientBoostingClassifier, LogLossFunction >>> classifier = UGradientBoostingClassifier(loss=LogLossFunction(), n_estimators=100) >>> classifier.fit(X, y, sample_weight=sample_weight) Using composite loss function and subsampling: >>> loss = CompositeLossFunction() >>> classifier = UGradientBoostingClassifier(loss=loss, subsample=0.5) To get uniform predictions in mass in background (note that mass should not present in features): >>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=0, train_features=['pt', 'flight_time']) >>> classifier = UGradientBoostingClassifier(loss=loss) To get uniform predictions in both signal and background: >>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=[0, 1], train_features=['pt', 'flight_time']) >>> classifier = UGradientBoostingClassifier(loss=loss) """ from __future__ import division, print_function, absolute_import import numbers import warnings import numpy import pandas from scipy import sparse from scipy.special import expit from sklearn.utils.validation import check_random_state from sklearn.base import BaseEstimator from .commonutils import compute_knn_indices_of_signal, check_sample_weight, check_uniform_label, weighted_quantile from .metrics_utils import 
bin_to_group_indices, compute_bin_indices, compute_group_weights, \ group_indices_to_groups_matrix __author__ = 'Alex Rogozhnikov' __all__ = [ 'AbstractLossFunction', 'MSELossFunction', 'MAELossFunction', 'LogLossFunction', 'AdaLossFunction', 'CompositeLossFunction', 'BinFlatnessLossFunction', 'KnnFlatnessLossFunction', 'KnnAdaLossFunction', 'RankBoostLossFunction' ] def _compute_positions(y_pred, sample_weight): """ For each event computes it position among other events by prediction. position = (weighted) part of elements with lower predictions => position belongs to [0, 1] This function is very close to `scipy.stats.rankdata`, but supports weights. """ order = numpy.argsort(y_pred) ordered_weights = sample_weight[order] ordered_weights /= float(numpy.sum(ordered_weights)) efficiencies = (numpy.cumsum(ordered_weights) - 0.5 * ordered_weights) return efficiencies[numpy.argsort(order)] class AbstractLossFunction(BaseEstimator): """ This is base class for loss functions used in `hep_ml`. Main differences compared to `scikit-learn` loss functions: 1. losses are stateful, and may require fitting of training data before usage. 2. thus, when computing gradient, hessian, one shall provide predictions of all events. 3. losses are object that shall be passed as estimators to gradient boosting (see examples). 4. only two-class case is supported, and different classes may have different role and meaning. 
""" def fit(self, X, y, sample_weight): """ This method is optional, it is called before all the others.""" return self def negative_gradient(self, y_pred): """The y_pred should contain all the events passed to `fit` method, moreover, the order should be the same""" raise NotImplementedError() def __call__(self, y_pred): """The y_pred should contain all the events passed to `fit` method, moreover, the order should be the same""" raise NotImplementedError() def prepare_tree_params(self, y_pred): """Prepares parameters for regression tree that minimizes MSE :param y_pred: contains predictions for all the events passed to `fit` method, moreover, the order should be the same :return: tuple (tree_target, tree_weight) with target and weight to be used in decision tree """ return self.negative_gradient(y_pred), numpy.ones(len(y_pred)) def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): """ Method for pruning. Loss function can prepare better values for leaves :param terminal_regions: indices of terminal regions of each event. :param leaf_values: numpy.array, current mapping of leaf indices to prediction values. :param y_pred: predictions before adding new tree. :return: numpy.array with new prediction values for all leaves. """ return leaf_values def compute_optimal_step(self, y_pred): """ Compute optimal global step. This method is typically used to make optimal step before fitting trees to reduce variance. :param y_pred: initial predictions, numpy.array of shape [n_samples] :return: float """ return 0. class HessianLossFunction(AbstractLossFunction): """Loss function with diagonal hessian, provides uses Newton-Raphson step to update trees. """ def __init__(self, regularization=5.): """ :param regularization: float, penalty for leaves with few events, corresponds roughly to the number of added events of both classes to each leaf. 
""" self.regularization = regularization def fit(self, X, y, sample_weight): self.regularization_ = self.regularization * numpy.mean(sample_weight) return self def hessian(self, y_pred): """ Returns diagonal of hessian matrix. :param y_pred: numpy.array of shape [n_samples] with events passed in the same order as in `fit`. :return: numpy.array of shape [n_sampels] with second derivatives with respect to each prediction. """ raise NotImplementedError('Override this method in loss function.') def prepare_tree_params(self, y_pred): grad = self.negative_gradient(y_pred) hess = self.hessian(y_pred) + 0.01 return grad / hess, hess def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): """ This expression comes from optimization of second-order approximation of loss function.""" min_length = len(leaf_values) nominators = numpy.bincount(terminal_regions, weights=self.negative_gradient(y_pred), minlength=min_length) denominators = numpy.bincount(terminal_regions, weights=self.hessian(y_pred), minlength=min_length) return nominators / (denominators + self.regularization_) def compute_optimal_step(self, y_pred): """ Optimal step is computed using Newton-Raphson algorithm (10 iterations). :param y_pred: predictions (usually, zeros) :return: float """ terminal_regions = numpy.zeros(len(y_pred), dtype='int') leaf_values = numpy.zeros(shape=1) step = 0. 
for _ in range(10): step_ = self.prepare_new_leaves_values(terminal_regions, leaf_values=leaf_values, y_pred=y_pred + step)[0] step += 0.5 * step_ return step # region Classification losses class AdaLossFunction(HessianLossFunction): """ AdaLossFunction is the same as Exponential Loss Function (aka exploss) """ def fit(self, X, y, sample_weight): self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True, normalize_by_class=True) self.y_signed = 2 * y - 1 HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight) return self def __call__(self, y_pred): return numpy.sum(self.sample_weight * numpy.exp(- self.y_signed * y_pred)) def negative_gradient(self, y_pred): return self.y_signed * self.sample_weight * numpy.exp(- self.y_signed * y_pred) def hessian(self, y_pred): return self.sample_weight * numpy.exp(- self.y_signed * y_pred) def prepare_tree_params(self, y_pred): return self.y_signed, self.hessian(y_pred) class LogLossFunction(HessianLossFunction): """Logistic loss function (logloss), aka binomial deviance, aka cross-entropy, aka log-likelihood loss. """ def fit(self, X, y, sample_weight): self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True, normalize_by_class=True) self.y_signed = 2 * y - 1 HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight) return self def __call__(self, y_pred): return numpy.sum(self.sample_weight * numpy.logaddexp(0, - self.y_signed * y_pred)) def negative_gradient(self, y_pred): return self.y_signed * self.sample_weight * expit(- self.y_signed * y_pred) def hessian(self, y_pred): expits = expit(self.y_signed * y_pred) return self.sample_weight * expits * (1 - expits) def prepare_tree_params(self, y_pred): return self.y_signed * expit(- self.y_signed * y_pred), self.sample_weight class CompositeLossFunction(HessianLossFunction): """ Composite loss function is defined as exploss for backgorund events and logloss for signal with proper constants. 
    Such kind of loss functions is very useful to optimize AMS or in situations where very clean signal is expected.
    """

    def fit(self, X, y, sample_weight):
        self.y = y
        self.sample_weight = check_sample_weight(y, sample_weight=sample_weight,
                                                 normalize=True, normalize_by_class=True)
        self.y_signed = 2 * y - 1
        # per-class weight vectors: sig_w is nonzero only for signal (y == 1),
        # bck_w only for background (y == 0)
        self.sig_w = (y == 1) * self.sample_weight
        self.bck_w = (y == 0) * self.sample_weight
        HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight)
        return self

    def __call__(self, y_pred):
        # logloss on signal + exploss (with halved slope) on background
        result = numpy.sum(self.sig_w * numpy.logaddexp(0, -y_pred))
        result += numpy.sum(self.bck_w * numpy.exp(0.5 * y_pred))
        return result

    def negative_gradient(self, y_pred):
        result = self.sig_w * expit(- y_pred)
        result -= 0.5 * self.bck_w * numpy.exp(0.5 * y_pred)
        return result

    def hessian(self, y_pred):
        expits = expit(- y_pred)
        return self.sig_w * expits * (1 - expits) + self.bck_w * 0.25 * numpy.exp(0.5 * y_pred)

# endregion


# region Regression Losses

class MSELossFunction(HessianLossFunction):
    r""" Mean squared error loss function, used for regression.

    :math:`\text{loss} = \sum_i (y_i - \hat{y}_i)^2`
    """

    def fit(self, X, y, sample_weight):
        self.y = y
        self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True)
        HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)
        return self

    def __call__(self, y_pred):
        return 0.5 * numpy.sum(self.sample_weight * (self.y - y_pred) ** 2)

    def negative_gradient(self, y_pred):
        return self.sample_weight * (self.y - y_pred)

    def hessian(self, y_pred):
        return self.sample_weight

    def prepare_tree_params(self, y_pred):
        return self.y - y_pred, self.sample_weight

    def compute_optimal_step(self, y_pred):
        # for MSE the optimal constant shift is the weighted mean residual
        return numpy.average(self.y - y_pred, weights=self.sample_weight)


class MAELossFunction(AbstractLossFunction):
    r""" Mean absolute error loss function, used for regression.

    :math:`\text{loss} = \sum_i |y_i - \hat{y}_i|`
    """

    def fit(self, X, y, sample_weight):
        self.y = y
        self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True)
        return self

    def __call__(self, y_pred):
        # NOTE(review): carries a 0.5 factor not reflected in the class docstring.
        return 0.5 * numpy.sum(self.sample_weight * numpy.abs(self.y - y_pred))

    def negative_gradient(self, y_pred):
        return self.sample_weight * numpy.sign(self.y - y_pred)

    def prepare_tree_params(self, y_pred):
        return numpy.sign(self.y - y_pred), self.sample_weight

    def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
        # TODO use weighted median
        new_leaf_values = numpy.zeros(len(leaf_values), dtype='float')
        target = (self.y - y_pred)
        for terminal_region in range(len(leaf_values)):
            values = target[terminal_regions == terminal_region]
            # prepend a zero so an empty leaf gets a well-defined median of 0
            values = numpy.insert(values, [0], [0])
            new_leaf_values[terminal_region] = numpy.median(values)
        return new_leaf_values

    def compute_optimal_step(self, y_pred):
        # the weighted median of residuals minimizes absolute error
        return weighted_quantile(self.y - y_pred, quantiles=[0.5], sample_weight=self.sample_weight)[0]

# endregion RegressionLosses


class RankBoostLossFunction(HessianLossFunction):
    def __init__(self, request_column, penalty_power=1., update_iterations=1):
        r"""RankBoostLossFunction is target of optimization in RankBoost [RB]_ algorithm,
        which was developed for ranking and introduces penalties for wrong order of predictions.

        However, this implementation goes further and there is selection of optimal leaf values based
        on iterative procedure. This implementation also uses matrix decomposition of loss function,
        which is very effective, when labels are from some very limited set (usually it is 0, 1, 2, 3, 4)

        :math:`\text{loss} = \sum_{ij} w_{ij} exp(pred_i - pred_j)`,

        :math:`w_{ij} = ( \alpha + \beta * [query_i = query_j]) R_{label_i, label_j}`, where
        :math:`R_{ij} = 0` if :math:`i \leq j`, else :math:`R_{ij} = (i - j)^{p}`

        :param str request_column: name of column with search query ids. The higher attention is paid to samples with same query.
:param float penalty_power: describes dependence of penalty on the difference between target labels. :param int update_iterations: number of minimization steps to provide optimal values. .. [RB] Y. Freund et al. An Efficient Boosting Algorithm for Combining Preferences """ self.update_terations = update_iterations self.penalty_power = penalty_power self.request_column = request_column HessianLossFunction.__init__(self, regularization=0.1) def fit(self, X, y, sample_weight): self.queries = X[self.request_column] self.y = y self.possible_queries, normed_queries = numpy.unique(self.queries, return_inverse=True) self.possible_ranks, normed_ranks = numpy.unique(self.y, return_inverse=True) self.lookups = [normed_ranks, normed_queries * len(self.possible_ranks) + normed_ranks] self.minlengths = [len(self.possible_ranks), len(self.possible_ranks) * len(self.possible_queries)] self.rank_penalties = numpy.zeros([len(self.possible_ranks), len(self.possible_ranks)], dtype=float) for r1 in self.possible_ranks: for r2 in self.possible_ranks: if r1 < r2: self.rank_penalties[r1, r2] = (r2 - r1) ** self.penalty_power self.penalty_matrices = [] self.penalty_matrices.append(self.rank_penalties / numpy.sqrt(1 + len(y))) n_queries = numpy.bincount(normed_queries) assert len(n_queries) == len(self.possible_queries) self.penalty_matrices.append( sparse.block_diag([self.rank_penalties * 1. / numpy.sqrt(1 + nq) for nq in n_queries])) HessianLossFunction.fit(self, X, y, sample_weight=sample_weight) def __call__(self, y_pred): y_pred -= y_pred.mean() pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) result = 0. 
for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) result += pos_stats.T.dot(penalty_matrix.dot(neg_stats)) return result def negative_gradient(self, y_pred): y_pred -= y_pred.mean() pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) gradient = numpy.zeros(len(y_pred), dtype=float) for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) gradient += pos_exponent * penalty_matrix.dot(neg_stats)[lookup] gradient -= neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup] return - gradient def hessian(self, y_pred): y_pred -= y_pred.mean() pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) result = numpy.zeros(len(y_pred), dtype=float) for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) result += pos_exponent * penalty_matrix.dot(neg_stats)[lookup] result += neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup] return result def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): leaves_values = numpy.zeros(len(leaf_values)) for _ in range(self.update_terations): y_test = y_pred + leaves_values[terminal_regions] new_leaves_values = self._prepare_new_leaves_values(terminal_regions, leaves_values, y_test) leaves_values = 0.5 * new_leaves_values + leaves_values return leaves_values def _prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): """ For each event we shall represent loss as w_plus * e^{pred} + w_minus * e^{-pred}, then we 
are able to construct optimal step. Pay attention: this is not an optimal, since we are ignoring, that some events belong to the same leaf """ pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) w_plus = numpy.zeros(len(y_pred), dtype=float) w_minus = numpy.zeros(len(y_pred), dtype=float) for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) w_plus += penalty_matrix.dot(neg_stats)[lookup] w_minus += penalty_matrix.T.dot(pos_stats)[lookup] w_plus_leaf = numpy.bincount(terminal_regions, weights=w_plus * pos_exponent) + self.regularization w_minus_leaf = numpy.bincount(terminal_regions, weights=w_minus * neg_exponent) + self.regularization return 0.5 * numpy.log(w_minus_leaf / w_plus_leaf) # region MatrixLossFunction class AbstractMatrixLossFunction(HessianLossFunction): def __init__(self, uniform_features, regularization=5.): r"""AbstractMatrixLossFunction is a base class to be inherited by other loss functions, which choose the particular A matrix and w vector. 
The formula of loss is: \text{loss} = \sum_i w_i * exp(- \sum_j a_ij y_j score_j)
        """
        self.uniform_features = uniform_features
        # real matrix and vector will be computed during fitting
        self.A = None
        self.A_t = None
        self.w = None
        HessianLossFunction.__init__(self, regularization=regularization)

    def fit(self, X, y, sample_weight):
        """This method is used to compute A matrix and w based on train dataset"""
        assert len(X) == len(y), "different size of arrays"
        A, w = self.compute_parameters(X, y, sample_weight)
        self.A = sparse.csr_matrix(A)
        self.A_t = sparse.csr_matrix(self.A.transpose())
        # elementwise square of A^T, used by the diagonal hessian
        self.A_t_sq = self.A_t.multiply(self.A_t)
        self.w = numpy.array(w)
        assert A.shape[0] == len(w), "inconsistent sizes"
        assert A.shape[1] == len(X), "wrong size of matrix"
        self.y_signed = numpy.array(2 * y - 1)
        HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)
        return self

    def __call__(self, y_pred):
        """Computing the loss itself"""
        assert len(y_pred) == self.A.shape[1], "something is wrong with sizes"
        exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
        return numpy.sum(self.w * exponents)

    def negative_gradient(self, y_pred):
        """Computing negative gradient"""
        assert len(y_pred) == self.A.shape[1], "something is wrong with sizes"
        exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
        result = self.A_t.dot(self.w * exponents) * self.y_signed
        return result

    def hessian(self, y_pred):
        assert len(y_pred) == self.A.shape[1], 'something wrong with sizes'
        exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
        result = self.A_t_sq.dot(self.w * exponents)
        return result

    def compute_parameters(self, trainX, trainY, trainW):
        """This method should be overloaded in descendant, and should return A, w (matrix and vector)"""
        raise NotImplementedError()

    def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
        exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
        # current approach uses Newton-Raphson step
        # TODO compare with iterative suboptimal choice of value, based on exp(a x) ~ a exp(x)
        regions_matrix = sparse.csc_matrix((self.y_signed, [numpy.arange(len(self.y_signed)), terminal_regions]))
        # Z is matrix of shape [n_exponents, n_terminal_regions]
        # with contributions of each terminal region to each exponent
        Z = self.A.dot(regions_matrix)
        Z = Z.T
        nominator = Z.dot(self.w * exponents)
        denominator = Z.multiply(Z).dot(self.w * exponents)
        # 1e-5 avoids division by zero for empty regions
        return nominator / (denominator + 1e-5)


class KnnAdaLossFunction(AbstractMatrixLossFunction):
    def __init__(self, uniform_features, uniform_label, knn=10, row_norm=1.):
        r"""Modification of AdaLoss to achieve uniformity of predictions

        :math:`\text{loss} = \sum_i w_i * exp(- \sum_j a_{ij} y_j score_j)`

        `A` matrix is square, each row corresponds to a single event in train dataset,
        in each row we put ones to the closest neighbours if this event from uniform class.
        See [BU]_ for details.

        :param list[str] uniform_features: the features, along which uniformity is desired
        :param int|list[int] uniform_label: the label (labels) of 'uniform classes'
        :param int knn: the number of nonzero elements in the row, corresponding to event in 'uniform class'

        .. [BU] A. Rogozhnikov et al, New approaches for boosting to uniformity
            http://arxiv.org/abs/1410.4140
        """
        self.knn = knn
        self.row_norm = row_norm
        self.uniform_label = check_uniform_label(uniform_label)
        AbstractMatrixLossFunction.__init__(self, uniform_features)

    def compute_parameters(self, trainX, trainY, trainW):
        A_parts = []
        w_parts = []
        for label in self.uniform_label:
            label_mask = numpy.array(trainY == label)
            n_label = numpy.sum(label_mask)
            knn_indices = compute_knn_indices_of_signal(trainX[self.uniform_features], label_mask, self.knn)
            knn_indices = knn_indices[label_mask, :]
            ind_ptr = numpy.arange(0, n_label * self.knn + 1, self.knn)
            column_indices = knn_indices.flatten()
            # each row spreads weight row_norm uniformly over the knn neighbours
            data = numpy.ones(n_label * self.knn, dtype=float) * self.row_norm / self.knn
            A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
            w_part = numpy.mean(numpy.take(trainW, knn_indices), axis=1)
            assert A_part.shape[0] == len(w_part)
            A_parts.append(A_part)
            w_parts.append(w_part)

        for label in set(trainY) - set(self.uniform_label):
            # non-uniform classes get plain AdaLoss rows: a single entry on the diagonal
            label_mask = trainY == label
            n_label = numpy.sum(label_mask)
            ind_ptr = numpy.arange(0, n_label + 1)
            column_indices = numpy.where(label_mask)[0].flatten()
            data = numpy.ones(n_label, dtype=float) * self.row_norm
            A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
            w_part = trainW[label_mask]
            A_parts.append(A_part)
            w_parts.append(w_part)

        A = sparse.vstack(A_parts, format='csr', dtype=float)
        w = numpy.concatenate(w_parts)
        assert A.shape == (len(trainX), len(trainX))
        return A, w

# endregion


# region ReweightLossFunction

# Mathematically at each stage we
# 0. recompute weights
# 1. normalize ratio between distributions (negatives are in opposite distribution)
# 2. chi2 - changing only sign, weights are the same
# 3.
optimal value: simply log as usual (negatives are in the same distribution with sign -) class ReweightLossFunction(AbstractLossFunction): def __init__(self, regularization=5.): """ Loss function used to reweight events. Conventions: y=0 - target distribution, y=1 - original distribution. Weights after look like: w = w_0 for target distribution w = w_0 * exp(pred) for events from original distribution (so pred for target distribution is ignored) :param regularization: roughly, it's number of events added in each leaf to prevent overfitting. """ self.regularization = regularization def fit(self, X, y, sample_weight): assert numpy.all(numpy.in1d(y, [0, 1])) if sample_weight is None: self.sample_weight = numpy.ones(len(X), dtype=float) else: self.sample_weight = numpy.array(sample_weight, dtype=float) self.y = y # signs encounter transfer to opposite distribution self.signs = (2 * y - 1) * numpy.sign(sample_weight) self.mask_original = numpy.array(self.y) self.mask_target = numpy.array(1 - self.y) return self def _compute_weights(self, y_pred): """We need renormalization at eac step""" weights = self.sample_weight * numpy.exp(self.y * y_pred) return check_sample_weight(self.y, weights, normalize=True, normalize_by_class=True) def __call__(self, *args, **kwargs): """ Loss function doesn't have precise expression """ return 0 def negative_gradient(self, y_pred): return 0. 
def prepare_tree_params(self, y_pred): return self.signs, numpy.abs(self._compute_weights(y_pred)) def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): weights = self._compute_weights(y_pred) w_target = numpy.bincount(terminal_regions, weights=self.mask_target * weights) w_original = numpy.bincount(terminal_regions, weights=self.mask_original * weights) # suppressing possibly negative samples w_target = w_target.clip(0) w_original = w_original.clip(0) return numpy.log(w_target + self.regularization) - numpy.log(w_original + self.regularization) # endregion # region FlatnessLossFunction def _exp_margin(margin): """ margin = - y_signed * y_pred """ return numpy.exp(numpy.clip(margin, -1e5, 2)) class AbstractFlatnessLossFunction(AbstractLossFunction): """Base class for FlatnessLosses""" def __init__(self, uniform_features, uniform_label, power=2., fl_coefficient=3., allow_wrong_signs=True): self.uniform_features = uniform_features if isinstance(uniform_label, numbers.Number): self.uniform_label = numpy.array([uniform_label]) else: self.uniform_label = numpy.array(uniform_label) self.power = power self.fl_coefficient = fl_coefficient self.allow_wrong_signs = allow_wrong_signs def fit(self, X, y, sample_weight=None): sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True, normalize_by_class=True) assert len(X) == len(y), 'lengths are different' X = pandas.DataFrame(X) self.group_indices = dict() self.group_matrices = dict() self.group_weights = dict() occurences = numpy.zeros(len(X)) for label in self.uniform_label: self.group_indices[label] = self._compute_groups_indices(X, y, label=label) self.group_matrices[label] = group_indices_to_groups_matrix(self.group_indices[label], len(X)) self.group_weights[label] = compute_group_weights(self.group_matrices[label], sample_weight=sample_weight) for group in self.group_indices[label]: occurences[group] += 1 out_of_bins = (occurences == 0) & numpy.in1d(y, self.uniform_label) if 
numpy.mean(out_of_bins) > 0.01: warnings.warn("%i events out of all bins " % numpy.sum(out_of_bins), UserWarning) self.y = y self.y_signed = 2 * y - 1 self.sample_weight = numpy.copy(sample_weight) self.divided_weight = sample_weight / numpy.maximum(occurences, 1) return self def _compute_groups_indices(self, X, y, label): raise NotImplementedError('To be overriden in descendants.') def __call__(self, pred): # the actual value does not play any role in boosting # optimizing here return 0 def _compute_fl_derivatives(self, y_pred): y_pred = numpy.ravel(y_pred) neg_gradient = numpy.zeros(len(self.y), dtype=numpy.float) for label in self.uniform_label: label_mask = self.y == label global_positions = numpy.zeros(len(y_pred), dtype=float) global_positions[label_mask] = \ _compute_positions(y_pred[label_mask], sample_weight=self.sample_weight[label_mask]) for indices_in_bin in self.group_indices[label]: local_pos = _compute_positions(y_pred[indices_in_bin], sample_weight=self.sample_weight[indices_in_bin]) global_pos = global_positions[indices_in_bin] bin_gradient = self.power * numpy.sign(local_pos - global_pos) * \ numpy.abs(local_pos - global_pos) ** (self.power - 1) neg_gradient[indices_in_bin] += bin_gradient neg_gradient *= self.divided_weight # check that events outside uniform uniform classes are not touched assert numpy.all(neg_gradient[~numpy.in1d(self.y, self.uniform_label)] == 0) return neg_gradient def negative_gradient(self, y_pred): y_signed = self.y_signed neg_gradient = self._compute_fl_derivatives(y_pred) * self.fl_coefficient # adding ExpLoss neg_gradient += y_signed * self.sample_weight * _exp_margin(-y_signed * y_pred) if not self.allow_wrong_signs: neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5) return neg_gradient class BinFlatnessLossFunction(AbstractFlatnessLossFunction): def __init__(self, uniform_features, uniform_label, n_bins=10, power=2., fl_coefficient=3., allow_wrong_signs=True): r""" This loss function contains 
separately penalty for non-flatness and for bad prediction quality. See [FL]_ for details. :math:`\text{loss} =\text{ExpLoss} + c \times \text{FlatnessLoss}` FlatnessLoss computed using binning of uniform variables :param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions :param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired :param int n_bins: number of bins along each variable :param float power: the loss contains the difference :math:`| F - F_bin |^p`, where p is power :param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity. :param bool allow_wrong_signs: defines whether gradient may different sign from the "sign of class" (i.e. may have negative gradient on signal). If False, values will be clipped to zero. .. [FL] A. Rogozhnikov et al, New approaches for boosting to uniformity http://arxiv.org/abs/1410.4140 """ self.n_bins = n_bins AbstractFlatnessLossFunction.__init__(self, uniform_features, uniform_label=uniform_label, power=power, fl_coefficient=fl_coefficient, allow_wrong_signs=allow_wrong_signs) def _compute_groups_indices(self, X, y, label): """Returns a list, each element is events' indices in some group.""" label_mask = y == label extended_bin_limits = [] for var in self.uniform_features: f_min, f_max = numpy.min(X[var][label_mask]), numpy.max(X[var][label_mask]) extended_bin_limits.append(numpy.linspace(f_min, f_max, 2 * self.n_bins + 1)) groups_indices = list() for shift in [0, 1]: bin_limits = [] for axis_limits in extended_bin_limits: bin_limits.append(axis_limits[1 + shift:-1:2]) bin_indices = compute_bin_indices(X.ix[:, self.uniform_features].values, bin_limits=bin_limits) groups_indices += list(bin_to_group_indices(bin_indices, mask=label_mask)) return groups_indices class KnnFlatnessLossFunction(AbstractFlatnessLossFunction): def __init__(self, uniform_features, uniform_label, n_neighbours=100, power=2., 
fl_coefficient=3., max_groups=5000, allow_wrong_signs=True, random_state=42): r""" This loss function contains separately penalty for non-flatness and for bad prediction quality. See [FL]_ for details. :math:`\text{loss} = \text{ExpLoss} + c \times \text{FlatnessLoss}` FlatnessLoss computed using nearest neighbors in space of uniform features :param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions :param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired :param int n_neighbours: number of neighbors used in flatness loss :param float power: the loss contains the difference :math:`| F - F_bin |^p`, where p is power :param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity. :param bool allow_wrong_signs: defines whether gradient may different sign from the "sign of class" (i.e. may have negative gradient on signal). If False, values will be clipped to zero. :param int max_groups: to limit memory consumption when training sample is large, we randomly pick this number of points with their members. .. [FL] A. 
Rogozhnikov et al, New approaches for boosting to uniformity http://arxiv.org/abs/1410.4140 """ self.n_neighbours = n_neighbours self.max_groups = max_groups self.random_state = random_state AbstractFlatnessLossFunction.__init__(self, uniform_features, uniform_label=uniform_label, power=power, fl_coefficient=fl_coefficient, allow_wrong_signs=allow_wrong_signs) def _compute_groups_indices(self, X, y, label): mask = y == label self.random_state = check_random_state(self.random_state) knn_indices = compute_knn_indices_of_signal(X[self.uniform_features], mask, n_neighbours=self.n_neighbours)[mask, :] if len(knn_indices) > self.max_groups: selected_group = self.random_state.choice(len(knn_indices), size=self.max_groups, replace=False) return knn_indices[selected_group, :] else: return knn_indices # endregion added reweight loss function to documentation """ **hep_ml.losses** contains different loss functions to use in gradient boosting. Apart from standard classification losses, **hep_ml** contains losses for uniform classification (see :class:`BinFlatnessLossFunction`, :class:`KnnFlatnessLossFunction`, :class:`KnnAdaLossFunction`) and for ranking (see :class:`RankBoostLossFunction`) **Interface** Loss functions inside **hep_ml** are stateful estimators and require initial fitting, which is done automatically inside gradient boosting. All loss function should be derived from AbstractLossFunction and implement this interface. 
Examples ________ Training gradient boosting, optimizing LogLoss and using all features >>> from hep_ml.gradientboosting import UGradientBoostingClassifier, LogLossFunction >>> classifier = UGradientBoostingClassifier(loss=LogLossFunction(), n_estimators=100) >>> classifier.fit(X, y, sample_weight=sample_weight) Using composite loss function and subsampling: >>> loss = CompositeLossFunction() >>> classifier = UGradientBoostingClassifier(loss=loss, subsample=0.5) To get uniform predictions in mass in background (note that mass should not present in features): >>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=0, train_features=['pt', 'flight_time']) >>> classifier = UGradientBoostingClassifier(loss=loss) To get uniform predictions in both signal and background: >>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=[0, 1], train_features=['pt', 'flight_time']) >>> classifier = UGradientBoostingClassifier(loss=loss) """ from __future__ import division, print_function, absolute_import import numbers import warnings import numpy import pandas from scipy import sparse from scipy.special import expit from sklearn.utils.validation import check_random_state from sklearn.base import BaseEstimator from .commonutils import compute_knn_indices_of_signal, check_sample_weight, check_uniform_label, weighted_quantile from .metrics_utils import bin_to_group_indices, compute_bin_indices, compute_group_weights, \ group_indices_to_groups_matrix __author__ = 'Alex Rogozhnikov' __all__ = [ 'AbstractLossFunction', 'MSELossFunction', 'MAELossFunction', 'LogLossFunction', 'AdaLossFunction', 'CompositeLossFunction', 'BinFlatnessLossFunction', 'KnnFlatnessLossFunction', 'KnnAdaLossFunction', 'RankBoostLossFunction', 'ReweightLossFunction' ] def _compute_positions(y_pred, sample_weight): """ For each event computes it position among other events by prediction. 
position = (weighted) part of elements with lower predictions => position belongs to [0, 1] This function is very close to `scipy.stats.rankdata`, but supports weights. """ order = numpy.argsort(y_pred) ordered_weights = sample_weight[order] ordered_weights /= float(numpy.sum(ordered_weights)) efficiencies = (numpy.cumsum(ordered_weights) - 0.5 * ordered_weights) return efficiencies[numpy.argsort(order)] class AbstractLossFunction(BaseEstimator): """ This is base class for loss functions used in `hep_ml`. Main differences compared to `scikit-learn` loss functions: 1. losses are stateful, and may require fitting of training data before usage. 2. thus, when computing gradient, hessian, one shall provide predictions of all events. 3. losses are object that shall be passed as estimators to gradient boosting (see examples). 4. only two-class case is supported, and different classes may have different role and meaning. """ def fit(self, X, y, sample_weight): """ This method is optional, it is called before all the others.""" return self def negative_gradient(self, y_pred): """The y_pred should contain all the events passed to `fit` method, moreover, the order should be the same""" raise NotImplementedError() def __call__(self, y_pred): """The y_pred should contain all the events passed to `fit` method, moreover, the order should be the same""" raise NotImplementedError() def prepare_tree_params(self, y_pred): """Prepares parameters for regression tree that minimizes MSE :param y_pred: contains predictions for all the events passed to `fit` method, moreover, the order should be the same :return: tuple (tree_target, tree_weight) with target and weight to be used in decision tree """ return self.negative_gradient(y_pred), numpy.ones(len(y_pred)) def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): """ Method for pruning. Loss function can prepare better values for leaves :param terminal_regions: indices of terminal regions of each event. 
:param leaf_values: numpy.array, current mapping of leaf indices to prediction values. :param y_pred: predictions before adding new tree. :return: numpy.array with new prediction values for all leaves. """ return leaf_values def compute_optimal_step(self, y_pred): """ Compute optimal global step. This method is typically used to make optimal step before fitting trees to reduce variance. :param y_pred: initial predictions, numpy.array of shape [n_samples] :return: float """ return 0. class HessianLossFunction(AbstractLossFunction): """Loss function with diagonal hessian, provides uses Newton-Raphson step to update trees. """ def __init__(self, regularization=5.): """ :param regularization: float, penalty for leaves with few events, corresponds roughly to the number of added events of both classes to each leaf. """ self.regularization = regularization def fit(self, X, y, sample_weight): self.regularization_ = self.regularization * numpy.mean(sample_weight) return self def hessian(self, y_pred): """ Returns diagonal of hessian matrix. :param y_pred: numpy.array of shape [n_samples] with events passed in the same order as in `fit`. :return: numpy.array of shape [n_sampels] with second derivatives with respect to each prediction. 
""" raise NotImplementedError('Override this method in loss function.') def prepare_tree_params(self, y_pred): grad = self.negative_gradient(y_pred) hess = self.hessian(y_pred) + 0.01 return grad / hess, hess def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): """ This expression comes from optimization of second-order approximation of loss function.""" min_length = len(leaf_values) nominators = numpy.bincount(terminal_regions, weights=self.negative_gradient(y_pred), minlength=min_length) denominators = numpy.bincount(terminal_regions, weights=self.hessian(y_pred), minlength=min_length) return nominators / (denominators + self.regularization_) def compute_optimal_step(self, y_pred): """ Optimal step is computed using Newton-Raphson algorithm (10 iterations). :param y_pred: predictions (usually, zeros) :return: float """ terminal_regions = numpy.zeros(len(y_pred), dtype='int') leaf_values = numpy.zeros(shape=1) step = 0. for _ in range(10): step_ = self.prepare_new_leaves_values(terminal_regions, leaf_values=leaf_values, y_pred=y_pred + step)[0] step += 0.5 * step_ return step # region Classification losses class AdaLossFunction(HessianLossFunction): """ AdaLossFunction is the same as Exponential Loss Function (aka exploss) """ def fit(self, X, y, sample_weight): self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True, normalize_by_class=True) self.y_signed = 2 * y - 1 HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight) return self def __call__(self, y_pred): return numpy.sum(self.sample_weight * numpy.exp(- self.y_signed * y_pred)) def negative_gradient(self, y_pred): return self.y_signed * self.sample_weight * numpy.exp(- self.y_signed * y_pred) def hessian(self, y_pred): return self.sample_weight * numpy.exp(- self.y_signed * y_pred) def prepare_tree_params(self, y_pred): return self.y_signed, self.hessian(y_pred) class LogLossFunction(HessianLossFunction): """Logistic loss function 
(logloss), aka binomial deviance, aka cross-entropy, aka log-likelihood loss. """ def fit(self, X, y, sample_weight): self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True, normalize_by_class=True) self.y_signed = 2 * y - 1 HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight) return self def __call__(self, y_pred): return numpy.sum(self.sample_weight * numpy.logaddexp(0, - self.y_signed * y_pred)) def negative_gradient(self, y_pred): return self.y_signed * self.sample_weight * expit(- self.y_signed * y_pred) def hessian(self, y_pred): expits = expit(self.y_signed * y_pred) return self.sample_weight * expits * (1 - expits) def prepare_tree_params(self, y_pred): return self.y_signed * expit(- self.y_signed * y_pred), self.sample_weight class CompositeLossFunction(HessianLossFunction): """ Composite loss function is defined as exploss for backgorund events and logloss for signal with proper constants. Such kind of loss functions is very useful to optimize AMS or in situations where very clean signal is expected. 
""" def fit(self, X, y, sample_weight): self.y = y self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True, normalize_by_class=True) self.y_signed = 2 * y - 1 self.sig_w = (y == 1) * self.sample_weight self.bck_w = (y == 0) * self.sample_weight HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight) return self def __call__(self, y_pred): result = numpy.sum(self.sig_w * numpy.logaddexp(0, -y_pred)) result += numpy.sum(self.bck_w * numpy.exp(0.5 * y_pred)) return result def negative_gradient(self, y_pred): result = self.sig_w * expit(- y_pred) result -= 0.5 * self.bck_w * numpy.exp(0.5 * y_pred) return result def hessian(self, y_pred): expits = expit(- y_pred) return self.sig_w * expits * (1 - expits) + self.bck_w * 0.25 * numpy.exp(0.5 * y_pred) # endregion # region Regression Losses class MSELossFunction(HessianLossFunction): r""" Mean squared error loss function, used for regression. :math:`\text{loss} = \sum_i (y_i - \hat{y}_i)^2` """ def fit(self, X, y, sample_weight): self.y = y self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True) HessianLossFunction.fit(self, X, y, sample_weight=sample_weight) return self def __call__(self, y_pred): return 0.5 * numpy.sum(self.sample_weight * (self.y - y_pred) ** 2) def negative_gradient(self, y_pred): return self.sample_weight * (self.y - y_pred) def hessian(self, y_pred): return self.sample_weight def prepare_tree_params(self, y_pred): return self.y - y_pred, self.sample_weight def compute_optimal_step(self, y_pred): return numpy.average(self.y - y_pred, weights=self.sample_weight) class MAELossFunction(AbstractLossFunction): r""" Mean absolute error loss function, used for regression. 
:math:`\text{loss} = \sum_i |y_i - \hat{y}_i|` """ def fit(self, X, y, sample_weight): self.y = y self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True) return self def __call__(self, y_pred): return 0.5 * numpy.sum(self.sample_weight * numpy.abs(self.y - y_pred)) def negative_gradient(self, y_pred): return self.sample_weight * numpy.sign(self.y - y_pred) def prepare_tree_params(self, y_pred): return numpy.sign(self.y - y_pred), self.sample_weight def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): # TODO use weighted median new_leaf_values = numpy.zeros(len(leaf_values), dtype='float') target = (self.y - y_pred) for terminal_region in range(len(leaf_values)): values = target[terminal_regions == terminal_region] values = numpy.insert(values, [0], [0]) new_leaf_values[terminal_region] = numpy.median(values) return new_leaf_values def compute_optimal_step(self, y_pred): return weighted_quantile(self.y - y_pred, quantiles=[0.5], sample_weight=self.sample_weight)[0] # endregion RegressionLosses class RankBoostLossFunction(HessianLossFunction): def __init__(self, request_column, penalty_power=1., update_iterations=1): r"""RankBoostLossFunction is target of optimization in RankBoost [RB]_ algorithm, which was developed for ranking and introduces penalties for wrong order of predictions. However, this implementation goes further and there is selection of optimal leaf values based on iterative procedure. This implementation also uses matrix decomposition of loss function, which is very effective, when labels are from some very limited set (usually it is 0, 1, 2, 3, 4) :math:`\text{loss} = \sum_{ij} w_{ij} exp(pred_i - pred_j)`, :math:`w_{ij} = ( \alpha + \beta * [query_i = query_j]) R_{label_i, label_j}`, where :math:`R_{ij} = 0` if :math:`i \leq j`, else :math:`R_{ij} = (i - j)^{p}` :param str request_column: name of column with search query ids. The higher attention is payed to samples with same query. 
:param float penalty_power: describes dependence of penalty on the difference between target labels. :param int update_iterations: number of minimization steps to provide optimal values. .. [RB] Y. Freund et al. An Efficient Boosting Algorithm for Combining Preferences """ self.update_terations = update_iterations self.penalty_power = penalty_power self.request_column = request_column HessianLossFunction.__init__(self, regularization=0.1) def fit(self, X, y, sample_weight): self.queries = X[self.request_column] self.y = y self.possible_queries, normed_queries = numpy.unique(self.queries, return_inverse=True) self.possible_ranks, normed_ranks = numpy.unique(self.y, return_inverse=True) self.lookups = [normed_ranks, normed_queries * len(self.possible_ranks) + normed_ranks] self.minlengths = [len(self.possible_ranks), len(self.possible_ranks) * len(self.possible_queries)] self.rank_penalties = numpy.zeros([len(self.possible_ranks), len(self.possible_ranks)], dtype=float) for r1 in self.possible_ranks: for r2 in self.possible_ranks: if r1 < r2: self.rank_penalties[r1, r2] = (r2 - r1) ** self.penalty_power self.penalty_matrices = [] self.penalty_matrices.append(self.rank_penalties / numpy.sqrt(1 + len(y))) n_queries = numpy.bincount(normed_queries) assert len(n_queries) == len(self.possible_queries) self.penalty_matrices.append( sparse.block_diag([self.rank_penalties * 1. / numpy.sqrt(1 + nq) for nq in n_queries])) HessianLossFunction.fit(self, X, y, sample_weight=sample_weight) def __call__(self, y_pred): y_pred -= y_pred.mean() pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) result = 0. 
for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) result += pos_stats.T.dot(penalty_matrix.dot(neg_stats)) return result def negative_gradient(self, y_pred): y_pred -= y_pred.mean() pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) gradient = numpy.zeros(len(y_pred), dtype=float) for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) gradient += pos_exponent * penalty_matrix.dot(neg_stats)[lookup] gradient -= neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup] return - gradient def hessian(self, y_pred): y_pred -= y_pred.mean() pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) result = numpy.zeros(len(y_pred), dtype=float) for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) result += pos_exponent * penalty_matrix.dot(neg_stats)[lookup] result += neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup] return result def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): leaves_values = numpy.zeros(len(leaf_values)) for _ in range(self.update_terations): y_test = y_pred + leaves_values[terminal_regions] new_leaves_values = self._prepare_new_leaves_values(terminal_regions, leaves_values, y_test) leaves_values = 0.5 * new_leaves_values + leaves_values return leaves_values def _prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): """ For each event we shall represent loss as w_plus * e^{pred} + w_minus * e^{-pred}, then we 
are able to construct optimal step. Pay attention: this is not an optimal, since we are ignoring, that some events belong to the same leaf """ pos_exponent = numpy.exp(y_pred) neg_exponent = numpy.exp(-y_pred) w_plus = numpy.zeros(len(y_pred), dtype=float) w_minus = numpy.zeros(len(y_pred), dtype=float) for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices): pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length) neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length) w_plus += penalty_matrix.dot(neg_stats)[lookup] w_minus += penalty_matrix.T.dot(pos_stats)[lookup] w_plus_leaf = numpy.bincount(terminal_regions, weights=w_plus * pos_exponent) + self.regularization w_minus_leaf = numpy.bincount(terminal_regions, weights=w_minus * neg_exponent) + self.regularization return 0.5 * numpy.log(w_minus_leaf / w_plus_leaf) # region MatrixLossFunction class AbstractMatrixLossFunction(HessianLossFunction): def __init__(self, uniform_features, regularization=5.): r"""AbstractMatrixLossFunction is a base class to be inherited by other loss functions, which choose the particular A matrix and w vector. 
The formula of loss is: \text{loss} = \sum_i w_i * exp(- \sum_j a_ij y_j score_j) """ self.uniform_features = uniform_features # real matrix and vector will be computed during fitting self.A = None self.A_t = None self.w = None HessianLossFunction.__init__(self, regularization=regularization) def fit(self, X, y, sample_weight): """This method is used to compute A matrix and w based on train dataset""" assert len(X) == len(y), "different size of arrays" A, w = self.compute_parameters(X, y, sample_weight) self.A = sparse.csr_matrix(A) self.A_t = sparse.csr_matrix(self.A.transpose()) self.A_t_sq = self.A_t.multiply(self.A_t) self.w = numpy.array(w) assert A.shape[0] == len(w), "inconsistent sizes" assert A.shape[1] == len(X), "wrong size of matrix" self.y_signed = numpy.array(2 * y - 1) HessianLossFunction.fit(self, X, y, sample_weight=sample_weight) return self def __call__(self, y_pred): """Computing the loss itself""" assert len(y_pred) == self.A.shape[1], "something is wrong with sizes" exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred)) return numpy.sum(self.w * exponents) def negative_gradient(self, y_pred): """Computing negative gradient""" assert len(y_pred) == self.A.shape[1], "something is wrong with sizes" exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred)) result = self.A_t.dot(self.w * exponents) * self.y_signed return result def hessian(self, y_pred): assert len(y_pred) == self.A.shape[1], 'something wrong with sizes' exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred)) result = self.A_t_sq.dot(self.w * exponents) return result def compute_parameters(self, trainX, trainY, trainW): """This method should be overloaded in descendant, and should return A, w (matrix and vector)""" raise NotImplementedError() def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred)) # current approach uses Newton-Raphson step # TODO compare with iterative suboptimal choice of 
value, based on exp(a x) ~ a exp(x) regions_matrix = sparse.csc_matrix((self.y_signed, [numpy.arange(len(self.y_signed)), terminal_regions])) # Z is matrix of shape [n_exponents, n_terminal_regions] # with contributions of each terminal region to each exponent Z = self.A.dot(regions_matrix) Z = Z.T nominator = Z.dot(self.w * exponents) denominator = Z.multiply(Z).dot(self.w * exponents) return nominator / (denominator + 1e-5) class KnnAdaLossFunction(AbstractMatrixLossFunction): def __init__(self, uniform_features, uniform_label, knn=10, row_norm=1.): r"""Modification of AdaLoss to achieve uniformity of predictions :math:`\text{loss} = \sum_i w_i * exp(- \sum_j a_{ij} y_j score_j)` `A` matrix is square, each row corresponds to a single event in train dataset, in each row we put ones to the closest neighbours if this event from uniform class. See [BU]_ for details. :param list[str] uniform_features: the features, along which uniformity is desired :param int|list[int] uniform_label: the label (labels) of 'uniform classes' :param int knn: the number of nonzero elements in the row, corresponding to event in 'uniform class' .. [BU] A. 
        Rogozhnikov et al, New approaches for boosting to uniformity
        http://arxiv.org/abs/1410.4140
        """
        self.knn = knn
        self.row_norm = row_norm
        # check_uniform_label normalizes a scalar or sequence of labels to an array
        self.uniform_label = check_uniform_label(uniform_label)
        AbstractMatrixLossFunction.__init__(self, uniform_features)

    def compute_parameters(self, trainX, trainY, trainW):
        """Build the sparse matrix A and weight vector w used by AbstractMatrixLossFunction.

        :param trainX: pandas.DataFrame with training features (must contain self.uniform_features).
        :param trainY: array of class labels.
        :param trainW: array of sample weights.
        :return: tuple (A, w) where A is a sparse [n_samples, n_samples] matrix
            and w is an array of length n_samples.
        """
        A_parts = []
        w_parts = []
        # rows for events of 'uniform' classes: each row spreads row_norm over the knn
        # nearest same-class neighbours in the space of uniform_features
        for label in self.uniform_label:
            label_mask = numpy.array(trainY == label)
            n_label = numpy.sum(label_mask)
            knn_indices = compute_knn_indices_of_signal(trainX[self.uniform_features], label_mask, self.knn)
            knn_indices = knn_indices[label_mask, :]
            # CSR construction: each row has exactly self.knn nonzero entries
            ind_ptr = numpy.arange(0, n_label * self.knn + 1, self.knn)
            column_indices = knn_indices.flatten()
            data = numpy.ones(n_label * self.knn, dtype=float) * self.row_norm / self.knn
            A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
            # row weight is the mean weight of the row's neighbours
            w_part = numpy.mean(numpy.take(trainW, knn_indices), axis=1)
            assert A_part.shape[0] == len(w_part)
            A_parts.append(A_part)
            w_parts.append(w_part)

        # rows for events of the remaining classes: one nonzero per row (plain AdaLoss behaviour)
        for label in set(trainY) - set(self.uniform_label):
            label_mask = trainY == label
            n_label = numpy.sum(label_mask)
            ind_ptr = numpy.arange(0, n_label + 1)
            column_indices = numpy.where(label_mask)[0].flatten()
            data = numpy.ones(n_label, dtype=float) * self.row_norm
            A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
            w_part = trainW[label_mask]
            A_parts.append(A_part)
            w_parts.append(w_part)

        A = sparse.vstack(A_parts, format='csr', dtype=float)
        w = numpy.concatenate(w_parts)
        # A is square: one row per training event
        assert A.shape == (len(trainX), len(trainX))
        return A, w

# endregion


# region ReweightLossFunction

# Mathematically at each stage we
# 0. recompute weights
# 1. normalize ratio between distributions (negatives are in opposite distribution)
# 2. chi2 - changing only sign, weights are the same
# 3.
# optimal value: simply log (negatives are in the same distribution with sign -)


class ReweightLossFunction(AbstractLossFunction):
    def __init__(self, regularization=5.):
        """
        Loss function used to reweight distributions. Works inside :class:`hep_ml.reweight.GBReweighter`

        Conventions: y=0 - target distribution, y=1 - original distribution.

        Weights after look like:
         w = w_0 for target distribution
         w = w_0 * exp(pred) for events from original distribution
         (so pred for target distribution is ignored)

        :param regularization: roughly, it's number of events added in each leaf to prevent overfitting.
        """
        self.regularization = regularization

    def fit(self, X, y, sample_weight):
        """Remember labels, masks and weights. Only labels 0 and 1 are allowed.

        :param sample_weight: array of weights or None (interpreted as unit weights).
        :return: self
        """
        assert numpy.all(numpy.in1d(y, [0, 1]))
        if sample_weight is None:
            self.sample_weight = numpy.ones(len(X), dtype=float)
        else:
            self.sample_weight = numpy.array(sample_weight, dtype=float)
        self.y = y
        # signs encounter transfer to opposite distribution.
        # BUGFIX: use self.sample_weight, not the raw argument —
        # numpy.sign(None) raised TypeError when sample_weight was omitted,
        # even though the branch above explicitly supports None.
        self.signs = (2 * y - 1) * numpy.sign(self.sample_weight)

        self.mask_original = numpy.array(self.y)
        self.mask_target = numpy.array(1 - self.y)
        return self

    def _compute_weights(self, y_pred):
        """We need renormalization at each step"""
        weights = self.sample_weight * numpy.exp(self.y * y_pred)
        return check_sample_weight(self.y, weights, normalize=True, normalize_by_class=True)

    def __call__(self, *args, **kwargs):
        """ Loss function doesn't have precise expression """
        return 0

    def negative_gradient(self, y_pred):
        # gradient is unused directly; tree targets are supplied by prepare_tree_params
        return 0.
def prepare_tree_params(self, y_pred): return self.signs, numpy.abs(self._compute_weights(y_pred)) def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred): weights = self._compute_weights(y_pred) w_target = numpy.bincount(terminal_regions, weights=self.mask_target * weights) w_original = numpy.bincount(terminal_regions, weights=self.mask_original * weights) # suppressing possibly negative samples w_target = w_target.clip(0) w_original = w_original.clip(0) return numpy.log(w_target + self.regularization) - numpy.log(w_original + self.regularization) # endregion # region FlatnessLossFunction def _exp_margin(margin): """ margin = - y_signed * y_pred """ return numpy.exp(numpy.clip(margin, -1e5, 2)) class AbstractFlatnessLossFunction(AbstractLossFunction): """Base class for FlatnessLosses""" def __init__(self, uniform_features, uniform_label, power=2., fl_coefficient=3., allow_wrong_signs=True): self.uniform_features = uniform_features if isinstance(uniform_label, numbers.Number): self.uniform_label = numpy.array([uniform_label]) else: self.uniform_label = numpy.array(uniform_label) self.power = power self.fl_coefficient = fl_coefficient self.allow_wrong_signs = allow_wrong_signs def fit(self, X, y, sample_weight=None): sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True, normalize_by_class=True) assert len(X) == len(y), 'lengths are different' X = pandas.DataFrame(X) self.group_indices = dict() self.group_matrices = dict() self.group_weights = dict() occurences = numpy.zeros(len(X)) for label in self.uniform_label: self.group_indices[label] = self._compute_groups_indices(X, y, label=label) self.group_matrices[label] = group_indices_to_groups_matrix(self.group_indices[label], len(X)) self.group_weights[label] = compute_group_weights(self.group_matrices[label], sample_weight=sample_weight) for group in self.group_indices[label]: occurences[group] += 1 out_of_bins = (occurences == 0) & numpy.in1d(y, self.uniform_label) if 
numpy.mean(out_of_bins) > 0.01: warnings.warn("%i events out of all bins " % numpy.sum(out_of_bins), UserWarning) self.y = y self.y_signed = 2 * y - 1 self.sample_weight = numpy.copy(sample_weight) self.divided_weight = sample_weight / numpy.maximum(occurences, 1) return self def _compute_groups_indices(self, X, y, label): raise NotImplementedError('To be overriden in descendants.') def __call__(self, pred): # the actual value does not play any role in boosting # optimizing here return 0 def _compute_fl_derivatives(self, y_pred): y_pred = numpy.ravel(y_pred) neg_gradient = numpy.zeros(len(self.y), dtype=numpy.float) for label in self.uniform_label: label_mask = self.y == label global_positions = numpy.zeros(len(y_pred), dtype=float) global_positions[label_mask] = \ _compute_positions(y_pred[label_mask], sample_weight=self.sample_weight[label_mask]) for indices_in_bin in self.group_indices[label]: local_pos = _compute_positions(y_pred[indices_in_bin], sample_weight=self.sample_weight[indices_in_bin]) global_pos = global_positions[indices_in_bin] bin_gradient = self.power * numpy.sign(local_pos - global_pos) * \ numpy.abs(local_pos - global_pos) ** (self.power - 1) neg_gradient[indices_in_bin] += bin_gradient neg_gradient *= self.divided_weight # check that events outside uniform uniform classes are not touched assert numpy.all(neg_gradient[~numpy.in1d(self.y, self.uniform_label)] == 0) return neg_gradient def negative_gradient(self, y_pred): y_signed = self.y_signed neg_gradient = self._compute_fl_derivatives(y_pred) * self.fl_coefficient # adding ExpLoss neg_gradient += y_signed * self.sample_weight * _exp_margin(-y_signed * y_pred) if not self.allow_wrong_signs: neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5) return neg_gradient class BinFlatnessLossFunction(AbstractFlatnessLossFunction): def __init__(self, uniform_features, uniform_label, n_bins=10, power=2., fl_coefficient=3., allow_wrong_signs=True): r""" This loss function contains 
separately penalty for non-flatness and for bad prediction quality. See [FL]_ for details. :math:`\text{loss} =\text{ExpLoss} + c \times \text{FlatnessLoss}` FlatnessLoss computed using binning of uniform variables :param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions :param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired :param int n_bins: number of bins along each variable :param float power: the loss contains the difference :math:`| F - F_bin |^p`, where p is power :param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity. :param bool allow_wrong_signs: defines whether gradient may different sign from the "sign of class" (i.e. may have negative gradient on signal). If False, values will be clipped to zero. .. [FL] A. Rogozhnikov et al, New approaches for boosting to uniformity http://arxiv.org/abs/1410.4140 """ self.n_bins = n_bins AbstractFlatnessLossFunction.__init__(self, uniform_features, uniform_label=uniform_label, power=power, fl_coefficient=fl_coefficient, allow_wrong_signs=allow_wrong_signs) def _compute_groups_indices(self, X, y, label): """Returns a list, each element is events' indices in some group.""" label_mask = y == label extended_bin_limits = [] for var in self.uniform_features: f_min, f_max = numpy.min(X[var][label_mask]), numpy.max(X[var][label_mask]) extended_bin_limits.append(numpy.linspace(f_min, f_max, 2 * self.n_bins + 1)) groups_indices = list() for shift in [0, 1]: bin_limits = [] for axis_limits in extended_bin_limits: bin_limits.append(axis_limits[1 + shift:-1:2]) bin_indices = compute_bin_indices(X.ix[:, self.uniform_features].values, bin_limits=bin_limits) groups_indices += list(bin_to_group_indices(bin_indices, mask=label_mask)) return groups_indices class KnnFlatnessLossFunction(AbstractFlatnessLossFunction): def __init__(self, uniform_features, uniform_label, n_neighbours=100, power=2., 
fl_coefficient=3., max_groups=5000, allow_wrong_signs=True, random_state=42): r""" This loss function contains separately penalty for non-flatness and for bad prediction quality. See [FL]_ for details. :math:`\text{loss} = \text{ExpLoss} + c \times \text{FlatnessLoss}` FlatnessLoss computed using nearest neighbors in space of uniform features :param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions :param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired :param int n_neighbours: number of neighbors used in flatness loss :param float power: the loss contains the difference :math:`| F - F_bin |^p`, where p is power :param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity. :param bool allow_wrong_signs: defines whether gradient may different sign from the "sign of class" (i.e. may have negative gradient on signal). If False, values will be clipped to zero. :param int max_groups: to limit memory consumption when training sample is large, we randomly pick this number of points with their members. .. [FL] A. Rogozhnikov et al, New approaches for boosting to uniformity http://arxiv.org/abs/1410.4140 """ self.n_neighbours = n_neighbours self.max_groups = max_groups self.random_state = random_state AbstractFlatnessLossFunction.__init__(self, uniform_features, uniform_label=uniform_label, power=power, fl_coefficient=fl_coefficient, allow_wrong_signs=allow_wrong_signs) def _compute_groups_indices(self, X, y, label): mask = y == label self.random_state = check_random_state(self.random_state) knn_indices = compute_knn_indices_of_signal(X[self.uniform_features], mask, n_neighbours=self.n_neighbours)[mask, :] if len(knn_indices) > self.max_groups: selected_group = self.random_state.choice(len(knn_indices), size=self.max_groups, replace=False) return knn_indices[selected_group, :] else: return knn_indices # endregion
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Libtree(CMakePackage):
    """ldd as a tree with an option to bundle dependencies into a single folder"""

    homepage = "https://github.com/haampie/libtree"
    git = "https://github.com/haampie/libtree.git"
    url = "https://github.com/haampie/libtree/archive/refs/tags/v2.0.0.tar.gz"
    maintainers = ['haampie']

    version('master', branch='master')
    version('2.0.0', sha256='099e85d8ba3c3d849ce05b8ba2791dd25cd042a813be947fb321b0676ef71883')
    version('1.2.3', sha256='4a912cf97109219fe931942a30579336b6ab9865395447bd157bbfa74bf4e8cf')
    version('1.2.2', sha256='4ccf09227609869b85a170550b636defcf0b0674ecb0785063b81785b1c29bdd')
    version('1.2.1', sha256='26791c0f418b93d502879db0e1fd2fd3081b885ad87326611d992a5f8977a9b0')
    version('1.2.0', sha256='3e74655f22b1dcc19e8a1b9e7796b8ad44bc37f29e9a99134119e8521e28be97')
    version('1.1.4', sha256='38648f67c8fa72c3a4a3af2bb254b5fd6989c0f1362387ab298176db5cbbcc4e')
    version('1.1.3', sha256='4c681d7b67ef3d62f95450fb7eb84e33ff10a3b9db1f7e195b965b2c3c58226b')
    version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374')
    version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4')
    version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536')
    version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45')
    version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b')

    def url_for_version(self, version):
        # Releases before 2.0.0 shipped a hand-uploaded 'sources.tar.gz'
        # asset; later releases use GitHub's auto-generated tag tarball.
        if version < Version("2.0.0"):
            return "https://github.com/haampie/libtree/releases/download/v{0}/sources.tar.gz".format(version)

        return "https://github.com/haampie/libtree/archive/refs/tags/v{0}.tar.gz".format(version)

    variant('chrpath', default=False, description='Use chrpath for deployment')
    variant('strip', default=False, description='Use binutils strip for deployment')

    # header only dependencies
    depends_on('cpp-termcolor', when='@2.0:', type='build')
    depends_on('cxxopts', when='@2.0:', type='build')
    depends_on('elfio', when='@2.0:', type='build')

    # runtime deps
    depends_on('chrpath', when='+chrpath', type='run')
    depends_on('binutils', when='+strip', type='run')

    # testing
    depends_on('googletest', type='test')

    def cmake_args(self):
        # The option gating the test build was renamed in 2.0
        # (BUILD_TESTING -> LIBTREE_BUILD_TESTS).
        tests_enabled = 'ON' if self.run_tests else 'OFF'
        if self.spec.satisfies('@2.0:'):
            tests_define = 'LIBTREE_BUILD_TESTS'
        else:
            tests_define = 'BUILD_TESTING'

        return [
            self.define(tests_define, tests_enabled)
        ]

    def check(self):
        with working_dir(self.build_directory):
            ctest('--output-on-failure')

Add libtree 3.0.0-rc9 (#27990)

# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.build_systems.cmake import CMakePackage


# Since 3.0 libtree builds with a plain Makefile; older releases keep the
# CMake workflow, emulated below via MakefilePackage's edit/build phases.
class Libtree(MakefilePackage):
    """ldd as a tree"""

    homepage = "https://github.com/haampie/libtree"
    git = "https://github.com/haampie/libtree.git"
    url = "https://github.com/haampie/libtree/archive/refs/tags/v2.0.0.tar.gz"
    maintainers = ['haampie']

    version('master', branch='master')
    version('3.0.0-rc9', sha256='6956c51d58a4f43a23fea0fe7fc5141034829058bfc434e089c9ef1d1848229a')
    version('2.0.0', sha256='099e85d8ba3c3d849ce05b8ba2791dd25cd042a813be947fb321b0676ef71883')
    version('1.2.3', sha256='4a912cf97109219fe931942a30579336b6ab9865395447bd157bbfa74bf4e8cf')
    version('1.2.2', sha256='4ccf09227609869b85a170550b636defcf0b0674ecb0785063b81785b1c29bdd')
    version('1.2.1', sha256='26791c0f418b93d502879db0e1fd2fd3081b885ad87326611d992a5f8977a9b0')
    version('1.2.0', sha256='3e74655f22b1dcc19e8a1b9e7796b8ad44bc37f29e9a99134119e8521e28be97')
    version('1.1.4', sha256='38648f67c8fa72c3a4a3af2bb254b5fd6989c0f1362387ab298176db5cbbcc4e')
    version('1.1.3', sha256='4c681d7b67ef3d62f95450fb7eb84e33ff10a3b9db1f7e195b965b2c3c58226b')
    version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374')
    version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4')
    version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536')
    version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45')
    version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b')

    def url_for_version(self, version):
        # Releases before 2.0.0 shipped a hand-uploaded 'sources.tar.gz'
        # asset; later releases use GitHub's auto-generated tag tarball.
        if version < Version("2.0.0"):
            return "https://github.com/haampie/libtree/releases/download/v{0}/sources.tar.gz".format(version)

        return "https://github.com/haampie/libtree/archive/refs/tags/v{0}.tar.gz".format(version)

    # Version 3.x (Makefile)
    @when('@3:')
    def install(self, spec, prefix):
        make('install', 'PREFIX=' + prefix)

    # Version 2.x and earlier (CMake)
    with when('@:2'):
        variant('chrpath', default=False, description='Use chrpath for deployment')
        variant('strip', default=False, description='Use binutils strip for deployment')
        variant('build_type', default='RelWithDebInfo',
                description='CMake build type',
                values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
        depends_on('googletest', type='test')
        depends_on('cmake@3:', type='build')
        depends_on('chrpath', when='+chrpath', type='run')
        depends_on('binutils', when='+strip', type='run')

        # header only dependencies
        depends_on('cpp-termcolor', when='@2.0.0:2', type='build')
        depends_on('cxxopts', when='@2.0.0:2', type='build')
        depends_on('elfio', when='@2.0.0:2', type='build')

    def cmake_args(self):
        # The option gating the test build was renamed in 2.0
        # (BUILD_TESTING -> LIBTREE_BUILD_TESTS).
        tests_enabled = 'ON' if self.run_tests else 'OFF'
        if self.spec.satisfies('@2.0:'):
            tests_define = 'LIBTREE_BUILD_TESTS'
        else:
            tests_define = 'BUILD_TESTING'

        return [
            CMakePackage.define(tests_define, tests_enabled)
        ]

    @when('@:2')
    def edit(self, spec, prefix):
        # Emulate the CMake configure step from inside MakefilePackage
        options = CMakePackage._std_args(self) + self.cmake_args()
        options.append(self.stage.source_path)
        with working_dir(self.build_directory):
            cmake(*options)

    @when('@:2')
    def check(self):
        with working_dir(self.build_directory):
            ctest('--output-on-failure')
from __future__ import print_function import sys import numpy as np import os import glob import pickle as cPickle import csv import ntpath from scipy import linalg as la from scipy.spatial import distance import sklearn.svm import sklearn.decomposition import sklearn.ensemble import plotly import plotly.subplots import plotly.graph_objs as go import sklearn.metrics from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler sys.path.insert(0, os.path.join( os.path.dirname(os.path.realpath(__file__)), "../")) from pyAudioAnalysis import MidTermFeatures as aF from pyAudioAnalysis import audioBasicIO from sklearn.model_selection import GroupShuffleSplit from imblearn.over_sampling import SMOTE from imblearn.under_sampling import RandomUnderSampler shortTermWindow = 0.050 shortTermStep = 0.050 eps = 0.00000001 class Knn: def __init__(self, features, labels, neighbors): self.features = features self.labels = labels self.neighbors = neighbors def classify(self, test_sample): n_classes = np.unique(self.labels).shape[0] y_dist = (distance.cdist(self.features, test_sample.reshape(1, test_sample.shape[0]), 'euclidean')).T i_sort = np.argsort(y_dist) P = np.zeros((n_classes,)) for i in range(n_classes): P[i] = np.nonzero(self.labels[i_sort[0] [0:self.neighbors]] == i)[0].shape[0] / float(self.neighbors) return np.argmax(P), P def classifier_wrapper(classifier, classifier_type, test_sample): """ This function is used as a wrapper to pattern classification. ARGUMENTS: - classifier: a classifier object of type sklearn.svm.SVC or kNN (defined in this library) or sklearn.ensemble. RandomForestClassifier or sklearn.ensemble. 
GradientBoostingClassifier or sklearn.ensemble.ExtraTreesClassifier - classifier_type: "svm" or "knn" or "randomforests" or "gradientboosting" or "extratrees" - test_sample: a feature vector (np array) RETURNS: - R: class ID - P: probability estimate EXAMPLE (for some audio signal stored in array x): import audioFeatureExtraction as aF import audioTrainTest as aT # load the classifier (here SVM, for kNN use load_model_knn instead): [classifier, MEAN, STD, classNames, mt_win, mt_step, st_win, st_step] = aT.load_model(model_name) # mid-term feature extraction: [mt_features, _, _] = aF.mid_feature_extraction(x, Fs, mt_win * Fs, mt_step * Fs, round(Fs*st_win), round(Fs*st_step)); # feature normalization: curFV = (mt_features[:, i] - MEAN) / STD; # classification [Result, P] = classifierWrapper(classifier, model_type, curFV) """ class_id = -1 probability = -1 if classifier_type == "knn": class_id, probability = classifier.classify(test_sample) elif classifier_type == "svm" or \ classifier_type == "randomforest" or \ classifier_type == "gradientboosting" or \ classifier_type == "extratrees" or \ classifier_type == "svm_rbf": class_id = classifier.predict(test_sample.reshape(1, -1))[0] probability = classifier.predict_proba(test_sample.reshape(1, -1))[0] return class_id, probability def regression_wrapper(model, model_type, test_sample): """ This function is used as a wrapper to pattern classification. ARGUMENTS: - model: regression model - model_type: "svm" or "knn" (TODO) - test_sample: a feature vector (np array) RETURNS: - R: regression result (estimated value) EXAMPLE (for some audio signal stored in array x): TODO """ if model_type == "svm" or model_type == "randomforest" or \ model_type == "svm_rbf": return model.predict(test_sample.reshape(1,-1))[0] # elif classifier_type == "knn": # TODO def train_knn(features, labels, neighbors): """ Train a kNN classifier. 
ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - neighbors: parameter K RETURNS: - kNN: the trained kNN variable """ knn = Knn(features, labels, neighbors) return knn def train_svm(features, labels, c_param, kernel='linear'): """ Train a multi-class probabilitistic SVM classifier. Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - n_estimators: number of trees in the forest - c_param: SVM parameter C (cost of constraints violation) RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. """ svm = sklearn.svm.SVC(C=c_param, kernel=kernel, probability=True, gamma='auto') svm.fit(features, labels) return svm def train_random_forest(features, labels, n_estimators): """ Train a multi-class random forest classifier. Note: This function is simply a wrapper to the sklearn functionality for model training. See function extract_features_and_train() to use a wrapper on both the feature extraction and the model training (and parameter tuning) processes. ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - n_estimators: number of trees in the forest - n_estimators: number of trees in the forest RETURNS: - rf: the trained random forest """ rf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_estimators) rf.fit(features, labels) return rf def train_gradient_boosting(features, labels, n_estimators): """ Train a gradient boosting classifier Note: This function is simply a wrapper to the sklearn functionality for model training. 
See function extract_features_and_train() to use a wrapper on both the feature extraction and the model training (and parameter tuning) processes. ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - n_estimators: number of trees in the forest - n_estimators: number of trees in the forest RETURNS: - rf: the trained model """ rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=n_estimators) rf.fit(features, labels) return rf def train_extra_trees(features, labels, n_estimators): """ Train an extra tree Note: This function is simply a wrapper to the sklearn functionality for model training. See function extract_features_and_train() to use a wrapper on both the feature extraction and the model training (and parameter tuning) processes. ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - n_estimators: number of trees in the forest RETURNS: - et: the trained model """ et = sklearn.ensemble.ExtraTreesClassifier(n_estimators=n_estimators) et.fit(features, labels) return et def train_svm_regression(features, labels, c_param, kernel='linear'): svm = sklearn.svm.SVR(C=c_param, kernel=kernel) svm.fit(features, labels) train_err = np.mean(np.abs(svm.predict(features) - labels)) return svm, train_err def train_random_forest_regression(features, labels, n_estimators): rf = sklearn.ensemble.RandomForestRegressor(n_estimators=n_estimators) rf.fit(features, labels) train_err = np.mean(np.abs(rf.predict(features) - labels)) return rf, train_err def extract_features_and_train(paths, mid_window, mid_step, short_window, short_step, classifier_type, model_name, compute_beat=False, train_percentage=0.90, dict_of_ids=None, use_smote=False): """ This function is used as a wrapper to segment-based audio feature extraction and classifier training. ARGUMENTS: paths: list of paths of directories. 
Each directory contains a signle audio class whose samples are stored in seperate WAV files. mid_window, mid_step: mid-term window length and step short_window, short_step: short-term window and step classifier_type: "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees" model_name: name of the model to be saved dict_of_ids: a dictionary which has as keys the full path of audio files and as values the respective group ids RETURNS: None. Resulting classifier along with the respective model parameters are saved on files. """ # STEP A: Feature Extraction: features, class_names, file_names = \ aF.multiple_directory_feature_extraction(paths, mid_window, mid_step, short_window, short_step, compute_beat=compute_beat) file_names = [item for sublist in file_names for item in sublist] if dict_of_ids: list_of_ids = [dict_of_ids[file] for file in file_names] else: list_of_ids = None if len(features) == 0: print("trainSVM_feature ERROR: No data found in any input folder!") return n_feats = features[0].shape[1] feature_names = ["features" + str(d + 1) for d in range(n_feats)] for i, feat in enumerate(features): if len(feat) == 0: print("trainSVM_feature ERROR: " + paths[i] + " folder is empty or non-existing!") return # STEP B: classifier Evaluation and Parameter Selection: if classifier_type == "svm" or classifier_type == "svm_rbf": classifier_par = np.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0, 20.0]) elif classifier_type == "randomforest": classifier_par = np.array([10, 25, 50, 100, 200, 500]) elif classifier_type == "knn": classifier_par = np.array([1, 3, 5, 7, 9, 11, 13, 15]) elif classifier_type == "gradientboosting": classifier_par = np.array([10, 25, 50, 100, 200, 500]) elif classifier_type == "extratrees": classifier_par = np.array([10, 25, 50, 100, 200, 500]) # get optimal classifier parameter: temp_features = [] for feat in features: temp = [] for i in range(feat.shape[0]): temp_fv = feat[i, :] if (not np.isnan(temp_fv).any()) and (not 
np.isinf(temp_fv).any()): temp.append(temp_fv.tolist()) else: print("NaN Found! Feature vector not used for training") temp_features.append(np.array(temp)) features = temp_features best_param = evaluate_classifier(features, class_names, classifier_type, classifier_par, 0, list_of_ids, n_exp=-1, train_percentage=train_percentage, smote=use_smote) print("Selected params: {0:.5f}".format(best_param)) # STEP C: Train and Save the classifier to file # First Use mean/std standard feature scaling: features, labels = features_to_matrix(features) scaler = StandardScaler() features = scaler.fit_transform(features) mean = scaler.mean_.tolist() std = scaler.scale_.tolist() # Then train the final classifier if classifier_type == "svm": classifier = train_svm(features, labels, best_param) elif classifier_type == "svm_rbf": classifier = train_svm(features, labels, best_param, kernel='rbf') elif classifier_type == "randomforest": classifier = train_random_forest(features, labels, best_param) elif classifier_type == "gradientboosting": classifier = train_gradient_boosting(features, labels, best_param) elif classifier_type == "extratrees": classifier = train_extra_trees(features, labels, best_param) # And save the model to a file, along with # - the scaling -mean/std- vectors) # - the feature extraction parameters if classifier_type == "knn": feature_matrix = features.tolist() labels = labels.tolist() save_path = model_name save_parameters(save_path, feature_matrix, labels, mean, std, class_names, best_param, mid_window, mid_step, short_window, short_step, compute_beat) elif classifier_type == "svm" or classifier_type == "svm_rbf" or \ classifier_type == "randomforest" or \ classifier_type == "gradientboosting" or \ classifier_type == "extratrees": with open(model_name, 'wb') as fid: cPickle.dump(classifier, fid) save_path = model_name + "MEANS" save_parameters(save_path, mean, std, class_names, mid_window, mid_step, short_window, short_step, compute_beat) def save_parameters(path, 
*parameters): with open(path, 'wb') as file_handle: for param in parameters: cPickle.dump(param, file_handle, protocol=cPickle.HIGHEST_PROTOCOL) def feature_extraction_train_regression(folder_name, mid_window, mid_step, short_window, short_step, model_type, model_name, compute_beat=False): """ This function is used as a wrapper to segment-based audio feature extraction and classifier training. ARGUMENTS: folder_name: path of directory containing the WAV files and Regression CSVs mt_win, mt_step: mid-term window length and step st_win, st_step: short-term window and step model_type: "svm" or "knn" or "randomforest" model_name: name of the model to be saved RETURNS: None. Resulting regression model along with the respective model parameters are saved on files. """ # STEP A: Feature Extraction: features, _, filenames = \ aF.multiple_directory_feature_extraction([folder_name], mid_window, mid_step, short_window, short_step, compute_beat=compute_beat) features = features[0] filenames = [ntpath.basename(f) for f in filenames[0]] f_final = [] # Read CSVs: csv_files = glob.glob(folder_name + os.sep + "*.csv") regression_labels = [] regression_names = [] f_final = [] for c in csv_files: cur_regression_labels = [] f_temp = [] # open the csv file that contains the current target value's annotations with open(c, 'rt') as csvfile: csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|') for row in csv_reader: if len(row) == 2: # ... 
and if the current filename exists # in the list of filenames if row[0] in filenames: index = filenames.index(row[0]) cur_regression_labels.append(float(row[1])) f_temp.append(features[index, :]) else: print("Warning: {} not found " "in list of files.".format(row[0])) else: print("Warning: Row with unknown format in regression file") f_final.append(np.array(f_temp)) # cur_regression_labels is the list of values # for the current regression problem regression_labels.append(np.array(cur_regression_labels)) # regression task name regression_names.append(ntpath.basename(c).replace(".csv", "")) if len(features) == 0: print("ERROR: No data found in any input folder!") return # STEP B: classifier Evaluation and Parameter Selection: if model_type == "svm" or model_type == "svm_rbf": model_params = np.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 5.0, 10.0]) elif model_type == "randomforest": model_params = np.array([5, 10, 25, 50, 100]) errors = [] errors_base = [] best_params = [] for iRegression, r in enumerate(regression_names): # get optimal classifeir parameter: print("Regression task " + r) bestParam, error, berror = evaluate_regression(f_final[iRegression], regression_labels[ iRegression], 100, model_type, model_params) errors.append(error) errors_base.append(berror) best_params.append(bestParam) print("Selected params: {0:.5f}".format(bestParam)) # scale the features (mean-std) and keep the mean/std parameters # to be saved with the model scaler = StandardScaler() features_norm = scaler.fit_transform(f_final[iRegression]) mean = scaler.mean_.tolist() std = scaler.scale_.tolist() # STEP C: Save the model to file if model_type == "svm": classifier, _ = train_svm_regression(features_norm, regression_labels[iRegression], bestParam) if model_type == "svm_rbf": classifier, _ = train_svm_regression(features_norm, regression_labels[iRegression], bestParam, kernel='rbf') if model_type == "randomforest": classifier, _ = train_random_forest_regression(features_norm, 
regression_labels[ iRegression], bestParam) # Save the model to a file, along with # - the scaling -mean/std- vectors) # - the feature extraction parameters if model_type == "svm" or model_type == "svm_rbf" \ or model_type == "randomforest": with open(model_name + "_" + r, 'wb') as fid: cPickle.dump(classifier, fid) save_path = model_name + "_" + r + "MEANS" save_parameters(save_path, mean, std, mid_window, mid_step, short_window, short_step, compute_beat) return errors, errors_base, best_params def load_model_knn(knn_model_name, is_regression=False): with open(knn_model_name, "rb") as fo: features = cPickle.load(fo) labels = cPickle.load(fo) mean = cPickle.load(fo) std = cPickle.load(fo) if not is_regression: classes = cPickle.load(fo) neighbors = cPickle.load(fo) mid_window = cPickle.load(fo) mid_step = cPickle.load(fo) short_window = cPickle.load(fo) short_step = cPickle.load(fo) compute_beat = cPickle.load(fo) features = np.array(features) labels = np.array(labels) mean = np.array(mean) std = np.array(std) classifier = Knn(features, labels, neighbors) # Note: a direct call to the kNN constructor is used here if is_regression: return classifier, mean, std, mid_window, mid_step, short_window, \ short_step, compute_beat else: return classifier, mean, std, classes, mid_window, mid_step, \ short_window, short_step, compute_beat def load_model(model_name, is_regression=False): """ This function loads an SVM model either for classification or training. 
ARGMUMENTS: - SVMmodel_name: the path of the model to be loaded - is_regression: a flag indigating whereas this model is regression or not """ with open(model_name + "MEANS", "rb") as fo: mean = cPickle.load(fo) std = cPickle.load(fo) if not is_regression: classNames = cPickle.load(fo) mid_window = cPickle.load(fo) mid_step = cPickle.load(fo) short_window = cPickle.load(fo) short_step = cPickle.load(fo) compute_beat = cPickle.load(fo) mean = np.array(mean) std = np.array(std) with open(model_name, 'rb') as fid: svm_model = cPickle.load(fid) if is_regression: return svm_model, mean, std, mid_window, mid_step, short_window, \ short_step, compute_beat else: return svm_model, mean, std, classNames, mid_window, mid_step, \ short_window, short_step, compute_beat def group_split(X, y, train_indeces, test_indeces, split_id): """ This function splits the data in train and test set according to train/test indeces based on LeaveOneGroupOut ARGUMENTS: X: array-like of shape (n_samples, n_features) y: array-like of shape (n_samples,) train_indeces: The training set indices test_indeces: The testing set indices split_id: the split number RETURNS: List containing train-test split of inputs. """ train_index = train_indeces[split_id] test_index = test_indeces[split_id] X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] return X_train, X_test, y_train, y_test def evaluate_classifier(features, class_names, classifier_name, params, parameter_mode, list_of_ids=None, n_exp=-1, train_percentage=0.90, smote=False): """ ARGUMENTS: features: a list ([numOfClasses x 1]) whose elements containt np matrices of features. 
                      Each matrix features[i] of class i is
                      [n_samples x numOfDimensions]
        class_names:    list of class names (strings)
        classifier_name: svm or knn or randomforest
        params:         list of classifier parameters (for parameter
                        tuning during cross-validation)
        parameter_mode: 0: choose parameters that lead to maximum overall
                           classification ACCURACY
                        1: choose parameters that lead to maximum overall
                           f1 MEASURE
        n_exp:          number of cross-validation experiments
                        (use -1 for auto calculation based on the num
                        of samples)
        train_percentage: percentage of training (vs validation) data
                        default 0.90
    RETURNS:
        bestParam:    the value of the input parameter that optimizes the
                      selected performance measure
    """
    # transcode list of feature matrices to X, y (sklearn)
    X, y = features_to_matrix(features)

    # features_norm = features;
    n_classes = len(features)
    ac_all = []
    f1_all = []
    pre_class_all = []
    rec_classes_all = []
    f1_classes_all = []
    cms_all = []

    # dynamically compute total number of samples:
    # (so that if number of samples is >10K only one train-val repetition
    # is performed)
    n_samples_total = X.shape[0]
    if n_exp == -1:
        n_exp = int(50000 / n_samples_total) + 1

    if list_of_ids:
        # group-aware splitting: precompute one train/test index pair per
        # repetition so samples sharing a group id stay on one side
        train_indeces, test_indeces = [], []
        gss = GroupShuffleSplit(n_splits=n_exp, train_size=.8)
        for train_index, test_index in gss.split(X, y, list_of_ids):
            train_indeces.append(train_index)
            test_indeces.append(test_index)

    for Ci, C in enumerate(params):
        # for each param value
        cm = np.zeros((n_classes, n_classes))
        f1_per_exp = []
        r1t_all = []
        p1t_all = []
        y_pred_all = []
        y_test_all = []
        for e in range(n_exp):
            y_pred, y_real = [], []
            # for each cross-validation iteration:
            print("Param = {0:.5f} - classifier Evaluation "
                  "Experiment {1:d} of {2:d}".format(C, e+1, n_exp))
            # split features:
            if list_of_ids:
                X_train, X_test, y_train, y_test = group_split(
                    X, y, train_indeces, test_indeces, e)
            else:
                X_train, X_test, y_train, y_test = \
                    train_test_split(X, y, test_size=1-train_percentage)

            # mean/std scale the features:
            scaler = StandardScaler()
            if smote:
                # oversample minority classes on the training split only
                sm = SMOTE(random_state = 2)
                #sm = RandomUnderSampler(random_state=0)
                X_train, y_train = sm.fit_resample(X_train, y_train)
            scaler.fit(X_train)
            X_train = scaler.transform(X_train)

            # train multi-class svms:
            if classifier_name == "svm":
                classifier = train_svm(X_train, y_train, C)
            elif classifier_name == "svm_rbf":
                classifier = train_svm(X_train, y_train, C, kernel='rbf')
            elif classifier_name == "knn":
                classifier = train_knn(X_train, y_train, C)
            elif classifier_name == "randomforest":
                classifier = train_random_forest(X_train, y_train, C)
            elif classifier_name == "gradientboosting":
                classifier = train_gradient_boosting(X_train, y_train, C)
            elif classifier_name == "extratrees":
                classifier = train_extra_trees(X_train, y_train,C)

            # get predictions and compute current comfusion matrix
            cmt = np.zeros((n_classes, n_classes))
            X_test = scaler.transform(X_test)
            for i_test_sample in range(X_test.shape[0]):
                y_pred.append(classifier_wrapper(
                    classifier, classifier_name,
                    X_test[i_test_sample, :])[0])
            cmt = sklearn.metrics.confusion_matrix(y_test, y_pred)
            f1t = sklearn.metrics.f1_score(y_test, y_pred, average='macro')
            r1t_all.append(sklearn.metrics.recall_score(y_test, y_pred,
                                                        average='macro'))
            p1t_all.append(sklearn.metrics.precision_score(
                y_test, y_pred, average='macro'))
            y_pred_all += y_pred
            y_test_all += y_test.tolist()
            f1_per_exp.append(f1t)
            if cmt.size != cm.size:
                # some class is absent from this split: pad the confusion
                # matrix with zero rows/columns for the missing classes
                all_classes = set(y)
                split_classes = set(y_test)
                missing_classes = all_classes.difference(split_classes)
                missing_classes = list(missing_classes)
                missing_classes = [int(x) for x in missing_classes]
                cmt = np.insert(cmt, missing_classes, 0, axis=0)
                cmt = np.insert(cmt, missing_classes, 0, axis=1)

            cm = cm + cmt
        # tiny epsilon avoids division by zero for empty rows/columns
        cm = cm + 0.0000000010
        rec = np.array([cm[ci, ci] / np.sum(cm[ci, :])
                        for ci in range(cm.shape[0])])
        pre = np.array([cm[ci, ci] / np.sum(cm[:, ci])
                        for ci in range(cm.shape[0])])
        pre_class_all.append(pre)
        rec_classes_all.append(rec)
        f1 = 2 * rec * pre / (rec + pre)

        # this is just for debugging (it should be
        # equal to f1)
        f1_b = sklearn.metrics.f1_score(y_test_all, y_pred_all,
                                        average='macro')
        # Note: np.mean(f1_per_exp) will not be exacty equal to the
        # overall f1 (i.e. f1 and f1_b because these are calculated on a
        # per-sample basis)
        f1_std = np.std(f1_per_exp)
        print(np.mean(f1), f1_b, f1_std)

        f1_classes_all.append(f1)
        ac_all.append(np.sum(np.diagonal(cm)) / np.sum(cm))

        cms_all.append(cm)
        f1_all.append(np.mean(f1))

    # print a table: per-class PRE/REC/f1 for every parameter value, plus
    # overall accuracy and macro f1
    print("\t\t", end="")
    for i, c in enumerate(class_names):
        if i == len(class_names)-1:
            print("{0:s}\t\t".format(c), end="")
        else:
            print("{0:s}\t\t\t".format(c), end="")
    print("OVERALL")
    print("\tC", end="")
    for c in class_names:
        print("\tPRE\tREC\tf1", end="")
    print("\t{0:s}\t{1:s}".format("ACC", "f1"))
    best_ac_ind = np.argmax(ac_all)
    best_f1_ind = np.argmax(f1_all)
    for i in range(len(pre_class_all)):
        print("\t{0:.3f}".format(params[i]), end="")
        for c in range(len(pre_class_all[i])):
            print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(
                100.0 * pre_class_all[i][c],
                100.0 * rec_classes_all[i][c],
                100.0 * f1_classes_all[i][c]), end="")
        print("\t{0:.1f}\t{1:.1f}".format(100.0 * ac_all[i],
                                          100.0 * f1_all[i]), end="")
        if i == best_f1_ind:
            print("\t best f1", end="")
        if i == best_ac_ind:
            print("\t best Acc", end="")
        print("")

    if parameter_mode == 0:
        # keep parameters that maximize overall classification accuracy:
        print("Confusion Matrix:")
        print_confusion_matrix(cms_all[best_ac_ind], class_names)
        return params[best_ac_ind]
    elif parameter_mode == 1:
        # keep parameters that maximize overall f1 measure:
        print("Confusion Matrix:")
        print_confusion_matrix(cms_all[best_f1_ind], class_names)
        return params[best_f1_ind]


def evaluate_regression(features, labels, n_exp, method_name, params):
    """
    ARGUMENTS:
        features:     np matrices of features [n_samples x numOfDimensions]
        labels:       list of sample labels
        n_exp:        number of cross-validation experiments
        method_name:  "svm" or "randomforest"
        params:       list of classifier params to be evaluated
    RETURNS:
        bestParam:    the value of the input parameter that
                      optimizes the selected performance measure
    """
    # mean/std feature scaling:
    scaler = StandardScaler()
    features_norm = scaler.fit_transform(features)
    n_samples = labels.shape[0]
    per_train = 0.9
    errors_all = []
    er_train_all = []
    er_base_all = []
    for Ci, C in enumerate(params):
        # for each param value
        errors = []
        errors_train = []
        errors_baseline = []
        for e in range(n_exp):
            # for each cross-validation iteration:
            # split features:
            randperm = np.random.permutation(range(n_samples))
            n_train = int(round(per_train * n_samples))
            f_train = [features_norm[randperm[i]]
                       for i in range(n_train)]
            f_test = [features_norm[randperm[i+n_train]]
                      for i in range(n_samples - n_train)]
            l_train = [labels[randperm[i]] for i in range(n_train)]
            l_test = [labels[randperm[i + n_train]]
                      for i in range(n_samples - n_train)]

            # train multi-class svms:
            f_train = np.matrix(f_train)
            if method_name == "svm":
                classifier, train_err = \
                    train_svm_regression(f_train, l_train, C)
            elif method_name == "svm_rbf":
                classifier, train_err = \
                    train_svm_regression(f_train, l_train, C,
                                         kernel='rbf')
            elif method_name == "randomforest":
                classifier, train_err = \
                    train_random_forest_regression(f_train, l_train, C)

            # compute test squared errors; the baseline always predicts
            # the mean of the training labels
            error_test = []
            error_test_baseline = []
            for itest, fTest in enumerate(f_test):
                R = regression_wrapper(classifier, method_name, fTest)
                Rbaseline = np.mean(l_train)
                error_test.append((R - l_test[itest]) *
                                  (R - l_test[itest]))
                error_test_baseline.append((Rbaseline - l_test[itest]) *
                                           (Rbaseline - l_test[itest]))
            error = np.array(error_test).mean()
            error_baseline = np.array(error_test_baseline).mean()
            errors.append(error)
            errors_train.append(train_err)
            errors_baseline.append(error_baseline)
        errors_all.append(np.array(errors).mean())
        er_train_all.append(np.array(errors_train).mean())
        er_base_all.append(np.array(errors_baseline).mean())

    best_ind = np.argmin(errors_all)

    print("{0:s}\t\t{1:s}\t\t{2:s}\t\t{3:s}".format("Param", "MSE",
                                                    "T-MSE", "R-MSE"))
    for i in range(len(errors_all)):
        print("{0:.4f}\t\t{1:.2f}\t\t{2:.2f}\t\t{3:.2f}".format(
            params[i], errors_all[i], er_train_all[i], er_base_all[i]),
            end="")
        if i == best_ind:
            print("\t\t best",end="")
        print("")
    return params[best_ind], errors_all[best_ind], er_base_all[best_ind]


def print_confusion_matrix(cm, class_names):
    """
    This function prints a confusion matrix for a particular
    classification task.
    ARGUMENTS:
        cm:            a 2-D np array of the confusion matrix
                       (cm[i,j] is the number of times a sample from
                       class i was classified in class j)
        class_names:   a list that contains the names of the classes
    """
    if cm.shape[0] != len(class_names):
        print("printConfusionMatrix: Wrong argument sizes\n")
        return

    # header row (class names truncated to 3 chars to fit columns)
    for c in class_names:
        if len(c) > 4:
            c = c[0:3]
        print("\t{0:s}".format(c), end="")
    print("")

    # matrix rows, shown as percentages of the total sample count
    for i, c in enumerate(class_names):
        if len(c) > 4:
            c = c[0:3]
        print("{0:s}".format(c), end="")
        for j in range(len(class_names)):
            print("\t{0:.2f}".format(100.0 * cm[i][j] / np.sum(cm)),
                  end="")
        print("")


def features_to_matrix(features):
    """
    features_to_matrix(features)

    This function takes a list of feature matrices as argument and returns
    a single concatenated feature matrix and the respective class labels.
    ARGUMENTS:
        - features:        a list of feature matrices

    RETURNS:
        - feature_matrix:  a concatenated matrix of features
        - labels:          a vector of class indices
    """
    labels = np.array([])
    feature_matrix = np.array([])
    for i, f in enumerate(features):
        if i == 0:
            feature_matrix = f
            labels = i * np.ones((len(f), 1))
        else:
            feature_matrix = np.vstack((feature_matrix, f))
            labels = np.append(labels, i * np.ones((len(f), 1)))
    return feature_matrix, labels


def pca_wrapper(features, dimensions):
    # Fit PCA on the concatenated feature matrix and project the features
    # onto the first `dimensions` principal components.
    features, labels = features_to_matrix(features)
    pca = sklearn.decomposition.PCA(n_components = dimensions)
    pca.fit(features)
    coeff = pca.components_
    coeff = coeff[:, 0:dimensions]

    features_transformed = []
    for f in features:
        ft = f.copy()
        # ft = pca.transform(ft, k=nDims)
        ft = np.dot(f, coeff)
        features_transformed.append(ft)

    return features_transformed, coeff


def compute_class_rec_pre_f1(c_mat):
    """
    Gets recall, precision and f1 PER CLASS, given the confusion matrix
    :param c_mat: the [n_class x n_class] confusion matrix
    :return: rec, pre and f1 for each class
    """
    n_class = c_mat.shape[0]
    rec, pre, f1 = [], [], []
    for i in range(n_class):
        rec.append(float(c_mat[i, i]) / np.sum(c_mat[i, :]))
        pre.append(float(c_mat[i, i]) / np.sum(c_mat[:, i]))
        f1.append(2 * rec[-1] * pre[-1] / (rec[-1] + pre[-1]))
    return rec, pre, f1


def evaluate_model_for_folders(input_test_folders, model_name, model_type,
                               positive_class, plot=True):
    """
    evaluate_model_for_folders(input_test_folders, model_name, model_type)
    This function evaluates a model by computing the confusion matrix,
    the per class performance metrics and by generating a ROC and
    Precision / Recall diagrams (for a particular class of interest),
    for a given test dataset.
    The dataset needs to be organized in folders (one folder per audio
    class), exactly like in extract_features_and_train()
    :param input_test_folders: list of folders (each folder represents a
           separate audio class)
    :param model_name: path to the model to be tested
    :param model_type: type of the model
    :param positive_class name of the positive class
    :param plot (True default) if to plot 2 diagrams on plotly
    :return: thr_prre, pre, rec (thresholds, precision recall values)
             thr_roc, fpr, tpr (thresholds, false positive, true positive
             rates)

    Usage example:
    from pyAudioAnalysis import audioTrainTest as aT
    thr_prre, pre, rec, thr_roc, fpr, tpr =
    aT.evaluate_model_for_folders(["4_classes_small/speech",
                                   "4_classes_small/music"],
                                  "data/models/svm_rbf_4class",
                                  "svm_rbf", "speech")
    """
    class_names = []
    y_true_binary = []
    y_true = []
    y_pred = []
    probs_positive = []
    for i, d in enumerate(input_test_folders):
        # folder name (with or without trailing separator) is the class
        if d[-1] == os.sep:
            class_names.append(d.split(os.sep)[-2])
        else:
            class_names.append(d.split(os.sep)[-1])
        types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg')
        wav_file_list = []
        for files in types:
            wav_file_list.extend(glob.glob(os.path.join(d, files)))
        # get list of audio files for current folder and run classifier
        for w in wav_file_list:
            c, p, probs_names = file_classification(w, model_name,
                                                    model_type)
            y_pred.append(c)
            y_true.append(probs_names.index(class_names[i]))
            if i==probs_names.index(positive_class):
                y_true_binary.append(1)
            else:
                y_true_binary.append(0)
            # probability assigned to the positive class for ROC / PR
            prob_positive = p[probs_names.index(positive_class)]
            probs_positive.append(prob_positive)
    pre, rec, thr_prre = sklearn.metrics.precision_recall_curve(
        y_true_binary, probs_positive)
    fpr, tpr, thr_roc = sklearn.metrics.roc_curve(y_true_binary,
                                                  probs_positive)
    cm = sklearn.metrics.confusion_matrix(y_true, y_pred)
    rec_c, pre_c, f1_c = compute_class_rec_pre_f1(cm)
    f1 = (sklearn.metrics.f1_score(y_true, y_pred, average='macro'))
    acc = (sklearn.metrics.accuracy_score(y_true, y_pred))
    print(cm)
    print(rec_c, pre_c, f1_c, f1, acc)
    if plot:
        # 2x2 dashboard: confusion matrix heatmap, class-wise bars,
        # precision/recall vs threshold, ROC
        titles = ["Confusion matrix, acc = {0:.1f}%, "
                  " F1 (macro): {1:.1f}%".format(100 * acc, 100 * f1),
                  "Class-wise Performance measures",
                  "Pre vs Rec for " + positive_class,
                  "ROC for " + positive_class]
        figs = plotly.subplots.make_subplots(rows=2, cols=2,
                                             subplot_titles=titles)

        heatmap = go.Heatmap(z=np.flip(cm, axis=0), x=class_names,
                             y=list(reversed(class_names)),
                             colorscale=[[0, '#4422ff'], [1, '#ff4422']],
                             name="confusin matrix", showscale=False)

        mark_prop1 = dict(color='rgba(80, 220, 150, 0.5)',
                          line=dict(color='rgba(80, 220, 150, 1)',
                                    width=2))
        mark_prop2 = dict(color='rgba(80, 150, 220, 0.5)',
                          line=dict(color='rgba(80, 150, 220, 1)',
                                    width=2))
        mark_prop3 = dict(color='rgba(250, 150, 150, 0.5)',
                          line=dict(color='rgba(250, 150, 150, 1)',
                                    width=3))

        b1 = go.Bar(x=class_names, y=rec_c, name="Recall",
                    marker=mark_prop1)
        b2 = go.Bar(x=class_names, y=pre_c, name="Precision",
                    marker=mark_prop2)
        b3 = go.Bar(x=class_names, y=f1_c, name="F1", marker=mark_prop3)

        figs.append_trace(heatmap, 1, 1); figs.append_trace(b1, 1, 2)
        figs.append_trace(b2, 1, 2); figs.append_trace(b3, 1, 2)
        figs.append_trace(go.Scatter(x=thr_prre, y=pre, name="Precision",
                                     marker=mark_prop1), 2, 1)
        figs.append_trace(go.Scatter(x=thr_prre, y=rec, name="Recall",
                                     marker=mark_prop2), 2, 1)
        figs.append_trace(go.Scatter(x=fpr, y=tpr, showlegend=False),
                          2, 2)
        figs.update_xaxes(title_text="threshold", row=2, col=1)
        figs.update_xaxes(title_text="false positive rate", row=2, col=2)
        figs.update_yaxes(title_text="true positive rate", row=2, col=2)
        plotly.offline.plot(figs, filename="temp.html", auto_open=True)

    return cm, thr_prre, pre, rec, thr_roc, fpr, tpr


def file_classification(input_file, model_name, model_type):
    # Classify a single audio file using a stored model; returns
    # (class_id, probability, classes) or (-1, -1, -1) on I/O errors.
    # Load classifier:
    if not os.path.isfile(model_name):
        print("fileClassification: input model_name not found!")
        return -1, -1, -1

    if isinstance(input_file, str) and not os.path.isfile(input_file):
        print("fileClassification: wav file not found!")
        return -1, -1, -1

    if model_type == 'knn':
        classifier, mean, std, classes, mid_window, mid_step, \
            short_window, short_step, compute_beat = \
            load_model_knn(model_name)
    else:
        classifier, mean, std, classes, mid_window, mid_step, \
            short_window, short_step, compute_beat = \
            load_model(model_name)

    # read audio file and convert to mono
    sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
    signal = audioBasicIO.stereo_to_mono(signal)

    if sampling_rate == 0:
        # audio file IO problem
        return -1, -1, -1
    if signal.shape[0] / float(sampling_rate) < mid_window:
        # clip shorter than the mid-term window: shrink the window
        mid_window = signal.shape[0] / float(sampling_rate)

    # feature extraction:
    mid_features, s, _ = \
        aF.mid_feature_extraction(signal, sampling_rate,
                                  mid_window * sampling_rate,
                                  mid_step * sampling_rate,
                                  round(sampling_rate * short_window),
                                  round(sampling_rate * short_step))
    # long term averaging of mid-term statistics
    mid_features = mid_features.mean(axis=1)
    if compute_beat:
        beat, beat_conf = aF.beat_extraction(s, short_step)
        mid_features = np.append(mid_features, beat)
        mid_features = np.append(mid_features, beat_conf)
    feature_vector = (mid_features - mean) / std    # normalization

    # classification
    class_id, probability = classifier_wrapper(classifier, model_type,
                                               feature_vector)
    return class_id, probability, classes


def file_regression(input_file, model_name, model_type):
    # Apply every stored regression model whose filename matches the
    # `model_name + "_*"` prefix to a single audio file.
    # Load classifier:
    if not os.path.isfile(input_file):
        print("fileClassification: wav file not found!")
        return -1, -1, -1

    regression_models = glob.glob(model_name + "_*")
    regression_models2 = []
    for r in regression_models:
        # skip the "MEANS" companion files (scaling parameters)
        if r[-5::] != "MEANS":
            regression_models2.append(r)
    regression_models = regression_models2
    regression_names = []
    for r in regression_models:
        # the regression task name is the suffix after the last "_"
        regression_names.append(r[r.rfind("_")+1::])

    # FEATURE EXTRACTION
    # LOAD ONLY THE FIRST MODEL (for mt_win, etc)
    if model_type == 'svm' or model_type == "svm_rbf" or \
            model_type == 'randomforest':
        _, _, _, mid_window, mid_step, short_window, short_step, \
            compute_beat = load_model(regression_models[0], True)

    # read audio file
    # and convert to mono
    samping_rate, signal = audioBasicIO.read_audio_file(input_file)
    signal = audioBasicIO.stereo_to_mono(signal)

    # feature extraction:
    mid_features, s, _ = \
        aF.mid_feature_extraction(signal, samping_rate,
                                  mid_window * samping_rate,
                                  mid_step * samping_rate,
                                  round(samping_rate * short_window),
                                  round(samping_rate * short_step))
    # long term averaging of mid-term statistics
    mid_features = mid_features.mean(axis=1)
    if compute_beat:
        beat, beat_conf = aF.beat_extraction(s, short_step)
        mid_features = np.append(mid_features, beat)
        mid_features = np.append(mid_features, beat_conf)

    # REGRESSION
    R = []
    for ir, r in enumerate(regression_models):
        if not os.path.isfile(r):
            print("fileClassification: input model_name not found!")
            return (-1, -1, -1)
        if model_type == 'svm' or model_type == "svm_rbf" \
                or model_type == 'randomforest':
            model, mean, std, _, _, _, _, _ = load_model(r, True)
            curFV = (mid_features - mean) / std    # normalization
            R.append(regression_wrapper(model, model_type, curFV))
            # classification
    return R, regression_names


def lda(data, labels, red_dim):
    # Linear discriminant analysis: project `data` onto `red_dim`
    # dimensions that maximize class separability.
    # Centre data
    data -= data.mean(axis=0)
    n_data = np.shape(data)[0]
    n_dim = np.shape(data)[1]

    Sw = np.zeros((n_dim, n_dim))

    C = np.cov((data.T))

    # Loop over classes
    classes = np.unique(labels)
    for i in range(len(classes)):
        # Find relevant datapoints
        indices = (np.where(labels == classes[i]))
        d = np.squeeze(data[indices, :])
        classcov = np.cov((d.T))
        # NOTE(review): np.shape(indices)[0] is the length of the
        # np.where() result tuple (always 1), not the number of samples
        # in the class — this per-class weight looks unintended; confirm
        # before relying on Sw.
        Sw += float(np.shape(indices)[0])/n_data * classcov

    Sb = C - Sw

    # Now solve for W
    # Compute eigenvalues, eigenvectors and sort into order
    evals, evecs = la.eig(Sw, Sb)
    indices = np.argsort(evals)
    indices = indices[::-1]
    evecs = evecs[:, indices]
    w = evecs[:, :red_dim]

    new_data = np.dot(data, w)
    return new_data, w


def train_speaker_models():
    """
    This script is used to train the speaker-related models
    (NOTE: data paths are hard-coded and NOT included in the library,
    the models are, however included)
         import audioTrainTest as aT
         aT.trainSpeakerModelsScript()
    """
    mt_win = 2.0
    mt_step = 2.0
    st_win = 0.020
    st_step = 0.020

    # all-speakers diarization model
    dir_name = "DIARIZATION_ALL/all"
    list_of_dirs = [os.path.join(dir_name, name)
                    for name in os.listdir(dir_name)
                    if os.path.isdir(os.path.join(dir_name, name))]
    extract_features_and_train(list_of_dirs, mt_win, mt_step, st_win,
                               st_step, "knn", "data/knnSpeakerAll",
                               compute_beat=False, train_percentage=0.50)

    # female/male discrimination model
    dir_name = "DIARIZATION_ALL/female_male"
    list_of_dirs = [os.path.join(dir_name, name)
                    for name in os.listdir(dir_name)
                    if os.path.isdir(os.path.join(dir_name, name))]
    extract_features_and_train(list_of_dirs, mt_win, mt_step, st_win,
                               st_step, "knn", "data/knnSpeakerFemaleMale",
                               compute_beat=False, train_percentage=0.50)


def main(argv):
    return 0


if __name__ == '__main__':
    main(sys.argv)

# NOTE(review): the bare token below appears to be a stray artifact of
# concatenated sources (likely a commit message), not code — confirm and
# remove upstream.
autopep8

from __future__ import print_function
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import GroupShuffleSplit
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import MidTermFeatures as aF
import sys
import numpy as np
import os
import glob
import pickle as cPickle
import csv
import ntpath
from scipy import linalg as la
from scipy.spatial import distance
import sklearn.svm
import sklearn.decomposition
import sklearn.ensemble
import plotly
import plotly.subplots
import plotly.graph_objs as go
import sklearn.metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

sys.path.insert(0, os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "../"))

shortTermWindow = 0.050
shortTermStep = 0.050
eps = 0.00000001


class Knn:
    # Minimal k-nearest-neighbour classifier operating on a fixed
    # training feature matrix (labels are integer class indices).
    def __init__(self, features, labels, neighbors):
        self.features = features
        self.labels = labels
        self.neighbors = neighbors

    def classify(self, test_sample):
        # Return (winning class index, per-class probability estimates),
        # where P[i] is the fraction of the k nearest neighbours in
        # class i.
        n_classes = np.unique(self.labels).shape[0]
        y_dist = (distance.cdist(self.features,
                                 test_sample.reshape(
                                     1, test_sample.shape[0]),
                                 'euclidean')).T
        i_sort = np.argsort(y_dist)
        P = np.zeros((n_classes,))
        for i in range(n_classes):
            P[i] = np.nonzero(self.labels[i_sort[0]
                              [0:self.neighbors]] == i)[0].shape[0] / \
                   float(self.neighbors)
        return np.argmax(P), P


def classifier_wrapper(classifier, classifier_type, test_sample):
    """
    This function is used as a wrapper to pattern classification.
    ARGUMENTS:
        - classifier:      a classifier object of type sklearn.svm.SVC or
                           kNN (defined in this library) or
                           sklearn.ensemble.RandomForestClassifier or
                           sklearn.ensemble.GradientBoostingClassifier or
                           sklearn.ensemble.ExtraTreesClassifier
        - classifier_type: "svm" or "knn" or "randomforests" or
                           "gradientboosting" or "extratrees"
        - test_sample:     a feature vector (np array)
    RETURNS:
        - R:           class ID
        - P:           probability estimate

    EXAMPLE (for some audio signal stored in array x):
        import audioFeatureExtraction as aF
        import audioTrainTest as aT
        # load the classifier (here SVM, for kNN use load_model_knn
        # instead):
        [classifier, MEAN, STD, classNames, mt_win, mt_step, st_win,
         st_step] = aT.load_model(model_name)
        # mid-term feature extraction:
        [mt_features, _, _] = aF.mid_feature_extraction(
            x, Fs, mt_win * Fs, mt_step * Fs, round(Fs*st_win),
            round(Fs*st_step));
        # feature normalization:
        curFV = (mt_features[:, i] - MEAN) / STD;
        # classification
        [Result, P] = classifierWrapper(classifier, model_type, curFV)
    """
    class_id = -1
    probability = -1
    if classifier_type == "knn":
        class_id, probability = classifier.classify(test_sample)
    elif classifier_type == "svm" or \
            classifier_type == "randomforest" or \
            classifier_type == "gradientboosting" or \
            classifier_type == "extratrees" or \
            classifier_type == "svm_rbf":
        # sklearn estimators expect a 2-D array: one row per sample
        class_id = classifier.predict(test_sample.reshape(1, -1))[0]
        probability = classifier.predict_proba(
            test_sample.reshape(1, -1))[0]
    return class_id, probability


def regression_wrapper(model, model_type, test_sample):
    """
    This function is used as a wrapper to pattern classification.
ARGUMENTS: - model: regression model - model_type: "svm" or "knn" (TODO) - test_sample: a feature vector (np array) RETURNS: - R: regression result (estimated value) EXAMPLE (for some audio signal stored in array x): TODO """ if model_type == "svm" or model_type == "randomforest" or \ model_type == "svm_rbf": return model.predict(test_sample.reshape(1, -1))[0] # elif classifier_type == "knn": # TODO def train_knn(features, labels, neighbors): """ Train a kNN classifier. ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - neighbors: parameter K RETURNS: - kNN: the trained kNN variable """ knn = Knn(features, labels, neighbors) return knn def train_svm(features, labels, c_param, kernel='linear'): """ Train a multi-class probabilitistic SVM classifier. Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - n_estimators: number of trees in the forest - c_param: SVM parameter C (cost of constraints violation) RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. """ svm = sklearn.svm.SVC(C=c_param, kernel=kernel, probability=True, gamma='auto') svm.fit(features, labels) return svm def train_random_forest(features, labels, n_estimators): """ Train a multi-class random forest classifier. Note: This function is simply a wrapper to the sklearn functionality for model training. See function extract_features_and_train() to use a wrapper on both the feature extraction and the model training (and parameter tuning) processes. 
ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - n_estimators: number of trees in the forest - n_estimators: number of trees in the forest RETURNS: - rf: the trained random forest """ rf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_estimators) rf.fit(features, labels) return rf def train_gradient_boosting(features, labels, n_estimators): """ Train a gradient boosting classifier Note: This function is simply a wrapper to the sklearn functionality for model training. See function extract_features_and_train() to use a wrapper on both the feature extraction and the model training (and parameter tuning) processes. ARGUMENTS: - features: a feature matrix [n_samples x numOfDimensions] - labels: a label matrix: [n_samples x 1] - n_estimators: number of trees in the forest - n_estimators: number of trees in the forest RETURNS: - rf: the trained model """ rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=n_estimators) rf.fit(features, labels) return rf def train_extra_trees(features, labels, n_estimators): """ Train an extra tree Note: This function is simply a wrapper to the sklearn functionality for model training. See function extract_features_and_train() to use a wrapper on both the feature extraction and the model training (and parameter tuning) processes. 
    ARGUMENTS:
        - features:     a feature matrix [n_samples x numOfDimensions]
        - labels:       a label matrix: [n_samples x 1]
        - n_estimators: number of trees in the forest
    RETURNS:
        - et:           the trained model
    """
    et = sklearn.ensemble.ExtraTreesClassifier(n_estimators=n_estimators)
    et.fit(features, labels)
    return et


def train_svm_regression(features, labels, c_param, kernel='linear'):
    # Fit an SVR and also report the mean absolute training error.
    svm = sklearn.svm.SVR(C=c_param, kernel=kernel)
    svm.fit(features, labels)
    train_err = np.mean(np.abs(svm.predict(features) - labels))
    return svm, train_err


def train_random_forest_regression(features, labels, n_estimators):
    # Fit a random-forest regressor and also report the mean absolute
    # training error.
    rf = sklearn.ensemble.RandomForestRegressor(n_estimators=n_estimators)
    rf.fit(features, labels)
    train_err = np.mean(np.abs(rf.predict(features) - labels))
    return rf, train_err


def extract_features_and_train(paths, mid_window, mid_step, short_window,
                               short_step, classifier_type, model_name,
                               compute_beat=False, train_percentage=0.90,
                               dict_of_ids=None, use_smote=False):
    """
    This function is used as a wrapper to segment-based audio feature
    extraction and classifier training.
    ARGUMENTS:
        paths:            list of paths of directories. Each directory
                          contains a signle audio class whose samples
                          are stored in seperate WAV files.
        mid_window, mid_step:     mid-term window length and step
        short_window, short_step: short-term window and step
        classifier_type:  "svm" or "knn" or "randomforest" or
                          "gradientboosting" or "extratrees"
        model_name:       name of the model to be saved
        dict_of_ids:      a dictionary which has as keys the full path of
                          audio files and as values the respective group
                          ids
    RETURNS:
        None. Resulting classifier along with the respective model
        parameters are saved on files.
    """
    # STEP A: Feature Extraction:
    features, class_names, file_names = \
        aF.multiple_directory_feature_extraction(paths, mid_window,
                                                 mid_step, short_window,
                                                 short_step,
                                                 compute_beat=compute_beat)
    file_names = [item for sublist in file_names for item in sublist]
    if dict_of_ids:
        # map each extracted file to its group id for group-aware CV
        list_of_ids = [dict_of_ids[file] for file in file_names]
    else:
        list_of_ids = None
    if len(features) == 0:
        print("trainSVM_feature ERROR: No data found in any input folder!")
        return

    n_feats = features[0].shape[1]
    feature_names = ["features" + str(d + 1) for d in range(n_feats)]

    for i, feat in enumerate(features):
        if len(feat) == 0:
            print("trainSVM_feature ERROR: " + paths[i] +
                  " folder is empty or non-existing!")
            return

    # STEP B: classifier Evaluation and Parameter Selection:
    # candidate hyperparameter grid per classifier type
    if classifier_type == "svm" or classifier_type == "svm_rbf":
        classifier_par = np.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0,
                                   20.0])
    elif classifier_type == "randomforest":
        classifier_par = np.array([10, 25, 50, 100, 200, 500])
    elif classifier_type == "knn":
        classifier_par = np.array([1, 3, 5, 7, 9, 11, 13, 15])
    elif classifier_type == "gradientboosting":
        classifier_par = np.array([10, 25, 50, 100, 200, 500])
    elif classifier_type == "extratrees":
        classifier_par = np.array([10, 25, 50, 100, 200, 500])

    # get optimal classifier parameter:
    # first, drop feature vectors containing NaN/Inf values
    temp_features = []
    for feat in features:
        temp = []
        for i in range(feat.shape[0]):
            temp_fv = feat[i, :]
            if (not np.isnan(temp_fv).any()) and \
                    (not np.isinf(temp_fv).any()):
                temp.append(temp_fv.tolist())
            else:
                print("NaN Found! Feature vector not used for training")
        temp_features.append(np.array(temp))
    features = temp_features

    best_param = evaluate_classifier(features, class_names,
                                     classifier_type, classifier_par, 0,
                                     list_of_ids, n_exp=-1,
                                     train_percentage=train_percentage,
                                     smote=use_smote)

    print("Selected params: {0:.5f}".format(best_param))

    # STEP C: Train and Save the classifier to file
    # First Use mean/std standard feature scaling:
    features, labels = features_to_matrix(features)
    scaler = StandardScaler()
    features = scaler.fit_transform(features)
    mean = scaler.mean_.tolist()
    std = scaler.scale_.tolist()

    # Then train the final classifier
    if classifier_type == "svm":
        classifier = train_svm(features, labels, best_param)
    elif classifier_type == "svm_rbf":
        classifier = train_svm(features, labels, best_param,
                               kernel='rbf')
    elif classifier_type == "randomforest":
        classifier = train_random_forest(features, labels, best_param)
    elif classifier_type == "gradientboosting":
        classifier = train_gradient_boosting(features, labels, best_param)
    elif classifier_type == "extratrees":
        classifier = train_extra_trees(features, labels, best_param)

    # And save the model to a file, along with
    # - the scaling -mean/std- vectors)
    # - the feature extraction parameters
    if classifier_type == "knn":
        # kNN stores the full (scaled) training data, not a fitted model
        feature_matrix = features.tolist()
        labels = labels.tolist()
        save_path = model_name
        save_parameters(save_path, feature_matrix, labels, mean, std,
                        class_names, best_param, mid_window, mid_step,
                        short_window, short_step, compute_beat)
    elif classifier_type == "svm" or classifier_type == "svm_rbf" or \
            classifier_type == "randomforest" or \
            classifier_type == "gradientboosting" or \
            classifier_type == "extratrees":
        with open(model_name, 'wb') as fid:
            cPickle.dump(classifier, fid)
        save_path = model_name + "MEANS"
        save_parameters(save_path, mean, std, class_names, mid_window,
                        mid_step, short_window, short_step, compute_beat)


def save_parameters(path, *parameters):
    # Sequentially pickle each parameter to `path`; the load order in
    # load_model()/load_model_knn() must match this dump order.
    with open(path, 'wb') as file_handle:
        for param in parameters:
            cPickle.dump(param, file_handle,
                         protocol=cPickle.HIGHEST_PROTOCOL)


def feature_extraction_train_regression(folder_name, mid_window, mid_step,
                                        short_window, short_step,
                                        model_type, model_name,
                                        compute_beat=False):
    """
    This function is used as a wrapper to segment-based audio feature
    extraction and classifier training.
    ARGUMENTS:
        folder_name:     path of directory containing the WAV files and
                         Regression CSVs
        mt_win, mt_step: mid-term window length and step
        st_win, st_step: short-term window and step
        model_type:      "svm" or "knn" or "randomforest"
        model_name:      name of the model to be saved
    RETURNS:
        None. Resulting regression model along with the respective model
        parameters are saved on files.
    """
    # STEP A: Feature Extraction:
    features, _, filenames = \
        aF.multiple_directory_feature_extraction([folder_name],
                                                 mid_window, mid_step,
                                                 short_window, short_step,
                                                 compute_beat=compute_beat)
    features = features[0]
    filenames = [ntpath.basename(f) for f in filenames[0]]
    f_final = []

    # Read CSVs:
    # one CSV per regression task, rows of (filename, target value)
    csv_files = glob.glob(folder_name + os.sep + "*.csv")
    regression_labels = []
    regression_names = []
    f_final = []
    for c in csv_files:
        cur_regression_labels = []
        f_temp = []
        # open the csv file that contains the current target value's
        # annotations
        with open(c, 'rt') as csvfile:
            csv_reader = csv.reader(csvfile, delimiter=',',
                                    quotechar='|')
            for row in csv_reader:
                if len(row) == 2:
                    # ... and if the current filename exists
                    # in the list of filenames
                    if row[0] in filenames:
                        index = filenames.index(row[0])
                        cur_regression_labels.append(float(row[1]))
                        f_temp.append(features[index, :])
                    else:
                        print("Warning: {} not found "
                              "in list of files.".format(row[0]))
                else:
                    print("Warning: Row with unknown format in "
                          "regression file")
        f_final.append(np.array(f_temp))
        # cur_regression_labels is the list of values
        # for the current regression problem
        regression_labels.append(np.array(cur_regression_labels))
        # regression task name
        regression_names.append(ntpath.basename(c).replace(".csv", ""))
    if len(features) == 0:
        print("ERROR: No data found in any input folder!")
        return

    # STEP B: classifier Evaluation and Parameter Selection:
    if model_type == "svm" or model_type == "svm_rbf":
        model_params = np.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25,
                                 0.5, 1.0, 5.0, 10.0])
    elif model_type == "randomforest":
        model_params = np.array([5, 10, 25, 50, 100])

    errors = []
    errors_base = []
    best_params = []

    for iRegression, r in enumerate(regression_names):
        # get optimal classifeir parameter:
        print("Regression task " + r)
        bestParam, error, berror = evaluate_regression(
            f_final[iRegression], regression_labels[iRegression],
            100, model_type, model_params)
        errors.append(error)
        errors_base.append(berror)
        best_params.append(bestParam)
        print("Selected params: {0:.5f}".format(bestParam))

        # scale the features (mean-std) and keep the mean/std parameters
        # to be saved with the model
        scaler = StandardScaler()
        features_norm = scaler.fit_transform(f_final[iRegression])
        mean = scaler.mean_.tolist()
        std = scaler.scale_.tolist()

        # STEP C: Save the model to file
        if model_type == "svm":
            classifier, _ = train_svm_regression(
                features_norm, regression_labels[iRegression], bestParam)
        if model_type == "svm_rbf":
            classifier, _ = train_svm_regression(
                features_norm, regression_labels[iRegression], bestParam,
                kernel='rbf')
        if model_type == "randomforest":
            classifier, _ = train_random_forest_regression(
                features_norm, regression_labels[iRegression], bestParam)

        # Save the model to a file, along with
        # - the scaling -mean/std- vectors)
        # - the feature extraction parameters
        if model_type == "svm" or model_type == "svm_rbf" \
                or model_type == "randomforest":
            with open(model_name + "_" + r, 'wb') as fid:
                cPickle.dump(classifier, fid)
            save_path = model_name + "_" + r + "MEANS"
            save_parameters(save_path, mean, std, mid_window, mid_step,
                            short_window, short_step, compute_beat)
    return errors, errors_base, best_params


# NOTE(review): the definitions below appear to duplicate earlier
# load_model_knn/load_model definitions in this file — likely an
# artifact of concatenated sources; confirm and deduplicate upstream.
def load_model_knn(knn_model_name, is_regression=False):
    # Load a kNN model saved with save_parameters(); the sequence of
    # cPickle.load calls must mirror the dump order used at save time.
    with open(knn_model_name, "rb") as fo:
        features = cPickle.load(fo)
        labels = cPickle.load(fo)
        mean = cPickle.load(fo)
        std = cPickle.load(fo)
        if not is_regression:
            # class names are only stored for classification models
            classes = cPickle.load(fo)
        neighbors = cPickle.load(fo)
        mid_window = cPickle.load(fo)
        mid_step = cPickle.load(fo)
        short_window = cPickle.load(fo)
        short_step = cPickle.load(fo)
        compute_beat = cPickle.load(fo)

    features = np.array(features)
    labels = np.array(labels)
    mean = np.array(mean)
    std = np.array(std)

    classifier = Knn(features, labels, neighbors)
    # Note: a direct call to the kNN constructor is used here

    if is_regression:
        return classifier, mean, std, mid_window, mid_step, short_window, \
               short_step, compute_beat
    else:
        return classifier, mean, std, classes, mid_window, mid_step, \
               short_window, short_step, compute_beat


def load_model(model_name, is_regression=False):
    """
    This function loads an SVM model either for classification or training.
ARGMUMENTS: - SVMmodel_name: the path of the model to be loaded - is_regression: a flag indigating whereas this model is regression or not """ with open(model_name + "MEANS", "rb") as fo: mean = cPickle.load(fo) std = cPickle.load(fo) if not is_regression: classNames = cPickle.load(fo) mid_window = cPickle.load(fo) mid_step = cPickle.load(fo) short_window = cPickle.load(fo) short_step = cPickle.load(fo) compute_beat = cPickle.load(fo) mean = np.array(mean) std = np.array(std) with open(model_name, 'rb') as fid: svm_model = cPickle.load(fid) if is_regression: return svm_model, mean, std, mid_window, mid_step, short_window, \ short_step, compute_beat else: return svm_model, mean, std, classNames, mid_window, mid_step, \ short_window, short_step, compute_beat def group_split(X, y, train_indeces, test_indeces, split_id): """ This function splits the data in train and test set according to train/test indeces based on LeaveOneGroupOut ARGUMENTS: X: array-like of shape (n_samples, n_features) y: array-like of shape (n_samples,) train_indeces: The training set indices test_indeces: The testing set indices split_id: the split number RETURNS: List containing train-test split of inputs. """ train_index = train_indeces[split_id] test_index = test_indeces[split_id] X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] return X_train, X_test, y_train, y_test def evaluate_classifier(features, class_names, classifier_name, params, parameter_mode, list_of_ids=None, n_exp=-1, train_percentage=0.90, smote=False): """ ARGUMENTS: features: a list ([numOfClasses x 1]) whose elements containt np matrices of features. 
Each matrix features[i] of class i is [n_samples x numOfDimensions] class_names: list of class names (strings) classifier_name: svm or knn or randomforest params: list of classifier parameters (for parameter tuning during cross-validation) parameter_mode: 0: choose parameters that lead to maximum overall classification ACCURACY 1: choose parameters that lead to maximum overall f1 MEASURE n_exp: number of cross-validation experiments (use -1 for auto calculation based on the num of samples) train_percentage: percentage of training (vs validation) data default 0.90 RETURNS: bestParam: the value of the input parameter that optimizes the selected performance measure """ # transcode list of feature matrices to X, y (sklearn) X, y = features_to_matrix(features) # features_norm = features; n_classes = len(features) ac_all = [] f1_all = [] pre_class_all = [] rec_classes_all = [] f1_classes_all = [] cms_all = [] # dynamically compute total number of samples: # (so that if number of samples is >10K only one train-val repetition # is performed) n_samples_total = X.shape[0] if n_exp == -1: n_exp = int(50000 / n_samples_total) + 1 if list_of_ids: train_indeces, test_indeces = [], [] gss = GroupShuffleSplit(n_splits=n_exp, train_size=.8) for train_index, test_index in gss.split(X, y, list_of_ids): train_indeces.append(train_index) test_indeces.append(test_index) for Ci, C in enumerate(params): # for each param value cm = np.zeros((n_classes, n_classes)) f1_per_exp = [] r1t_all = [] p1t_all = [] y_pred_all = [] y_test_all = [] for e in range(n_exp): y_pred, y_real = [], [] # for each cross-validation iteration: print("Param = {0:.5f} - classifier Evaluation " "Experiment {1:d} of {2:d}".format(C, e+1, n_exp)) # split features: if list_of_ids: X_train, X_test, y_train, y_test = group_split( X, y, train_indeces, test_indeces, e) else: X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=1-train_percentage) # mean/std scale the features: scaler = StandardScaler() if 
smote: sm = SMOTE(random_state=2) #sm = RandomUnderSampler(random_state=0) X_train, y_train = sm.fit_resample(X_train, y_train) scaler.fit(X_train) X_train = scaler.transform(X_train) # train multi-class svms: if classifier_name == "svm": classifier = train_svm(X_train, y_train, C) elif classifier_name == "svm_rbf": classifier = train_svm(X_train, y_train, C, kernel='rbf') elif classifier_name == "knn": classifier = train_knn(X_train, y_train, C) elif classifier_name == "randomforest": classifier = train_random_forest(X_train, y_train, C) elif classifier_name == "gradientboosting": classifier = train_gradient_boosting(X_train, y_train, C) elif classifier_name == "extratrees": classifier = train_extra_trees(X_train, y_train, C) # get predictions and compute current comfusion matrix cmt = np.zeros((n_classes, n_classes)) X_test = scaler.transform(X_test) for i_test_sample in range(X_test.shape[0]): y_pred.append(classifier_wrapper(classifier, classifier_name, X_test[i_test_sample, :])[0]) cmt = sklearn.metrics.confusion_matrix(y_test, y_pred) f1t = sklearn.metrics.f1_score(y_test, y_pred, average='macro') r1t_all.append(sklearn.metrics.recall_score( y_test, y_pred, average='macro')) p1t_all.append(sklearn.metrics.precision_score( y_test, y_pred, average='macro')) y_pred_all += y_pred y_test_all += y_test.tolist() f1_per_exp.append(f1t) if cmt.size != cm.size: all_classes = set(y) split_classes = set(y_test) missing_classes = all_classes.difference(split_classes) missing_classes = list(missing_classes) missing_classes = [int(x) for x in missing_classes] cmt = np.insert(cmt, missing_classes, 0, axis=0) cmt = np.insert(cmt, missing_classes, 0, axis=1) cm = cm + cmt cm = cm + 0.0000000010 rec = np.array([cm[ci, ci] / np.sum(cm[ci, :]) for ci in range(cm.shape[0])]) pre = np.array([cm[ci, ci] / np.sum(cm[:, ci]) for ci in range(cm.shape[0])]) pre_class_all.append(pre) rec_classes_all.append(rec) f1 = 2 * rec * pre / (rec + pre) # this is just for debugging (it should be 
equal to f1) f1_b = sklearn.metrics.f1_score(y_test_all, y_pred_all, average='macro') # Note: np.mean(f1_per_exp) will not be exacty equal to the # overall f1 (i.e. f1 and f1_b because these are calculated on a # per-sample basis) f1_std = np.std(f1_per_exp) print(np.mean(f1), f1_b, f1_std) f1_classes_all.append(f1) ac_all.append(np.sum(np.diagonal(cm)) / np.sum(cm)) cms_all.append(cm) f1_all.append(np.mean(f1)) print("\t\t", end="") for i, c in enumerate(class_names): if i == len(class_names)-1: print("{0:s}\t\t".format(c), end="") else: print("{0:s}\t\t\t".format(c), end="") print("OVERALL") print("\tC", end="") for c in class_names: print("\tPRE\tREC\tf1", end="") print("\t{0:s}\t{1:s}".format("ACC", "f1")) best_ac_ind = np.argmax(ac_all) best_f1_ind = np.argmax(f1_all) for i in range(len(pre_class_all)): print("\t{0:.3f}".format(params[i]), end="") for c in range(len(pre_class_all[i])): print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(100.0 * pre_class_all[i][c], 100.0 * rec_classes_all[i][c], 100.0 * f1_classes_all[i][c]), end="") print("\t{0:.1f}\t{1:.1f}".format(100.0 * ac_all[i], 100.0 * f1_all[i]), end="") if i == best_f1_ind: print("\t best f1", end="") if i == best_ac_ind: print("\t best Acc", end="") print("") if parameter_mode == 0: # keep parameters that maximize overall classification accuracy: print("Confusion Matrix:") print_confusion_matrix(cms_all[best_ac_ind], class_names) return params[best_ac_ind] elif parameter_mode == 1: # keep parameters that maximize overall f1 measure: print("Confusion Matrix:") print_confusion_matrix(cms_all[best_f1_ind], class_names) return params[best_f1_ind] def evaluate_regression(features, labels, n_exp, method_name, params): """ ARGUMENTS: features: np matrices of features [n_samples x numOfDimensions] labels: list of sample labels n_exp: number of cross-validation experiments method_name: "svm" or "randomforest" params: list of classifier params to be evaluated RETURNS: bestParam: the value of the input parameter that 
optimizes the selected performance measure """ # mean/std feature scaling: scaler = StandardScaler() features_norm = scaler.fit_transform(features) n_samples = labels.shape[0] per_train = 0.9 errors_all = [] er_train_all = [] er_base_all = [] for Ci, C in enumerate(params): # for each param value errors = [] errors_train = [] errors_baseline = [] for e in range(n_exp): # for each cross-validation iteration: # split features: randperm = np.random.permutation(range(n_samples)) n_train = int(round(per_train * n_samples)) f_train = [features_norm[randperm[i]] for i in range(n_train)] f_test = [features_norm[randperm[i+n_train]] for i in range(n_samples - n_train)] l_train = [labels[randperm[i]] for i in range(n_train)] l_test = [labels[randperm[i + n_train]] for i in range(n_samples - n_train)] # train multi-class svms: f_train = np.matrix(f_train) if method_name == "svm": classifier, train_err = \ train_svm_regression(f_train, l_train, C) elif method_name == "svm_rbf": classifier, train_err = \ train_svm_regression(f_train, l_train, C, kernel='rbf') elif method_name == "randomforest": classifier, train_err = \ train_random_forest_regression(f_train, l_train, C) error_test = [] error_test_baseline = [] for itest, fTest in enumerate(f_test): R = regression_wrapper(classifier, method_name, fTest) Rbaseline = np.mean(l_train) error_test.append((R - l_test[itest]) * (R - l_test[itest])) error_test_baseline.append((Rbaseline - l_test[itest]) * (Rbaseline - l_test[itest])) error = np.array(error_test).mean() error_baseline = np.array(error_test_baseline).mean() errors.append(error) errors_train.append(train_err) errors_baseline.append(error_baseline) errors_all.append(np.array(errors).mean()) er_train_all.append(np.array(errors_train).mean()) er_base_all.append(np.array(errors_baseline).mean()) best_ind = np.argmin(errors_all) print("{0:s}\t\t{1:s}\t\t{2:s}\t\t{3:s}".format("Param", "MSE", "T-MSE", "R-MSE")) for i in range(len(errors_all)): 
print("{0:.4f}\t\t{1:.2f}\t\t{2:.2f}\t\t{3:.2f}".format(params[i], errors_all[i], er_train_all[i], er_base_all[i]), end="") if i == best_ind: print("\t\t best", end="") print("") return params[best_ind], errors_all[best_ind], er_base_all[best_ind] def print_confusion_matrix(cm, class_names): """ This function prints a confusion matrix for a particular classification task. ARGUMENTS: cm: a 2-D np array of the confusion matrix (cm[i,j] is the number of times a sample from class i was classified in class j) class_names: a list that contains the names of the classes """ if cm.shape[0] != len(class_names): print("printConfusionMatrix: Wrong argument sizes\n") return for c in class_names: if len(c) > 4: c = c[0:3] print("\t{0:s}".format(c), end="") print("") for i, c in enumerate(class_names): if len(c) > 4: c = c[0:3] print("{0:s}".format(c), end="") for j in range(len(class_names)): print("\t{0:.2f}".format(100.0 * cm[i][j] / np.sum(cm)), end="") print("") def features_to_matrix(features): """ features_to_matrix(features) This function takes a list of feature matrices as argument and returns a single concatenated feature matrix and the respective class labels. 
ARGUMENTS: - features: a list of feature matrices RETURNS: - feature_matrix: a concatenated matrix of features - labels: a vector of class indices """ labels = np.array([]) feature_matrix = np.array([]) for i, f in enumerate(features): if i == 0: feature_matrix = f labels = i * np.ones((len(f), 1)) else: feature_matrix = np.vstack((feature_matrix, f)) labels = np.append(labels, i * np.ones((len(f), 1))) return feature_matrix, labels def pca_wrapper(features, dimensions): features, labels = features_to_matrix(features) pca = sklearn.decomposition.PCA(n_components=dimensions) pca.fit(features) coeff = pca.components_ coeff = coeff[:, 0:dimensions] features_transformed = [] for f in features: ft = f.copy() # ft = pca.transform(ft, k=nDims) ft = np.dot(f, coeff) features_transformed.append(ft) return features_transformed, coeff def compute_class_rec_pre_f1(c_mat): """ Gets recall, precision and f1 PER CLASS, given the confusion matrix :param c_mat: the [n_class x n_class] confusion matrix :return: rec, pre and f1 for each class """ n_class = c_mat.shape[0] rec, pre, f1 = [], [], [] for i in range(n_class): rec.append(float(c_mat[i, i]) / np.sum(c_mat[i, :])) pre.append(float(c_mat[i, i]) / np.sum(c_mat[:, i])) f1.append(2 * rec[-1] * pre[-1] / (rec[-1] + pre[-1])) return rec, pre, f1 def evaluate_model_for_folders(input_test_folders, model_name, model_type, positive_class, plot=True): """ evaluate_model_for_folders(input_test_folders, model_name, model_type) This function evaluates a model by computing the confusion matrix, the per class performance metrics and by generating a ROC and Precision / Recall diagrams (for a particular class of interest), for a given test dataset. 
The dataset needs to be organized in folders (one folder per audio class), exactly like in extract_features_and_train() :param input_test_folders: list of folders (each folder represents a separate audio class) :param model_name: path to the model to be tested :param model_type: type of the model :param positive_class name of the positive class :param plot (True default) if to plot 2 diagrams on plotly :return: thr_prre, pre, rec (thresholds, precision recall values) thr_roc, fpr, tpr (thresholds, false positive , true positive rates) Usage example: from pyAudioAnalysis import audioTrainTest as aT thr_prre, pre, rec, thr_roc, fpr, tpr = aT.evaluate_model_for_folders(["4_classes_small/speech", "4_classes_small/music"], "data/models/svm_rbf_4class", "svm_rbf", "speech") """ class_names = [] y_true_binary = [] y_true = [] y_pred = [] probs_positive = [] for i, d in enumerate(input_test_folders): if d[-1] == os.sep: class_names.append(d.split(os.sep)[-2]) else: class_names.append(d.split(os.sep)[-1]) types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg') wav_file_list = [] for files in types: wav_file_list.extend(glob.glob(os.path.join(d, files))) # get list of audio files for current folder and run classifier for w in wav_file_list: c, p, probs_names = file_classification(w, model_name, model_type) y_pred.append(c) y_true.append(probs_names.index(class_names[i])) if i == probs_names.index(positive_class): y_true_binary.append(1) else: y_true_binary.append(0) prob_positive = p[probs_names.index(positive_class)] probs_positive.append(prob_positive) pre, rec, thr_prre = sklearn.metrics.precision_recall_curve(y_true_binary, probs_positive) fpr, tpr, thr_roc = sklearn.metrics.roc_curve( y_true_binary, probs_positive) cm = sklearn.metrics.confusion_matrix(y_true, y_pred) rec_c, pre_c, f1_c = compute_class_rec_pre_f1(cm) f1 = (sklearn.metrics.f1_score(y_true, y_pred, average='macro')) acc = (sklearn.metrics.accuracy_score(y_true, y_pred)) print(cm) print(rec_c, 
pre_c, f1_c, f1, acc) if plot: titles = ["Confusion matrix, acc = {0:.1f}%, " " F1 (macro): {1:.1f}%".format(100 * acc, 100 * f1), "Class-wise Performance measures", "Pre vs Rec for " + positive_class, "ROC for " + positive_class] figs = plotly.subplots.make_subplots(rows=2, cols=2, subplot_titles=titles) heatmap = go.Heatmap(z=np.flip(cm, axis=0), x=class_names, y=list(reversed(class_names)), colorscale=[[0, '#4422ff'], [1, '#ff4422']], name="confusin matrix", showscale=False) mark_prop1 = dict(color='rgba(80, 220, 150, 0.5)', line=dict(color='rgba(80, 220, 150, 1)', width=2)) mark_prop2 = dict(color='rgba(80, 150, 220, 0.5)', line=dict(color='rgba(80, 150, 220, 1)', width=2)) mark_prop3 = dict(color='rgba(250, 150, 150, 0.5)', line=dict(color='rgba(250, 150, 150, 1)', width=3)) b1 = go.Bar(x=class_names, y=rec_c, name="Recall", marker=mark_prop1) b2 = go.Bar(x=class_names, y=pre_c, name="Precision", marker=mark_prop2) b3 = go.Bar(x=class_names, y=f1_c, name="F1", marker=mark_prop3) figs.append_trace(heatmap, 1, 1) figs.append_trace(b1, 1, 2) figs.append_trace(b2, 1, 2) figs.append_trace(b3, 1, 2) figs.append_trace(go.Scatter(x=thr_prre, y=pre, name="Precision", marker=mark_prop1), 2, 1) figs.append_trace(go.Scatter(x=thr_prre, y=rec, name="Recall", marker=mark_prop2), 2, 1) figs.append_trace(go.Scatter(x=fpr, y=tpr, showlegend=False), 2, 2) figs.update_xaxes(title_text="threshold", row=2, col=1) figs.update_xaxes(title_text="false positive rate", row=2, col=2) figs.update_yaxes(title_text="true positive rate", row=2, col=2) plotly.offline.plot(figs, filename="temp.html", auto_open=True) return cm, thr_prre, pre, rec, thr_roc, fpr, tpr def file_classification(input_file, model_name, model_type): # Load classifier: if not os.path.isfile(model_name): print("fileClassification: input model_name not found!") return -1, -1, -1 if isinstance(input_file, str) and not os.path.isfile(input_file): print("fileClassification: wav file not found!") return -1, -1, -1 if 
model_type == 'knn': classifier, mean, std, classes, mid_window, mid_step, short_window, \ short_step, compute_beat = load_model_knn(model_name) else: classifier, mean, std, classes, mid_window, mid_step, short_window, \ short_step, compute_beat = load_model(model_name) # read audio file and convert to mono sampling_rate, signal = audioBasicIO.read_audio_file(input_file) signal = audioBasicIO.stereo_to_mono(signal) if sampling_rate == 0: # audio file IO problem return -1, -1, -1 if signal.shape[0] / float(sampling_rate) < mid_window: mid_window = signal.shape[0] / float(sampling_rate) # feature extraction: mid_features, s, _ = \ aF.mid_feature_extraction(signal, sampling_rate, mid_window * sampling_rate, mid_step * sampling_rate, round(sampling_rate * short_window), round(sampling_rate * short_step)) # long term averaging of mid-term statistics mid_features = mid_features.mean(axis=1) if compute_beat: beat, beat_conf = aF.beat_extraction(s, short_step) mid_features = np.append(mid_features, beat) mid_features = np.append(mid_features, beat_conf) feature_vector = (mid_features - mean) / std # normalization # classification class_id, probability = classifier_wrapper(classifier, model_type, feature_vector) return class_id, probability, classes def file_regression(input_file, model_name, model_type): # Load classifier: if not os.path.isfile(input_file): print("fileClassification: wav file not found!") return -1, -1, -1 regression_models = glob.glob(model_name + "_*") regression_models2 = [] for r in regression_models: if r[-5::] != "MEANS": regression_models2.append(r) regression_models = regression_models2 regression_names = [] for r in regression_models: regression_names.append(r[r.rfind("_")+1::]) # FEATURE EXTRACTION # LOAD ONLY THE FIRST MODEL (for mt_win, etc) if model_type == 'svm' or model_type == "svm_rbf" or \ model_type == 'randomforest': _, _, _, mid_window, mid_step, short_window, short_step, compute_beat \ = load_model(regression_models[0], True) # read 
audio file and convert to mono samping_rate, signal = audioBasicIO.read_audio_file(input_file) signal = audioBasicIO.stereo_to_mono(signal) # feature extraction: mid_features, s, _ = \ aF.mid_feature_extraction(signal, samping_rate, mid_window * samping_rate, mid_step * samping_rate, round(samping_rate * short_window), round(samping_rate * short_step)) # long term averaging of mid-term statistics mid_features = mid_features.mean(axis=1) if compute_beat: beat, beat_conf = aF.beat_extraction(s, short_step) mid_features = np.append(mid_features, beat) mid_features = np.append(mid_features, beat_conf) # REGRESSION R = [] for ir, r in enumerate(regression_models): if not os.path.isfile(r): print("fileClassification: input model_name not found!") return (-1, -1, -1) if model_type == 'svm' or model_type == "svm_rbf" \ or model_type == 'randomforest': model, mean, std, _, _, _, _, _ = load_model(r, True) curFV = (mid_features - mean) / std # normalization # classification R.append(regression_wrapper(model, model_type, curFV)) return R, regression_names def lda(data, labels, red_dim): # Centre data data -= data.mean(axis=0) n_data = np.shape(data)[0] n_dim = np.shape(data)[1] Sw = np.zeros((n_dim, n_dim)) C = np.cov((data.T)) # Loop over classes classes = np.unique(labels) for i in range(len(classes)): # Find relevant datapoints indices = (np.where(labels == classes[i])) d = np.squeeze(data[indices, :]) classcov = np.cov((d.T)) Sw += float(np.shape(indices)[0])/n_data * classcov Sb = C - Sw # Now solve for W # Compute eigenvalues, eigenvectors and sort into order evals, evecs = la.eig(Sw, Sb) indices = np.argsort(evals) indices = indices[::-1] evecs = evecs[:, indices] w = evecs[:, :red_dim] new_data = np.dot(data, w) return new_data, w def train_speaker_models(): """ This script is used to train the speaker-related models (NOTE: data paths are hard-coded and NOT included in the library, the models are, however included) import audioTrainTest as aT 
aT.trainSpeakerModelsScript() """ mt_win = 2.0 mt_step = 2.0 st_win = 0.020 st_step = 0.020 dir_name = "DIARIZATION_ALL/all" list_of_dirs = [os.path.join(dir_name, name) for name in os.listdir(dir_name) if os.path.isdir(os.path.join(dir_name, name))] extract_features_and_train(list_of_dirs, mt_win, mt_step, st_win, st_step, "knn", "data/knnSpeakerAll", compute_beat=False, train_percentage=0.50) dir_name = "DIARIZATION_ALL/female_male" list_of_dirs = [os.path.join(dir_name, name) for name in os.listdir(dir_name) if os.path.isdir(os.path.join(dir_name, name))] extract_features_and_train(list_of_dirs, mt_win, mt_step, st_win, st_step, "knn", "data/knnSpeakerFemaleMale", compute_beat=False, train_percentage=0.50) def main(argv): return 0 if __name__ == '__main__': main(sys.argv)
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibFormat element - Prints keywords """ __revision__ = "$Id$" import cgi import six from urllib import quote from invenio.base.globals import cfg def format_element(bfo, keyword_prefix, keyword_suffix, separator=' ; ', link='yes'): """ Display keywords of the record. @param keyword_prefix: a prefix before each keyword @param keyword_suffix: a suffix after each keyword @param separator: a separator between keywords @param link: links the keywords if 'yes' (HTML links) """ CFG_SITE_URL = cfg['CFG_SITE_URL'] if isinstance(CFG_SITE_URL, six.text_type): CFG_SITE_URL = CFG_SITE_URL.encode('utf8') keywords = bfo.fields('6531_a') if len(keywords) > 0: if link == 'yes': keywords = ['<a href="' + CFG_SITE_URL + '/search?f=keyword&amp;p='+ \ quote('"' + keyword + '"') + \ '&amp;ln='+ bfo.lang+ \ '">' + cgi.escape(keyword) + '</a>' for keyword in keywords] else: keywords = [cgi.escape(keyword) for keyword in keywords] keywords = [keyword_prefix + keyword + keyword_suffix for keyword in keywords] return separator.join(keywords) def escape_values(bfo): """ Called by BibFormat in order to check if output of this element should be escaped. 
""" return 0 formatter: unicode decoding error fix * Fixes unicode decoding error in bfe_keyword bibformat element. Signed-off-by: Lars Holm Nielsen <7eaffca9f7cd98839b5821d0d23e09ff764b82cd@cern.ch> # -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibFormat element - Prints keywords """ __revision__ = "$Id$" import cgi import six from urllib import quote from invenio.base.globals import cfg def format_element(bfo, keyword_prefix, keyword_suffix, separator=' ; ', link='yes'): """ Display keywords of the record. 
@param keyword_prefix: a prefix before each keyword @param keyword_suffix: a suffix after each keyword @param separator: a separator between keywords @param link: links the keywords if 'yes' (HTML links) """ CFG_SITE_URL = cfg['CFG_SITE_URL'] if isinstance(CFG_SITE_URL, six.text_type): CFG_SITE_URL = CFG_SITE_URL.encode('utf8') keywords = bfo.fields('6531_a') if len(keywords) > 0: if link == 'yes': keywords = [ '<a href="' + CFG_SITE_URL + '/search?f=keyword&amp;p=' + quote('"' + keyword + '"') + '&amp;ln=' + str(bfo.lang) + '">' + cgi.escape(keyword) + '</a>' for keyword in keywords] else: keywords = [cgi.escape(keyword) for keyword in keywords] keywords = [keyword_prefix + keyword + keyword_suffix for keyword in keywords] return separator.join(keywords) def escape_values(bfo): """ Called by BibFormat in order to check if output of this element should be escaped. """ return 0
""" puzzlesolver_compound_tests.py :created on: 20160701 __author__ = 'Frederic Dupont' :License: GPL3 """ import unittest from sudoku.puzzle import Puzzle, make_grid_from_string # from sudoku.puzzleconstants import DIGITS, SQUARES from sudoku.puzzlesolver import PuzzleSolver class TestPuzzleSolver(unittest.TestCase): def setUp(self): # Very Hard --> require search self.g1 = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......' self.s1 = '4.....8.5.3..........7......2.....6.....8.4...4..1.......6.3.7.5.32.1...1.4......' self.g2 = '1...895..5....7819........72.4..8.7.9.71.54.8.8.7..3.531.4..78.4682....3..985...1' self.s2 = '172.895.454..2781989.5142.7254938176937165428681742395315496782468271953729853641' # Not so hard - no search needed self.g3 = '003020600900305001001806400008102900700000008006708200002609500800203009005010300' self.s3 = '483921657967345821251876493548132976729564138136798245372689514814253769695417382' self.g4 = '.82...59....8.1..3..52...78...37842...........27945...91...68..2..7.9....73...95.' self.s4 = '382467591796851243145293678561378429439612785827945316914526837258739164673184952' self.g5 = '437...189.6.183.......9.536.73..1...9..4.7..1...5..69.124.7.......645.1.356...974' self.s5 = '437256189569183742812794536673921458985467321241538697124379865798645213356812974' self.g6 = '.6..7.4..5......12...1....7..5.4.27..3.....4..19.6.8...4...1...79.6....8..1.3..2.' self.s6 = '163275489578496312924183567685349271237518946419762853346821795792654138851937624' self.g7 = '7.9..3.85812..6.375.478..1.3.196..5.426..789...52..6.1143.7.56....3..1.8......3..' self.s7 = '769123485812456937534789216381964752426517893975238641143872569697345128258691374' self.g8 = '31.....78526.9.13.8..6.359228.4.9..1..3.862..6..1.5..3...2.13467.1...8...6...891.' self.s8 = '319542678526897134874613592285439761143786259697125483958271346731964825462358917' self.g9 = '.1869.....5....71..7..3..62.21863...7..5...365...419.88...72.4..47.18.9.....563..' 
self.s9 = '218697453356284719479135862921863574784529136563741928835972641647318295192456387' self.g10 = '...12.4.5.4.6..7.8.26......47.5...31.........31...8..9.8..1.......2..5.4..24.79.6' self.s10 = '837129465941653728526874193478592631269341857315768249684915372793286514152437986' self.g11 = '.5...478...67.95..2.7.5.1.3.2.....6..981.543..3.....7.1.5..7..8..35.26.7.728...5.' self.s11 = '359214786816739524247658193721483965698175432534926871165397248983542617472861359' # hard puzzle (X wings and requires 5 full iterations to be solved, but no search) self.g12 = '4.......1..6.35.24..984.6.7.3....4..9..68.3.5.1..7.2....542.7.32.....54.......1.2' self.s12 = '423796851786135924159842637538219476972684315614573289865421793291367548347958162' if __name__ == '__main__': unittest.main() Add easy puzzle test case for eliminate_propagate_fill() """ puzzlesolver_compound_tests.py :created on: 20160701 __author__ = 'Frederic Dupont' :License: GPL3 """ import unittest from sudoku.puzzle import Puzzle, make_grid_from_string # from sudoku.puzzleconstants import DIGITS, SQUARES from sudoku.puzzlesolver import PuzzleSolver class TestPuzzleSolver(unittest.TestCase): def setUp(self): # Very Hard --> require search self.g1 = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......' self.s1 = '4.....8.5.3..........7......2.....6.....8.4...4..1.......6.3.7.5.32.1...1.4......' self.g2 = '1...895..5....7819........72.4..8.7.9.71.54.8.8.7..3.531.4..78.4682....3..985...1' self.s2 = '172.895.454..2781989.5142.7254938176937165428681742395315496782468271953729853641' # Not so hard - no search needed self.g3 = '003020600900305001001806400008102900700000008006708200002609500800203009005010300' self.s3 = '483921657967345821251876493548132976729564138136798245372689514814253769695417382' self.g4 = '.82...59....8.1..3..52...78...37842...........27945...91...68..2..7.9....73...95.' 
self.s4 = '382467591796851243145293678561378429439612785827945316914526837258739164673184952' self.g5 = '437...189.6.183.......9.536.73..1...9..4.7..1...5..69.124.7.......645.1.356...974' self.s5 = '437256189569183742812794536673921458985467321241538697124379865798645213356812974' self.g6 = '.6..7.4..5......12...1....7..5.4.27..3.....4..19.6.8...4...1...79.6....8..1.3..2.' self.s6 = '163275489578496312924183567685349271237518946419762853346821795792654138851937624' self.g7 = '7.9..3.85812..6.375.478..1.3.196..5.426..789...52..6.1143.7.56....3..1.8......3..' self.s7 = '769123485812456937534789216381964752426517893975238641143872569697345128258691374' self.g8 = '31.....78526.9.13.8..6.359228.4.9..1..3.862..6..1.5..3...2.13467.1...8...6...891.' self.s8 = '319542678526897134874613592285439761143786259697125483958271346731964825462358917' self.g9 = '.1869.....5....71..7..3..62.21863...7..5...365...419.88...72.4..47.18.9.....563..' self.s9 = '218697453356284719479135862921863574784529136563741928835972641647318295192456387' self.g10 = '...12.4.5.4.6..7.8.26......47.5...31.........31...8..9.8..1.......2..5.4..24.79.6' self.s10 = '837129465941653728526874193478592631269341857315768249684915372793286514152437986' self.g11 = '.5...478...67.95..2.7.5.1.3.2.....6..981.543..3.....7.1.5..7..8..35.26.7.728...5.' 
self.s11 = '359214786816739524247658193721483965698175432534926871165397248983542617472861359' # hard puzzle (X wings and requires 5 full iterations to be solved, but no search) self.g12 = '4.......1..6.35.24..984.6.7.3....4..9..68.3.5.1..7.2....542.7.32.....54.......1.2' self.s12 = '423796851786135924159842637538219476972684315614573289865421793291367548347958162' @staticmethod def _apply_eliminate_propagate_fill(puzzle_string): """ :return: a tuple containing the result of eliminate_propagate_fill() on the provided string, and a repr of the "solved" puzzle """ grid = make_grid_from_string(puzzle_string) grid.parse_grid_candidates() solver = PuzzleSolver(grid.clone()) result = solver.eliminate_propagate_fill() return result, repr(solver._puzzle) def test_eliminate_propagate_fill_g3(self): puzzle_string, expected_string = self.g3, self.s3 result, resulting_string = TestPuzzleSolver._apply_eliminate_propagate_fill(puzzle_string) self.assertTrue(result) self.assertEqual(expected_string, resulting_string) if __name__ == '__main__': unittest.main()
import json import requests from .fields import BaseField, CharField, ModelField from . import settings class APIModelCollection: def __init__(self, model, api, list_url, get_url): self._items = [] self._model = model self._api = api self._list_url = list_url self._get_url = get_url def list(self): if self._list_url is not None: items = API.authenticated_get_request( request_url=self._list_url, token=self._api.token ) item_dicts = items.json() self._items = [] for item_dict in item_dicts: obj = self._model(api=self._api) obj.set_data(data=item_dict) self._items.append(obj) return self._items def get(self, **kwargs): if self._get_url is not None: keys = {key: value for key, value in kwargs.items()} self._get_url = self._get_url.format(**keys) item = API.authenticated_get_request( request_url=self._get_url, token=self._api.token ) item_dict = item.json() obj = self._model(api=self._api) obj.set_data(data=item_dict) return obj else: return None def add(self, item): if isinstance(item, self._model): self._items.append(item) else: print("Item is not of type: {}".format(self._model)) class APICollaboratorCollection(APIModelCollection): def __init__(self, model, api, list_url, get_url, add_url): super().__init__(model, api, list_url, get_url) self._add_url = add_url def get(self, login): if self._list_url is not None and self._get_url is not None: self._get_url = self._get_url.format(login=login) if len(self._items) is 0: self._items = self.list() if login in [collaborator.login for collaborator in self._items]: item = API.authenticated_get_request( request_url=self._get_url, token=self._api.token ) item_dict = item.json() obj = self._model(api=self._api) obj.set_data(data=item_dict) return obj return None def add(self, item): super().add(item) if item in self._items: if self._add_url is not None: self._add_url = self._add_url.format(login=item.login) API.authenticated_put_request( request_url=self._add_url, token=self._api.token ) def save(self): pass class API: def 
__init__(self, token): self.token = token @staticmethod def auth_headers(token): return {'access_token': token} @staticmethod def authenticated_get_request(request_url, token): headers = API.auth_headers(token) response = requests.get(request_url, params=headers) return response @staticmethod def authenticated_put_request(request_url, token, data=None): headers = API.auth_headers(token) response = requests.put(url=request_url, data=data, params=headers) return response @staticmethod def authenticated_patch_request(request_url, token, data=None): headers = API.auth_headers(token) response = requests.patch(url=request_url, data=data, params=headers) return response @property def limit(self): limit = API.authenticated_get_request(settings.RATE_LIMIT_URL, self.token) return limit.content @property def repos(self): return APIModelCollection( model=Repository, api=self, list_url=settings.CURRENT_USER_REPOSITORIES_URL, get_url=settings.REPOSITORY_URL) class BaseModel(type): def __new__(cls, name, bases, attrs): fields = {k: v for k, v in attrs.items() if isinstance(v, BaseField)} attrs['_fields'] = fields return type.__new__(cls, name, bases, attrs) class Model(object, metaclass=BaseModel): def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) def __setattr__(self, key, value): if key in self._fields: field = self._fields[key] field.set(value) field._related_obj = self super(Model, self).__setattr__(key, field.deserialize()) else: super(Model, self).__setattr__(key, value) def to_dict(self): return dict((key, self._fields[key].serialize(getattr(self, key))) for key in self._fields.keys() if hasattr(self, key)) def to_json(self): return json.dumps(self.to_dict()) def set_data(self, data, is_json=False): if is_json: data = json.loads(data) for key in self._fields: if key in data: setattr(self, key, data.get(key)) class APIModel(Model): api = ModelField(API) def _save(self, save_url): response = API.authenticated_patch_request(save_url, 
token=self.api.token, data=self.to_json()) print(response.content) class User(APIModel): login = CharField() def save(self): self._save(settings.AUTHENTICATED_USER) def __repr__(self): return self.login class Repository(APIModel): name = CharField() full_name = CharField() description = CharField() def save(self): self._save(settings.REPOSITORY_URL.format(full_name=self.full_name)) @property def collaborators(self): return APICollaboratorCollection( model=User, api=self.api, list_url=settings.COLLABORATORS_LIST_URL.format( full_name=self.full_name), get_url=settings.COLLABORATOR_URL, add_url=settings.COLLABORATOR_ADD_URL.format( full_name=self.full_name)) def __repr__(self): return self.full_name Restructure Model collections import json import requests from .fields import BaseField, CharField, ModelField from . import settings class API: def __init__(self, token): self.token = token @staticmethod def auth_headers(token): return {'access_token': token} @staticmethod def authenticated_get_request(request_url, token): headers = API.auth_headers(token) response = requests.get(request_url, params=headers) return response @staticmethod def authenticated_put_request(request_url, token, data=None): headers = API.auth_headers(token) response = requests.put(url=request_url, data=data, params=headers) return response @staticmethod def authenticated_patch_request(request_url, token, data=None): headers = API.auth_headers(token) response = requests.patch(url=request_url, data=data, params=headers) return response @property def limit(self): limit = API.authenticated_get_request(settings.RATE_LIMIT_URL, self.token) return limit.content @property def repos(self): return APIRepositoryCollection(api=self, parent=self) class APIModelCollection: def __init__(self, api, parent=None): self._items = [] self._api = api self.parent = parent class APIRepositoryCollection(APIModelCollection): def list(self): response = API.authenticated_get_request( 
request_url=settings.CURRENT_USER_REPOSITORIES_URL, token=self._api.token ) item_dicts = response.json() self._items = [] for item_dict in item_dicts: obj = Repository(api=self._api) obj.set_data(data=item_dict) self._items.append(obj) return self._items def get(self, full_name): get_url = settings.REPOSITORY_URL.format(full_name=full_name) response = API.authenticated_get_request( request_url=get_url, token=self._api.token ) item_dict = response.json() obj = Repository(api=self._api) obj.set_data(data=item_dict) return obj class APICollaboratorCollection(APIModelCollection): def list(self): items = API.authenticated_get_request( request_url=settings.COLLABORATORS_LIST_URL.format( full_name=self.parent.full_name), token=self._api.token ) item_dicts = items.json() self._items = [] for item_dict in item_dicts: obj = User(api=self._api) obj.set_data(data=item_dict) self._items.append(obj) return self._items def get(self, login): get_url = settings.COLLABORATOR_URL.format( full_name=self.parent.full_name, login=login ) if len(self._items) is 0: self._items = self.list() if login in [collaborator.login for collaborator in self._items]: item = API.authenticated_get_request( request_url=get_url, token=self._api.token ) item_dict = item.json() obj = User(api=self._api) obj.set_data(data=item_dict) return obj return None def add(self, item): if item in self._items: add_url = settings.COLLABORATOR_ADD_URL.format( full_name=self.parent.full_name, login=item.login) API.authenticated_put_request( request_url=add_url, token=self._api.token ) class BaseModel(type): def __new__(cls, name, bases, attrs): fields = {k: v for k, v in attrs.items() if isinstance(v, BaseField)} attrs['_fields'] = fields return type.__new__(cls, name, bases, attrs) class Model(object, metaclass=BaseModel): def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) def __setattr__(self, key, value): if key in self._fields: field = self._fields[key] field.set(value) field._related_obj = 
self super(Model, self).__setattr__(key, field.deserialize()) else: super(Model, self).__setattr__(key, value) def to_dict(self): return dict((key, self._fields[key].serialize(getattr(self, key))) for key in self._fields.keys() if hasattr(self, key)) def to_json(self): return json.dumps(self.to_dict()) def set_data(self, data, is_json=False): if is_json: data = json.loads(data) for key in self._fields: if key in data: setattr(self, key, data.get(key)) class APIModel(Model): api = ModelField(API) def _save(self, save_url): API.authenticated_patch_request(save_url, token=self.api.token, data=self.to_json()) class User(APIModel): login = CharField() def save(self): self._save(settings.AUTHENTICATED_USER) def __repr__(self): return self.login class Repository(APIModel): name = CharField() full_name = CharField() description = CharField() def save(self): self._save(settings.REPOSITORY_URL.format(full_name=self.full_name)) @property def collaborators(self): return APICollaboratorCollection(api=self.api, parent=self) def __repr__(self): return self.full_name
from collections import OrderedDict from django.http import Http404, HttpResponseForbidden from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404 from django.core.urlresolvers import reverse from django.core.exceptions import PermissionDenied from django.db import IntegrityError from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.template.context_processors import csrf from django.views.decorators.cache import cache_page from .forms import RegisterUserForm, AddMonsterInstanceForm, EditMonsterInstanceForm, AwakenMonsterInstanceForm, \ EditEssenceStorageForm, EditProfileForm from .models import Monster, Summoner, MonsterInstance @cache_page(60 * 10) def register(request): form = RegisterUserForm(request.POST or None) if request.method == 'POST': if form.is_valid(): try: # Create the user new_user = User.objects.create_user( username=form.cleaned_data['username'], password=form.cleaned_data['password'], ) new_user.save() new_summoner = Summoner.objects.create( user=new_user, summoner_name=form.cleaned_data['summoner_name'], public=form.cleaned_data['is_public'], ) new_summoner.save() # Automatically log them in user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password']) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile', profile_name=user.username, view_mode='list') except IntegrityError: form.add_error('username', 'Username already taken') context = {'form': form} return render(request, 'herders/register.html', context) def log_in(request): context = {} context.update(csrf(request)) if request.method == 'POST': username = request.POST['username'] userpass = request.POST['userpass'] user = authenticate(username=username, password=userpass) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile', 
profile_name=user.username, view_mode='list') # If the above falls through then the login failed context['login_failure'] = True context['username'] = username # No data POSTed or the above login/auth failed. return render(request, 'herders/login.html', context) def log_out(request): logout(request) return redirect('news:latest_news') def profile(request, profile_name=None, view_mode='list', sort_method='grade'): if profile_name is None: if request.user.is_authenticated(): profile_name = request.user.username else: raise Http404('No user profile specified and not logged in. ') summoner = get_object_or_404(Summoner, user__username=profile_name) # Determine if the person logged in is the one requesting the view is_owner = request.user.is_authenticated() and summoner.user == request.user context = { 'add_monster_form': AddMonsterInstanceForm(), 'profile_name': profile_name, 'is_owner': is_owner, 'view_mode': view_mode, 'sort_method': sort_method, 'return_path': request.path, 'view': 'profile', } if is_owner or summoner.public: if view_mode.lower() == 'list': context['monster_stable'] = MonsterInstance.objects.filter(owner=summoner) return render(request, 'herders/profile/profile_view.html', context) elif view_mode.lower() == 'box': if sort_method == 'grade': monster_stable = OrderedDict() monster_stable['6*'] = MonsterInstance.objects.filter(owner=summoner, stars=6).order_by('-level', 'monster__name') monster_stable['5*'] = MonsterInstance.objects.filter(owner=summoner, stars=5).order_by('-level', 'monster__name') monster_stable['4*'] = MonsterInstance.objects.filter(owner=summoner, stars=4).order_by('-level', 'monster__name') monster_stable['3*'] = MonsterInstance.objects.filter(owner=summoner, stars=3).order_by('-level', 'monster__name') monster_stable['2*'] = MonsterInstance.objects.filter(owner=summoner, stars=2).order_by('-level', 'monster__name') monster_stable['1*'] = MonsterInstance.objects.filter(owner=summoner, stars=1).order_by('-level', 'monster__name') 
elif sort_method == 'level': monster_stable = OrderedDict() monster_stable['40-31'] = MonsterInstance.objects.filter(owner=summoner, level__gt=30).order_by('-level', '-stars', 'monster__name') monster_stable['30-21'] = MonsterInstance.objects.filter(owner=summoner, level__gt=20).filter(level__lte=30).order_by('-level', '-stars', 'monster__name') monster_stable['20-11'] = MonsterInstance.objects.filter(owner=summoner, level__gt=10).filter(level__lte=20).order_by('-level', '-stars', 'monster__name') monster_stable['10-1'] = MonsterInstance.objects.filter(owner=summoner, level__lte=10).order_by('-level', '-stars', 'monster__name') elif sort_method == 'attribute': monster_stable = OrderedDict() monster_stable['water'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name') monster_stable['fire'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name') monster_stable['wind'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name') monster_stable['light'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name') monster_stable['dark'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name') else: raise Http404('Invalid sort method') context['monster_stable'] = monster_stable return render(request, 'herders/profile/profile_box.html', context) else: raise Http404('Unknown profile view mode') else: return render(request, 'herders/profile/not_public.html') @login_required def profile_edit(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) form = EditProfileForm(request.POST or None, 
instance=request.user.summoner) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 'is_owner': True, # Because of @login_required decorator 'profile_name': profile_name, 'return_path': return_path, 'profile_form': form, } if request.method == 'POST' and form.is_valid(): form.save() return redirect(return_path) else: return render(request, 'herders/profile/profile_edit.html', context) @login_required def profile_storage(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) form = EditEssenceStorageForm(request.POST or None, instance=request.user.summoner) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 'is_owner': True, 'profile_name': request.user.username, 'storage_form': form, 'view': 'profile', 'profile_view': 'materials', } if request.method == 'POST' and form.is_valid(): form.save() return redirect(return_path) else: return render(request, 'herders/profile/profile_storage.html', context) @login_required() def monster_instance_add(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) form = AddMonsterInstanceForm(request.POST or None) if form.is_valid() and request.method == 'POST': # Create the monster instance new_monster = form.save(commit=False) new_monster.owner = request.user.summoner new_monster.save() return redirect(return_path) else: # Re-show same page but with form filled in and errors shown context = { 'profile_name': profile_name, 'add_monster_form': form, 'return_path': return_path, 'is_owner': True, 'view': 'profile', } return render(request, 'herders/profile/profile_monster_add.html', context) def monster_instance_view(request, profile_name, instance_id): context = { 'view': 'profile', } return render(request, 
'herders/unimplemented.html') def monster_instance_edit(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) is_owner = monster.owner == request.user.summoner form = EditMonsterInstanceForm(request.POST or None, instance=monster) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 'profile_name': request.user.username, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'edit_monster_form': form, 'view': 'profile', } if request.method == 'POST': if is_owner: if form.is_valid(): form.save() return redirect(return_path) else: # Redisplay form with validation error messages context['validation_errors'] = form.non_field_errors() return render(request, 'herders/profile/profile_monster_edit.html', context) else: raise PermissionDenied() else: return render(request, 'herders/profile/profile_monster_edit.html', context) @login_required() def monster_instance_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == monster.owner: monster.delete() return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_power_up(request, profile_name, instance_id): context = { 'view': 'profile', } return render(request, 'herders/unimplemented.html') @login_required() def monster_instance_awaken(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) is_owner = monster.owner == 
request.user.summoner form = AwakenMonsterInstanceForm(request.POST or None) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 'profile_name': request.user.username, 'is_owner': is_owner, # Because of @login_required decorator 'monster': monster, 'awaken_monster_form': form, } if request.method == 'POST' and form.is_valid() and is_owner: # Subtract essences from inventory if requested if form.cleaned_data['subtract_materials']: summoner = Summoner.objects.get(user=request.user) if monster.monster.awaken_magic_mats_high: summoner.storage_magic_high -= monster.monster.awaken_magic_mats_high if monster.monster.awaken_magic_mats_mid: summoner.storage_magic_mid -= monster.monster.awaken_magic_mats_mid if monster.monster.awaken_magic_mats_low: summoner.storage_magic_low -= monster.monster.awaken_magic_mats_low if monster.monster.element == Monster.ELEMENT_FIRE: if monster.monster.awaken_ele_mats_high: summoner.storage_fire_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_fire_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_fire_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WATER: if monster.monster.awaken_ele_mats_high: summoner.storage_water_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_water_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_water_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WIND: if monster.monster.awaken_ele_mats_high: summoner.storage_wind_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_wind_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_wind_low -= 
monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_DARK: if monster.monster.awaken_ele_mats_high: summoner.storage_dark_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_dark_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_dark_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_LIGHT: if monster.monster.awaken_ele_mats_high: summoner.storage_light_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_light_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_light_low -= monster.monster.awaken_ele_mats_low summoner.save() # Perform the awakening by instance's monster source ID monster.monster = monster.monster.awakens_to() monster.save() return redirect(return_path) else: # Retreive list of awakening materials from summoner profile summoner = Summoner.objects.get(user=request.user) available_materials = { 'storage_magic_low': summoner.storage_magic_low, 'storage_magic_mid': summoner.storage_magic_mid, 'storage_magic_high': summoner.storage_magic_high } if monster.monster.element == Monster.ELEMENT_FIRE: available_materials['storage_ele_low'] = summoner.storage_fire_low available_materials['storage_ele_mid'] = summoner.storage_fire_mid available_materials['storage_ele_high'] = summoner.storage_fire_high elif monster.monster.element == Monster.ELEMENT_WATER: available_materials['storage_ele_low'] = summoner.storage_water_low available_materials['storage_ele_mid'] = summoner.storage_water_mid available_materials['storage_ele_high'] = summoner.storage_water_high elif monster.monster.element == Monster.ELEMENT_WIND: available_materials['storage_ele_low'] = summoner.storage_wind_low available_materials['storage_ele_mid'] = summoner.storage_wind_mid available_materials['storage_ele_high'] = 
summoner.storage_wind_high elif monster.monster.element == Monster.ELEMENT_DARK: available_materials['storage_ele_low'] = summoner.storage_dark_low available_materials['storage_ele_mid'] = summoner.storage_dark_mid available_materials['storage_ele_high'] = summoner.storage_dark_high elif monster.monster.element == Monster.ELEMENT_LIGHT: available_materials['storage_ele_low'] = summoner.storage_light_low available_materials['storage_ele_mid'] = summoner.storage_light_mid available_materials['storage_ele_high'] = summoner.storage_light_high context['available_materials'] = available_materials return render(request, 'herders/profile/profile_awaken.html', context) @login_required def fusion(request, profile_name): context = { 'view': 'fusion', } return render(request, 'herders/unimplemented.html', context) @login_required def teams(request, profile_name): context = { 'view': 'teams', } return render(request, 'herders/unimplemented.html', context) @cache_page(60 * 60) def bestiary(request, monster_element=None): print monster_element context = { 'view': 'bestiary', 'monster_element': monster_element, } if monster_element is not None: if monster_element == 'all': context['monster_list'] = Monster.objects.all() else: context['monster_list'] = get_list_or_404(Monster, element=monster_element) else: context['no_filter'] = True return render(request, 'herders/bestiary.html', context) @cache_page(60 * 60) def bestiary_detail(request, monster_id): context = { 'view': 'bestiary', } return render(request, 'herders/unimplemented.html') Removed bestiary caching until issue with appearing logged in is fixed from collections import OrderedDict from django.http import Http404, HttpResponseForbidden from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404 from django.core.urlresolvers import reverse from django.core.exceptions import PermissionDenied from django.db import IntegrityError from django.contrib.auth import authenticate, login, logout from 
django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.template.context_processors import csrf from django.views.decorators.cache import cache_page from .forms import RegisterUserForm, AddMonsterInstanceForm, EditMonsterInstanceForm, AwakenMonsterInstanceForm, \ EditEssenceStorageForm, EditProfileForm from .models import Monster, Summoner, MonsterInstance @cache_page(60 * 10) def register(request): form = RegisterUserForm(request.POST or None) if request.method == 'POST': if form.is_valid(): try: # Create the user new_user = User.objects.create_user( username=form.cleaned_data['username'], password=form.cleaned_data['password'], ) new_user.save() new_summoner = Summoner.objects.create( user=new_user, summoner_name=form.cleaned_data['summoner_name'], public=form.cleaned_data['is_public'], ) new_summoner.save() # Automatically log them in user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password']) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile', profile_name=user.username, view_mode='list') except IntegrityError: form.add_error('username', 'Username already taken') context = {'form': form} return render(request, 'herders/register.html', context) def log_in(request): context = {} context.update(csrf(request)) if request.method == 'POST': username = request.POST['username'] userpass = request.POST['userpass'] user = authenticate(username=username, password=userpass) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile', profile_name=user.username, view_mode='list') # If the above falls through then the login failed context['login_failure'] = True context['username'] = username # No data POSTed or the above login/auth failed. 
return render(request, 'herders/login.html', context) def log_out(request): logout(request) return redirect('news:latest_news') def profile(request, profile_name=None, view_mode='list', sort_method='grade'): if profile_name is None: if request.user.is_authenticated(): profile_name = request.user.username else: raise Http404('No user profile specified and not logged in. ') summoner = get_object_or_404(Summoner, user__username=profile_name) # Determine if the person logged in is the one requesting the view is_owner = request.user.is_authenticated() and summoner.user == request.user context = { 'add_monster_form': AddMonsterInstanceForm(), 'profile_name': profile_name, 'is_owner': is_owner, 'view_mode': view_mode, 'sort_method': sort_method, 'return_path': request.path, 'view': 'profile', } if is_owner or summoner.public: if view_mode.lower() == 'list': context['monster_stable'] = MonsterInstance.objects.filter(owner=summoner) return render(request, 'herders/profile/profile_view.html', context) elif view_mode.lower() == 'box': if sort_method == 'grade': monster_stable = OrderedDict() monster_stable['6*'] = MonsterInstance.objects.filter(owner=summoner, stars=6).order_by('-level', 'monster__name') monster_stable['5*'] = MonsterInstance.objects.filter(owner=summoner, stars=5).order_by('-level', 'monster__name') monster_stable['4*'] = MonsterInstance.objects.filter(owner=summoner, stars=4).order_by('-level', 'monster__name') monster_stable['3*'] = MonsterInstance.objects.filter(owner=summoner, stars=3).order_by('-level', 'monster__name') monster_stable['2*'] = MonsterInstance.objects.filter(owner=summoner, stars=2).order_by('-level', 'monster__name') monster_stable['1*'] = MonsterInstance.objects.filter(owner=summoner, stars=1).order_by('-level', 'monster__name') elif sort_method == 'level': monster_stable = OrderedDict() monster_stable['40-31'] = MonsterInstance.objects.filter(owner=summoner, level__gt=30).order_by('-level', '-stars', 'monster__name') 
monster_stable['30-21'] = MonsterInstance.objects.filter(owner=summoner, level__gt=20).filter(level__lte=30).order_by('-level', '-stars', 'monster__name') monster_stable['20-11'] = MonsterInstance.objects.filter(owner=summoner, level__gt=10).filter(level__lte=20).order_by('-level', '-stars', 'monster__name') monster_stable['10-1'] = MonsterInstance.objects.filter(owner=summoner, level__lte=10).order_by('-level', '-stars', 'monster__name') elif sort_method == 'attribute': monster_stable = OrderedDict() monster_stable['water'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name') monster_stable['fire'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name') monster_stable['wind'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name') monster_stable['light'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name') monster_stable['dark'] = MonsterInstance.objects.filter(owner=summoner, monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name') else: raise Http404('Invalid sort method') context['monster_stable'] = monster_stable return render(request, 'herders/profile/profile_box.html', context) else: raise Http404('Unknown profile view mode') else: return render(request, 'herders/profile/not_public.html') @login_required def profile_edit(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) form = EditProfileForm(request.POST or None, instance=request.user.summoner) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 'is_owner': True, # Because of @login_required 
decorator 'profile_name': profile_name, 'return_path': return_path, 'profile_form': form, } if request.method == 'POST' and form.is_valid(): form.save() return redirect(return_path) else: return render(request, 'herders/profile/profile_edit.html', context) @login_required def profile_storage(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) form = EditEssenceStorageForm(request.POST or None, instance=request.user.summoner) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 'is_owner': True, 'profile_name': request.user.username, 'storage_form': form, 'view': 'profile', 'profile_view': 'materials', } if request.method == 'POST' and form.is_valid(): form.save() return redirect(return_path) else: return render(request, 'herders/profile/profile_storage.html', context) @login_required() def monster_instance_add(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) form = AddMonsterInstanceForm(request.POST or None) if form.is_valid() and request.method == 'POST': # Create the monster instance new_monster = form.save(commit=False) new_monster.owner = request.user.summoner new_monster.save() return redirect(return_path) else: # Re-show same page but with form filled in and errors shown context = { 'profile_name': profile_name, 'add_monster_form': form, 'return_path': return_path, 'is_owner': True, 'view': 'profile', } return render(request, 'herders/profile/profile_monster_add.html', context) def monster_instance_view(request, profile_name, instance_id): context = { 'view': 'profile', } return render(request, 'herders/unimplemented.html') def monster_instance_edit(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 
'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) is_owner = monster.owner == request.user.summoner form = EditMonsterInstanceForm(request.POST or None, instance=monster) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 'profile_name': request.user.username, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'edit_monster_form': form, 'view': 'profile', } if request.method == 'POST': if is_owner: if form.is_valid(): form.save() return redirect(return_path) else: # Redisplay form with validation error messages context['validation_errors'] = form.non_field_errors() return render(request, 'herders/profile/profile_monster_edit.html', context) else: raise PermissionDenied() else: return render(request, 'herders/profile/profile_monster_edit.html', context) @login_required() def monster_instance_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == monster.owner: monster.delete() return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_power_up(request, profile_name, instance_id): context = { 'view': 'profile', } return render(request, 'herders/unimplemented.html') @login_required() def monster_instance_awaken(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) is_owner = monster.owner == request.user.summoner form = AwakenMonsterInstanceForm(request.POST or None) form.helper.form_action = request.path + '?next=' + return_path context = { 'add_monster_form': AddMonsterInstanceForm(), 
'profile_name': request.user.username, 'is_owner': is_owner, # Because of @login_required decorator 'monster': monster, 'awaken_monster_form': form, } if request.method == 'POST' and form.is_valid() and is_owner: # Subtract essences from inventory if requested if form.cleaned_data['subtract_materials']: summoner = Summoner.objects.get(user=request.user) if monster.monster.awaken_magic_mats_high: summoner.storage_magic_high -= monster.monster.awaken_magic_mats_high if monster.monster.awaken_magic_mats_mid: summoner.storage_magic_mid -= monster.monster.awaken_magic_mats_mid if monster.monster.awaken_magic_mats_low: summoner.storage_magic_low -= monster.monster.awaken_magic_mats_low if monster.monster.element == Monster.ELEMENT_FIRE: if monster.monster.awaken_ele_mats_high: summoner.storage_fire_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_fire_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_fire_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WATER: if monster.monster.awaken_ele_mats_high: summoner.storage_water_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_water_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_water_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WIND: if monster.monster.awaken_ele_mats_high: summoner.storage_wind_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_wind_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_wind_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_DARK: if monster.monster.awaken_ele_mats_high: summoner.storage_dark_high -= monster.monster.awaken_ele_mats_high if 
monster.monster.awaken_ele_mats_mid: summoner.storage_dark_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_dark_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_LIGHT: if monster.monster.awaken_ele_mats_high: summoner.storage_light_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_light_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_light_low -= monster.monster.awaken_ele_mats_low summoner.save() # Perform the awakening by instance's monster source ID monster.monster = monster.monster.awakens_to() monster.save() return redirect(return_path) else: # Retreive list of awakening materials from summoner profile summoner = Summoner.objects.get(user=request.user) available_materials = { 'storage_magic_low': summoner.storage_magic_low, 'storage_magic_mid': summoner.storage_magic_mid, 'storage_magic_high': summoner.storage_magic_high } if monster.monster.element == Monster.ELEMENT_FIRE: available_materials['storage_ele_low'] = summoner.storage_fire_low available_materials['storage_ele_mid'] = summoner.storage_fire_mid available_materials['storage_ele_high'] = summoner.storage_fire_high elif monster.monster.element == Monster.ELEMENT_WATER: available_materials['storage_ele_low'] = summoner.storage_water_low available_materials['storage_ele_mid'] = summoner.storage_water_mid available_materials['storage_ele_high'] = summoner.storage_water_high elif monster.monster.element == Monster.ELEMENT_WIND: available_materials['storage_ele_low'] = summoner.storage_wind_low available_materials['storage_ele_mid'] = summoner.storage_wind_mid available_materials['storage_ele_high'] = summoner.storage_wind_high elif monster.monster.element == Monster.ELEMENT_DARK: available_materials['storage_ele_low'] = summoner.storage_dark_low available_materials['storage_ele_mid'] = summoner.storage_dark_mid 
available_materials['storage_ele_high'] = summoner.storage_dark_high elif monster.monster.element == Monster.ELEMENT_LIGHT: available_materials['storage_ele_low'] = summoner.storage_light_low available_materials['storage_ele_mid'] = summoner.storage_light_mid available_materials['storage_ele_high'] = summoner.storage_light_high context['available_materials'] = available_materials return render(request, 'herders/profile/profile_awaken.html', context) @login_required def fusion(request, profile_name): context = { 'view': 'fusion', } return render(request, 'herders/unimplemented.html', context) @login_required def teams(request, profile_name): context = { 'view': 'teams', } return render(request, 'herders/unimplemented.html', context) def bestiary(request, monster_element=None): print monster_element context = { 'view': 'bestiary', 'monster_element': monster_element, } if monster_element is not None: if monster_element == 'all': context['monster_list'] = Monster.objects.all() else: context['monster_list'] = get_list_or_404(Monster, element=monster_element) else: context['no_filter'] = True return render(request, 'herders/bestiary.html', context) def bestiary_detail(request, monster_id): context = { 'view': 'bestiary', } return render(request, 'herders/unimplemented.html')
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import sys from spack import * class Mpibind(AutotoolsPackage): """A memory-driven algorithm to map parallel codes to heterogeneous architectures""" homepage = "https://github.com/LLNL/mpibind" url = "https://github.com/LLNL/mpibind/archive/refs/tags/v0.5.0.tar.gz" git = "https://github.com/LLNL/mpibind.git" maintainers = ['eleon'] # The build process uses 'git describe --tags' to get the # package version, thus we need 'get_full_repo' version('master', branch='master', get_full_repo=True) version('0.5.0', sha256='51bb27341109aeef121a8630bd56f5551c70ebfd337a459fb70ef9015d97d2b7') variant('cuda', default=False, description='Build w/support for NVIDIA GPUs.') variant('rocm', default=False, description='Build w/support for AMD GPUs.') depends_on('autoconf', type='build') depends_on('automake', type='build') depends_on('libtool', type='build') depends_on('m4', type='build') depends_on('pkgconf', type='build') depends_on('hwloc@2:+libxml2', type='link') depends_on('hwloc@2:+pci', when=(sys.platform != 'darwin'), type='link') depends_on('hwloc@2:+cuda+nvml', when='+cuda', type='link') depends_on('hwloc@2.4:+rocm+opencl', when='+rocm', type='link') def autoreconf(self, spec, prefix): autoreconf('--install', '--verbose', '--force') # To build and run the tests, make sure 'libtap' is installed # on the target system and is recognized by pkg-config. # Unfortunately, libtap is not in Spack. mpibind: add v0.7.0 and new flux variant (#26359) # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) import sys from spack import * class Mpibind(AutotoolsPackage): """A memory-driven algorithm to map parallel codes to heterogeneous architectures""" homepage = "https://github.com/LLNL/mpibind" url = "https://github.com/LLNL/mpibind/archive/refs/tags/v0.5.0.tar.gz" git = "https://github.com/LLNL/mpibind.git" maintainers = ['eleon'] # The build process uses 'git describe --tags' to get the # package version, thus we need 'get_full_repo' version('master', branch='master', get_full_repo=True) version('0.7.0', sha256='33077e7eb50322d2bcfe87bb3ea9159c2e49f6f045cbbcd2e69e763c3bec4330') version('0.5.0', sha256='51bb27341109aeef121a8630bd56f5551c70ebfd337a459fb70ef9015d97d2b7') variant('cuda', default=False, description='Build w/support for NVIDIA GPUs.') variant('rocm', default=False, description='Build w/support for AMD GPUs.') variant('flux', default=False, description='Build the Flux plugin.') depends_on('autoconf', type='build') depends_on('automake', type='build') depends_on('libtool', type='build') depends_on('m4', type='build') depends_on('pkgconf', type='build') depends_on('hwloc@2:+libxml2', type='link') depends_on('hwloc@2:+pci', when=(sys.platform != 'darwin'), type='link') depends_on('hwloc@2:+cuda+nvml', when='+cuda', type='link') depends_on('hwloc@2.4:+rocm+opencl', when='+rocm', type='link') # Requiring @master temporarily while Flux adds # FLUX_SHELL_RC_PATH to a stable version (>0.29.0). # mpibind will require at least such version. depends_on('flux-core@master', when='+flux', type='link') def autoreconf(self, spec, prefix): autoreconf('--install', '--verbose', '--force') @when('+flux') def setup_run_environment(self, env): """Load the mpibind plugin into Flux""" env.prepend_path('FLUX_SHELL_RC_PATH', join_path(self.prefix, 'share', 'mpibind')) # To build and run the tests, make sure 'libtap' is installed # on the target system and is recognized by pkg-config. # Unfortunately, libtap is not in Spack.
#!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Views for Sponsor profiles. """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', '"Lennard de Rijk" <ljvderijk@gmail.com>', '"Pawel Solyga" <pawel.solyga@gmail.com>', ] from google.appengine.api import users from django import forms from django.utils.translation import ugettext_lazy from soc.logic import dicts from soc.logic import validate from soc.views import helper from soc.views.helper import widgets from soc.views.models import base import soc.models.document import soc.logic.models.document import soc.logic.dicts import soc.views.helper import soc.views.helper.widgets class CreateForm(helper.forms.BaseForm): """Django form displayed when Developer creates a Document. 
""" content = forms.fields.CharField(widget=helper.widgets.TinyMCE( attrs={'rows':10, 'cols':40})) class Meta: model = soc.models.document.Document #: list of model fields which will *not* be gathered by the form exclude = ['inheritance_line', 'author', 'created', 'modified'] def clean_partial_path(self): partial_path = self.cleaned_data.get('partial_path') # TODO(tlarsen): combine path and link_name and check for uniqueness if not validate.isPartialPathFormatValid(partial_path): raise forms.ValidationError("This partial path is in wrong format.") return partial_path def clean_link_name(self): link_name = self.cleaned_data.get('link_name') # TODO(tlarsen): combine path and link_name and check for uniqueness if not validate.isLinkNameFormatValid(link_name): raise forms.ValidationError("This link name is in wrong format.") return link_name class EditForm(CreateForm): """Django form displayed a Document is edited. """ doc_key_name = forms.fields.CharField(widget=forms.HiddenInput) created_by = forms.fields.CharField(widget=helper.widgets.ReadOnlyInput(), required=False) class View(base.View): """View methods for the Docs model """ def __init__(self, original_params=None, original_rights=None): """Defines the fields and methods required for the base View class to provide the user with list, public, create, edit and delete views. 
Params: original_params: a dict with params for this View original_rights: a dict with right definitions for this View """ self._logic = soc.logic.models.document.logic params = {} rights = {} params['name'] = "Document" params['name_short'] = "Docs" params['name_plural'] = "Documents" params['edit_form'] = EditForm params['create_form'] = CreateForm # TODO(tlarsen) Add support for Django style template lookup params['edit_template'] = 'soc/models/edit.html' params['public_template'] = 'soc/docs/public.html' params['list_template'] = 'soc/models/list.html' params['lists_template'] = { 'list_main': 'soc/list/list_main.html', 'list_pagination': 'soc/list/list_pagination.html', 'list_row': 'soc/docs/list/docs_row.html', 'list_heading': 'soc/docs/list/docs_heading.html', } params['delete_redirect'] = '/docs/list' params['create_redirect'] = 'soc/models/edit.html' params['save_message'] = [ugettext_lazy('Profile saved.')] params['edit_params'] = { self.DEF_SUBMIT_MSG_PARAM_NAME: self.DEF_SUBMIT_MSG_PROFILE_SAVED, } rights['list'] = [helper.access.checkIsDeveloper] rights['delete'] = [helper.access.checkIsDeveloper] params = dicts.merge(original_params, params) rights = dicts.merge(original_rights, rights) base.View.__init__(self, rights=rights, params=params) def _editPost(self, request, entity, fields): """See base.View._editPost(). """ id = users.get_current_user() user = soc.logic.models.user.logic.getForFields({'id': id}, unique=True) fields['author'] = user def _editGet(self, request, entity, form): """See base.View._editGet(). """ form.fields['created_by'].initial = entity.author.link_name form.fields['doc_key_name'].initial = entity.key().name(), view = View() create = view.create edit = view.edit delete = view.delete list = view.list public = view.public Fixed typo in docstring --HG-- extra : convert_revision : svn%3A32761e7d-7263-4528-b7be-7235b26367ec/trunk%401017 #!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Views for Documents. """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', '"Lennard de Rijk" <ljvderijk@gmail.com>', '"Pawel Solyga" <pawel.solyga@gmail.com>', ] from google.appengine.api import users from django import forms from django.utils.translation import ugettext_lazy from soc.logic import dicts from soc.logic import validate from soc.views import helper from soc.views.helper import widgets from soc.views.models import base import soc.models.document import soc.logic.models.document import soc.logic.dicts import soc.views.helper import soc.views.helper.widgets class CreateForm(helper.forms.BaseForm): """Django form displayed when Developer creates a Document. 
""" content = forms.fields.CharField(widget=helper.widgets.TinyMCE( attrs={'rows':10, 'cols':40})) class Meta: model = soc.models.document.Document #: list of model fields which will *not* be gathered by the form exclude = ['inheritance_line', 'author', 'created', 'modified'] def clean_partial_path(self): partial_path = self.cleaned_data.get('partial_path') # TODO(tlarsen): combine path and link_name and check for uniqueness if not validate.isPartialPathFormatValid(partial_path): raise forms.ValidationError("This partial path is in wrong format.") return partial_path def clean_link_name(self): link_name = self.cleaned_data.get('link_name') # TODO(tlarsen): combine path and link_name and check for uniqueness if not validate.isLinkNameFormatValid(link_name): raise forms.ValidationError("This link name is in wrong format.") return link_name class EditForm(CreateForm): """Django form displayed a Document is edited. """ doc_key_name = forms.fields.CharField(widget=forms.HiddenInput) created_by = forms.fields.CharField(widget=helper.widgets.ReadOnlyInput(), required=False) class View(base.View): """View methods for the Docs model """ def __init__(self, original_params=None, original_rights=None): """Defines the fields and methods required for the base View class to provide the user with list, public, create, edit and delete views. 
Params: original_params: a dict with params for this View original_rights: a dict with right definitions for this View """ self._logic = soc.logic.models.document.logic params = {} rights = {} params['name'] = "Document" params['name_short'] = "Docs" params['name_plural'] = "Documents" params['edit_form'] = EditForm params['create_form'] = CreateForm # TODO(tlarsen) Add support for Django style template lookup params['edit_template'] = 'soc/models/edit.html' params['public_template'] = 'soc/docs/public.html' params['list_template'] = 'soc/models/list.html' params['lists_template'] = { 'list_main': 'soc/list/list_main.html', 'list_pagination': 'soc/list/list_pagination.html', 'list_row': 'soc/docs/list/docs_row.html', 'list_heading': 'soc/docs/list/docs_heading.html', } params['delete_redirect'] = '/docs/list' params['create_redirect'] = 'soc/models/edit.html' params['save_message'] = [ugettext_lazy('Profile saved.')] params['edit_params'] = { self.DEF_SUBMIT_MSG_PARAM_NAME: self.DEF_SUBMIT_MSG_PROFILE_SAVED, } rights['list'] = [helper.access.checkIsDeveloper] rights['delete'] = [helper.access.checkIsDeveloper] params = dicts.merge(original_params, params) rights = dicts.merge(original_rights, rights) base.View.__init__(self, rights=rights, params=params) def _editPost(self, request, entity, fields): """See base.View._editPost(). """ id = users.get_current_user() user = soc.logic.models.user.logic.getForFields({'id': id}, unique=True) fields['author'] = user def _editGet(self, request, entity, form): """See base.View._editGet(). """ form.fields['created_by'].initial = entity.author.link_name form.fields['doc_key_name'].initial = entity.key().name(), view = View() create = view.create edit = view.edit delete = view.delete list = view.list public = view.public
import argparse import importlib import os import pickle import sys from . import builtins from . import find from . import packages from . import rules from . import utils from .build_inputs import BuildInputs from .environment import Environment from .version import __version__ bfgfile = 'build.bfg' def is_srcdir(path): return os.path.exists(os.path.join(path, bfgfile)) def samefile(path1, path2): if hasattr(os.path, 'samefile'): return os.path.samefile(path1, path2) else: # This isn't entirely accurate, but it's close enough, and should only # be necessary for Windows with Python 2.x. return os.path.realpath(path1) == os.path.realpath(path2) def parse_args(parser, args=None, namespace=None): def check_dir(path, check_exist=False): if not os.path.exists(path): raise ValueError('{} does not exist'.format(path)) if not os.path.isdir(path): parser.error('{} is not a directory'.format(path)) args = parser.parse_args(args, namespace) if not args.regenerate: if not args.srcdir: parser.error('at least one of srcdir or builddir must be defined') if args.builddir: check_dir(args.srcdir, check_exist=True) else: args.builddir = '.' if is_srcdir(args.srcdir): args.srcdir, args.builddir = args.builddir, args.srcdir if os.path.exists(args.builddir): check_dir(args.builddir) if samefile(args.srcdir, args.builddir): parser.error('source and build directories must be different') if not is_srcdir(args.srcdir): parser.error('source directory must contain a build.bfg file') if is_srcdir(args.builddir): parser.error('build directory must not contain a build.bfg file') if not os.path.exists(args.builddir): os.mkdir(args.builddir) args.srcdir = os.path.abspath(args.srcdir) args.builddir = os.path.abspath(args.builddir) else: args.srcdir, args.builddir = None, args.srcdir if args.srcdir: parser.error('source directory cannot be passed when regenerating') if not args.builddir: args.builddir = '.' 
check_dir(args.builddir, check_exist=True) args.builddir = os.path.abspath(args.builddir) return args def main(): parser = argparse.ArgumentParser() parser.add_argument('srcdir', nargs='?', help='source directory') parser.add_argument('builddir', nargs='?', help='build directory') parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) parser.add_argument('--backend', default='make', help='backend') parser.add_argument('--prefix', default='/usr', help='installation prefix') parser.add_argument('--regenerate', action='store_true', help='regenerate build files') args = parse_args(parser) try: if args.regenerate: env = Environment.load(args.builddir) else: env = Environment( bfgpath=os.path.realpath(sys.argv[0]), srcdir=args.srcdir, builddir=args.builddir, backend=args.backend, install_prefix=os.path.abspath(args.prefix), variables=dict(os.environ), ) env.save(args.builddir) except Exception as e: sys.stderr.write('{}: {}\n'.format(parser.prog, e)) return 1 build = BuildInputs() os.chdir(env.srcdir) execfile(os.path.join(env.srcdir, bfgfile), builtins.bind(build, env)) backend = importlib.import_module('.' + env.backend, 'bfg9000.backends') backend.write(env, build) Fix error handling in folder validation import argparse import importlib import os import pickle import sys from . import builtins from . import find from . import packages from . import rules from . import utils from .build_inputs import BuildInputs from .environment import Environment from .version import __version__ bfgfile = 'build.bfg' def is_srcdir(path): return os.path.exists(os.path.join(path, bfgfile)) def samefile(path1, path2): if hasattr(os.path, 'samefile'): return os.path.samefile(path1, path2) else: # This isn't entirely accurate, but it's close enough, and should only # be necessary for Windows with Python 2.x. 
return os.path.realpath(path1) == os.path.realpath(path2) def parse_args(parser, args=None, namespace=None): def check_dir(path, check_exist=False): if not os.path.exists(path): parser.error('{!r} does not exist'.format(path)) if not os.path.isdir(path): parser.error('{!r} is not a directory'.format(path)) args = parser.parse_args(args, namespace) if not args.regenerate: if not args.srcdir: parser.error('at least one of srcdir or builddir must be defined') if args.builddir: check_dir(args.srcdir, check_exist=True) else: args.builddir = '.' if is_srcdir(args.srcdir): args.srcdir, args.builddir = args.builddir, args.srcdir if os.path.exists(args.builddir): check_dir(args.builddir) if samefile(args.srcdir, args.builddir): parser.error('source and build directories must be different') if not is_srcdir(args.srcdir): parser.error('source directory must contain a build.bfg file') if is_srcdir(args.builddir): parser.error('build directory must not contain a build.bfg file') if not os.path.exists(args.builddir): os.mkdir(args.builddir) args.srcdir = os.path.abspath(args.srcdir) args.builddir = os.path.abspath(args.builddir) else: args.srcdir, args.builddir = None, args.srcdir if args.srcdir: parser.error('source directory cannot be passed when regenerating') if not args.builddir: args.builddir = '.' 
check_dir(args.builddir, check_exist=True) args.builddir = os.path.abspath(args.builddir) return args def main(): parser = argparse.ArgumentParser() parser.add_argument('srcdir', nargs='?', help='source directory') parser.add_argument('builddir', nargs='?', help='build directory') parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) parser.add_argument('--backend', default='make', help='backend') parser.add_argument('--prefix', default='/usr', help='installation prefix') parser.add_argument('--regenerate', action='store_true', help='regenerate build files') args = parse_args(parser) try: if args.regenerate: env = Environment.load(args.builddir) else: env = Environment( bfgpath=os.path.realpath(sys.argv[0]), srcdir=args.srcdir, builddir=args.builddir, backend=args.backend, install_prefix=os.path.abspath(args.prefix), variables=dict(os.environ), ) env.save(args.builddir) except Exception as e: sys.stderr.write('{}: {}\n'.format(parser.prog, e)) return 1 build = BuildInputs() os.chdir(env.srcdir) execfile(os.path.join(env.srcdir, bfgfile), builtins.bind(build, env)) backend = importlib.import_module('.' + env.backend, 'bfg9000.backends') backend.write(env, build)
import pyopencl as cl
import numpy
from pyPaSWAS.Core.SmithWaterman import SmithWaterman
from pyPaSWAS.Core import STOP_DIRECTION, LEFT_DIRECTION, NO_DIRECTION, UPPER_DIRECTION, UPPER_LEFT_DIRECTION
from pyPaSWAS.Core.PaSWAS import CPUcode
from pyPaSWAS.Core.PaSWAS import GPUcode
from pyPaSWAS.Core.StartingPoint import StartingPoint


class SmithWatermanOcl(SmithWaterman):
    '''
    Common OpenCL scaffolding for the Smith-Waterman aligner: selects an
    OpenCL platform and device, creates the context/queue, compiles the
    kernel program and manages the device and zero-copy host buffers.
    '''

    def __init__(self, logger, score, settings):
        '''
        Constructor
        @param logger: logger used for debug output
        @param score: scoring scheme for the alignment
        @param settings: settings object providing device_type, platform_name
            and device_number
        '''
        SmithWaterman.__init__(self, logger, score, settings)
        #self.oclcode = OCLcode(self.logger)

        # platforms: A single ICD on a computer
        self.platform = None
        # device: device which will perform computation (for example a CPU or GPU)
        self.device = None
        # context: manages a command-queue, memory, program and kernel objects
        self.ctx = None
        # queue: stores instructions for the device
        self.queue = None
        # program: the compiled kernel program
        self.program = None

        # device_type: type of device to run computations on
        self.device_type = 0
        self._set_device_type(self.settings.device_type)
        self._set_platform(self.settings.platform_name)
        self._initialize_device(int(self.settings.device_number))

    def _init_oclcode(self):
        # Compiling part of the CUDA code in advance
        self.oclcode.set_shared_xy_code(self.shared_x, self.shared_y)
        self.oclcode.set_direction_code(NO_DIRECTION, UPPER_LEFT_DIRECTION,
                                        UPPER_DIRECTION, LEFT_DIRECTION,
                                        STOP_DIRECTION)

    def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy):
        ''' Executes a single run of the calculate score kernel.
        Abstract here; implemented by the CPU/GPU subclasses below. '''
        pass

    def _execute_traceback_kernel(self, number_of_blocks, idx, idy):
        ''' Executes a single run of the traceback kernel.
        Abstract here; implemented by the CPU/GPU subclasses below. '''
        pass

    def _get_direction_byte_array(self):
        '''
        Get the resulting directions
        @return gives the resulting direction array as byte array
        '''
        pass

    def __del__(self):
        '''Destructor. Removes the current running context'''
        # Drop references in reverse creation order so pyopencl can release
        # the underlying OpenCL objects (program -> queue -> context -> device).
        del self.program
        del self.queue
        del self.ctx
        del self.device
        del self.platform
        self.device_type = 0

    def _set_device_type(self, device_type):
        '''Sets the device type; unknown strings fall back to CPU.'''
        if device_type.upper() == 'ACCELERATOR':
            self.device_type = cl.device_type.ACCELERATOR
        elif device_type.upper() == 'GPU':
            self.device_type = cl.device_type.GPU
        elif device_type.upper() == 'CPU':
            self.device_type = cl.device_type.CPU
        else:
            self.logger.debug("Warning: device type is set to default: CPU")
            self.device_type = cl.device_type.CPU

    def _set_platform(self, platform_name):
        # First pass: prefer the platform named by the user that also has a
        # device of the requested type; second pass: any platform with such a
        # device. Raises if neither pass finds one.
        found_platform = False
        for platform in cl.get_platforms():
            for device in platform.get_devices():
                if (platform_name.upper() in str(platform).upper() and
                        device.get_info(cl.device_info.TYPE) == self.device_type):
                    self.platform = platform
                    found_platform = True
                    break
            if(found_platform):
                self.logger.debug("Found platform {}".format(str(self.platform)))
                break
        if not (self.platform):
            for platform in cl.get_platforms():
                for device in platform.get_devices():
                    if (device.get_info(cl.device_info.TYPE) == self.device_type):
                        self.platform = platform
                        found_platform = True
                        break
                if(found_platform):
                    self.logger.debug('Found platform {}, however this is not the platform indicated by the user'.format(str(self.platform)))
                    break
        if not (self.platform):
            raise RuntimeError('Failed to find platform')

    def _initialize_device(self, device_number):
        '''
        Initializes a device and verifies its computational abilities.
        @param device_number: int value representing the device to use
        '''
        self.logger.debug('Initializing device {0}'.format(device_number))
        self.device = self.platform.get_devices(device_type=self.device_type)[device_number]
        self.ctx = cl.Context(devices=[self.device])
        self.queue = cl.CommandQueue(self.ctx)
        #self.logger.debug("context:{}".format(self.ctx) )

    def _device_global_mem_size(self):
        #return clCharacterize.usable_local_mem_size(self.device)
        # GLOBAL_MEM_SIZE
        return self.device.get_info(cl.device_info.MAX_MEM_ALLOC_SIZE)

    def _clear_memory(self):
        '''Clears the claimed memory on the device.'''
        self.logger.debug('Clearing device memory.')
        self._clear_normal_memory()
        self._clear_zero_copy_memory()

    def _clear_normal_memory(self):
        # Releases the plain device buffers; guards allow a partial init.
        self.logger.debug('Clearing normal device memory.')
        if (self.d_sequences is not None):
            self.d_sequences.release()
        if (self.d_targets is not None):
            self.d_targets.release()
        if (self.d_matrix is not None):
            self.d_matrix.release()
        if (self.d_global_maxima is not None):
            self.d_global_maxima.release()

    def _clear_zero_copy_memory(self):
        # Releases the host-pointer-backed (zero-copy) buffers.
        self.logger.debug('Clearing zero-copy device memory.')
        if (self.d_starting_points_zero_copy is not None):
            self.d_starting_points_zero_copy.release()
        if (self.d_global_direction_zero_copy is not None):
            self.d_global_direction_zero_copy.release()
        if (self.d_max_possible_score_zero_copy is not None):
            self.d_max_possible_score_zero_copy.release()

    def _init_normal_memory(self):
        '''
        #_init_memory will initialize all required memory on the device based on the current settings.
        Make sure to initialize these values!
        @return number of bytes allocated on the device
        '''
        # Sequence device memory
        self.logger.debug('Initializing normal device memory.')
        memory = self.length_of_x_sequences * self.number_of_sequences
        self.d_sequences = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory)
        mem_size = memory

        # Target device memory
        memory = self.length_of_y_sequences * self.number_targets
        self.d_targets = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory)
        mem_size += memory

        return mem_size

    def _init_zero_copy_memory(self):
        self.logger.debug('Initializing zero-copy memory.')
        # Starting points host memory allocation and device copy
        memory = (self.size_of_startingpoint * self.maximum_number_starting_points *
                  self.number_of_sequences * self.number_targets)
        self.d_starting_points_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        mem_size = memory

        # Global directions host memory allocation and device copy
        memory = (self.length_of_x_sequences * self.number_of_sequences *
                  self.length_of_y_sequences * self.number_targets)
        self.d_global_direction_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        mem_size += memory

        # Maximum zero copy memory allocation and device copy
        memory = (self.number_of_sequences * self.number_of_targets * SmithWaterman.float_size)
        self.d_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        mem_size += memory

        return mem_size

    def _init_memory(self):
        mem_size = self._init_normal_memory()
        mem_size += self._init_zero_copy_memory()
        self.logger.debug('Allocated: {}MB of memory'.format(str(mem_size / 1024.0 / 1024.00)))

    def _init_zero_copy(self):
        '''
        Initializes the index used for the 'zero copy' of the found starting points
        '''
        self.d_index_increment = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=SmithWaterman.int_size)
        index = numpy.zeros((1), dtype=numpy.int32)
        # NOTE(review): cl.enqueue_write_buffer is deprecated in recent
        # pyopencl releases in favour of cl.enqueue_copy — confirm the
        # pyopencl version this project pins.
        cl.enqueue_write_buffer(self.queue, self.d_index_increment, index).wait()

    def _compile_code(self):
        """Compile the device code with current settings"""
        self.logger.debug('Compiling OpenCL code.')
        code = self.oclcode.get_code(self.score, self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences)
        #self.logger.debug('Code: \n{}'.format(code))
        self.program = cl.Program(self.ctx, code).build()

    def copy_sequences(self, h_sequences, h_targets):
        '''
        Copy the sequences and targets to the device
        @param h_sequences: the sequences to be copied. Should be a single string containing all sequences
        @param h_targets: the targets to be copied. Should be a single string containing all sequences
        '''
        cl.enqueue_copy(self.queue, self.d_sequences, h_sequences).wait()
        cl.enqueue_copy(self.queue, self.d_targets, h_targets).wait()

    def _get_number_of_starting_points(self):
        ''' Returns the number of startingpoints. '''
        self.logger.debug('Getting number of starting points.')
        self.index = numpy.zeros((1), dtype=numpy.int32)
        cl.enqueue_copy(self.queue, self.index, self.d_index_increment)
        return self.index[0]

    def _fill_max_possible_score(self, target_index, targets, i, index, records_seqs):
        # Minimum score is scaled by the shorter of the two sequence lengths
        # and the configured filter factor.
        for tI in range(self.number_of_targets):
            if tI+target_index < len(targets) and i+index < len(records_seqs):
                self.set_minimum_score(tI*self.max_sequences + i,
                                       float(self.score.highest_score) * (len(records_seqs[i+index])
                                                                          if len(records_seqs[i+index]) < len(targets[tI+target_index])
                                                                          else len(targets[tI+target_index])) * float(self.filter_factor))

    def _set_max_possible_score(self, target_index, targets, i, index, records_seqs):
        '''fills the max_possible_score datastructure on the host'''
        self.h_max_possible_score_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_max_possible_score_zero_copy, cl.map_flags.WRITE, 0,
                                                                    self.number_of_sequences * self.number_targets , dtype=numpy.float32)[0]
        self._fill_max_possible_score(target_index, targets, i, index, records_seqs)
        #Unmap memory object
        del self.h_max_possible_score_zero_copy

    def _get_starting_point_byte_array(self):
        '''
        Get the resulting starting points
        @return gives the resulting starting point array as byte array
        '''
        self.h_starting_points_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_starting_points_zero_copy, cl.map_flags.READ, 0,
                                                                 (self.size_of_startingpoint * self.maximum_number_starting_points *
                                                                  self.number_of_sequences * self.number_targets, 1), dtype=numpy.byte)[0]
        return self.h_starting_points_zero_copy

    def _print_alignments(self, sequences, targets, start_seq, start_target, hit_list=None):
        SmithWaterman._print_alignments(self, sequences, targets, start_seq, start_target, hit_list)
        #unmap memory objects
        del self.h_global_direction_zero_copy
        del self.h_starting_points_zero_copy


class SmithWatermanCPU(SmithWatermanOcl):
    '''
    OpenCL Smith-Waterman variant tuned for CPU devices: each work item
    processes a workload_x * workload_y tile and a semaphore buffer guards
    the traceback.
    '''

    def __init__(self, logger, score, settings):
        '''
        Constructor
        '''
        SmithWatermanOcl.__init__(self, logger, score, settings)
        self.oclcode = CPUcode(self.logger)

        self.workload_x = 4
        self.workload_y = 4

        self.workgroup_x = self.shared_x // self.workload_x
        self.workgroup_y = self.shared_y // self.workload_y

        self.d_semaphores = None

        self._init_oclcode()

    def _init_normal_memory(self):
        mem_size = SmithWatermanOcl._init_normal_memory(self)

        # Input matrix device memory
        memory = (SmithWaterman.float_size * (self.length_of_x_sequences + 1) * self.number_of_sequences *
                  (self.length_of_y_sequences + 1) * self.number_targets)
        self.d_matrix = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
        mem_size += memory
        pattern = numpy.zeros((1),dtype=numpy.float32)
        cl.enqueue_fill_buffer(self.queue, self.d_matrix, pattern, 0, size = memory)

        # Maximum global device memory
        memory = (SmithWaterman.float_size * self.x_div_shared_x * self.number_of_sequences *
                  self.y_div_shared_y * self.number_targets * self.workload_x * self.workload_y)
        self.d_global_maxima = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
        mem_size += memory

        # Semaphore buffer, one int per alignment-matrix cell
        memory = (SmithWaterman.int_size * self.length_of_x_sequences *
self.number_of_sequences * self.length_of_y_sequences * self.number_targets) self.d_semaphores = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory) pattern = numpy.zeros((1),dtype=numpy.int32) cl.enqueue_fill_buffer(self.queue, self.d_semaphores, pattern, 0, size=memory) mem_size += memory return mem_size def _get_direction_byte_array(self): ''' Get the resulting directions @return gives the resulting direction array as byte array ''' self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_global_direction_zero_copy, cl.map_flags.READ, 0, (self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences), dtype=numpy.byte)[0] return self.h_global_direction_zero_copy def _get_direction(self, direction_array, sequence, target, block_x, block_y, value_x, value_y): <<<<<<< HEAD #return direction_array[sequence][target][starting_point.value_x][starting_point.value_y] ======= #self.logger.debug("{}, {}, {}, {}".format(sequence,target,block_x*self.workload_x + value_x,block_y*self.workload_y + value_y)) #return direction_array[sequence][target][block_x*self.workload_x + value_x][block_y*self.workload_y + value_y] >>>>>>> branch 'pyopencl' of https://github.com/swarris/pyPaSWAS.git return direction_array[sequence][target][block_x*self.shared_x + value_x][block_y*self.shared_y + value_y] def _set_direction(self, direction, direction_array, sequence, target, block_x, block_y, value_x, value_y): <<<<<<< HEAD #direction_array[sequence][target][starting_point.value_x][starting_point.value_y] = direction direction_array[sequence][target][block_x*self.shared_x + value_x][block_y*self.shared_y + value_y] = direction ======= #direction_array[sequence][target][block_x*self.workload_x + value_x][block_y*self.workload_y + value_y] = direction direction_array[sequence][target][block_x*self.shared_x + value_x][block_y*self.shared_y + value_y] = direction >>>>>>> branch 'pyopencl' of https://github.com/swarris/pyPaSWAS.git 
    def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy):
        ''' Executes a single run of the calculate score kernel'''
        dim_block = (self.workgroup_x, self.workgroup_y)
        dim_grid_sw = (self.number_of_sequences * self.workgroup_x, self.number_targets * number_of_blocks * self.workgroup_y)
        self.program.calculateScore(self.queue, dim_grid_sw, dim_block,
                                    self.d_matrix, numpy.int32(idx), numpy.int32(idy),
                                    numpy.int32(number_of_blocks), self.d_sequences, self.d_targets,
                                    self.d_global_maxima, self.d_global_direction_zero_copy).wait()

    def _execute_traceback_kernel(self, number_of_blocks, idx, idy):
        ''' Executes a single run of the traceback kernel'''
        dim_block = (self.workgroup_x, self.workgroup_y)
        dim_grid_sw = (self.number_of_sequences * self.workgroup_x, self.number_targets * number_of_blocks * self.workgroup_y)
        self.program.traceback(self.queue, dim_grid_sw, dim_block,
                               self.d_matrix, numpy.int32(idx), numpy.int32(idy),
                               numpy.int32(number_of_blocks), self.d_global_maxima,
                               self.d_global_direction_zero_copy, self.d_index_increment,
                               self.d_starting_points_zero_copy, self.d_max_possible_score_zero_copy,
                               self.d_semaphores).wait()

    def _clear_memory(self):
        # Also release the CPU-specific semaphore buffer.
        SmithWatermanOcl._clear_memory(self)
        if (self.d_semaphores is not None):
            self.d_semaphores.release()


class SmithWatermanGPU(SmithWatermanOcl):
    '''
    OpenCL Smith-Waterman variant for GPU devices: one work item per
    alignment-matrix cell, work groups of shared_x * shared_y.
    '''

    def __init__(self, logger, score, settings):
        '''
        Constructor
        '''
        SmithWatermanOcl.__init__(self, logger, score, settings)
        self.oclcode = GPUcode(self.logger)
        self._init_oclcode()

    def _init_normal_memory(self):
        mem_size = SmithWatermanOcl._init_normal_memory(self)

        # Input matrix device memory
        memory = (SmithWaterman.float_size * self.length_of_x_sequences * self.number_of_sequences *
                  self.length_of_y_sequences * self.number_targets)
        self.d_matrix = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
        mem_size += memory

        # Maximum global device memory
        memory = (SmithWaterman.float_size * self.x_div_shared_x * self.number_of_sequences * self.y_div_shared_y *
                  self.number_targets)
        self.d_global_maxima = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory)
        mem_size += memory

        return mem_size

    def _get_direction_byte_array(self):
        '''
        Get the resulting directions
        @return gives the resulting direction array as byte array
        '''
        self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_global_direction_zero_copy, cl.map_flags.READ, 0,
                                                                  (self.number_of_sequences, self.number_targets,
                                                                   self.x_div_shared_x, self.y_div_shared_y,
                                                                   self.shared_x, self.shared_y), dtype=numpy.byte)[0]
        return self.h_global_direction_zero_copy

    def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy):
        ''' Executes a single run of the calculate score kernel'''
        dim_block = (self.shared_x, self.shared_y)
        dim_grid_sw = (self.number_of_sequences * self.shared_x, self.number_targets * number_of_blocks * self.shared_y)
        self.program.calculateScore(self.queue, dim_grid_sw, dim_block,
                                    self.d_matrix, numpy.int32(idx), numpy.int32(idy),
                                    numpy.int32(number_of_blocks), self.d_sequences, self.d_targets,
                                    self.d_global_maxima, self.d_global_direction_zero_copy).wait()

    def _execute_traceback_kernel(self, number_of_blocks, idx, idy):
        ''' Executes a single run of the traceback kernel'''
        dim_block = (self.shared_x, self.shared_y)
        dim_grid_sw = (self.number_of_sequences * self.shared_x, self.number_targets * number_of_blocks * self.shared_y)
        self.program.traceback(self.queue, dim_grid_sw, dim_block,
                               self.d_matrix, numpy.int32(idx), numpy.int32(idy),
                               numpy.int32(number_of_blocks), self.d_global_maxima,
                               self.d_global_direction_zero_copy, self.d_index_increment,
                               self.d_starting_points_zero_copy, self.d_max_possible_score_zero_copy).wait()


class SmithWatermanNVIDIA(SmithWatermanGPU):
    '''
    GPU variant for NVIDIA OpenCL: NVIDIA has no true zero-copy, so each
    zero-copy buffer is backed by a separate pinned host buffer plus an
    explicit device copy.
    '''

    def __init__(self, logger, score, settings):
        '''
        Constructor
        '''
        SmithWatermanGPU.__init__(self, logger, score, settings)
        self.pinned_starting_points_zero_copy = None
        self.pinned_max_possible_score_zero_copy = None
        self.pinned_global_direction_zero_copy = None
        self._init_oclcode()

    def _init_zero_copy_memory(self):
        self.logger.debug('Initializing NVIDIA zero-copy memory.')

        # Starting points host memory allocation and device copy
        memory = (self.size_of_startingpoint * self.maximum_number_starting_points *
                  self.number_of_sequences * self.number_targets)
        self.pinned_starting_points_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        self.d_starting_points_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=memory)
        self.h_starting_points_zero_copy = cl.enqueue_map_buffer(self.queue, self.pinned_starting_points_zero_copy, cl.map_flags.READ, 0,
                                                                 (memory, 1), dtype=numpy.byte)[0]
        mem_size = memory

        # Global directions host memory allocation and device copy
        memory = (self.length_of_x_sequences * self.number_of_sequences *
                  self.length_of_y_sequences * self.number_targets)
        self.pinned_global_direction_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        self.d_global_direction_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=memory)
        self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.pinned_global_direction_zero_copy, cl.map_flags.READ, 0,
                                                                  (memory, 1), dtype=numpy.byte)[0]
        mem_size += memory

        # Maximum zero copy memory allocation and device copy
        memory = (self.number_of_sequences * self.number_of_targets * SmithWaterman.float_size)
        self.pinned_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        self.d_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory)
        self.h_max_possible_score_zero_copy = cl.enqueue_map_buffer(self.queue, self.pinned_max_possible_score_zero_copy, cl.map_flags.WRITE, 0,
                                                                    (self.number_of_sequences * self.number_of_targets, 1), dtype=numpy.float32)[0]
        mem_size += memory

        # Zero copy buffers are allocated twice in NVIDIA
        return 2*mem_size

    def _set_max_possible_score(self, target_index, targets, i, index,
                                records_seqs):
        cl.enqueue_copy(self.queue, self.d_max_possible_score_zero_copy, self.h_max_possible_score_zero_copy).wait()
        self._fill_max_possible_score(target_index, targets, i, index, records_seqs)

    def _get_direction_byte_array(self):
#        cl.enqueue_copy(self.queue, self.h_global_direction_zero_copy, self.d_global_direction_zero_copy).wait()
        self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_global_direction_zero_copy, cl.map_flags.READ, 0,
                                                                  (self.number_of_sequences, self.number_targets,
                                                                   self.x_div_shared_x, self.y_div_shared_y,
                                                                   self.shared_x, self.shared_y), dtype=numpy.byte)[0]
        return self.h_global_direction_zero_copy

    def _get_starting_point_byte_array(self):
        cl.enqueue_copy(self.queue, self.h_starting_points_zero_copy, self.d_starting_points_zero_copy).wait()
        return self.h_starting_points_zero_copy

    def _clear_zero_copy_memory(self):
        # Also release the pinned host-side buffers.
        SmithWatermanGPU._clear_zero_copy_memory(self)
        if (self.pinned_starting_points_zero_copy is not None):
            self.pinned_starting_points_zero_copy.release()
        if (self.pinned_global_direction_zero_copy is not None):
            self.pinned_global_direction_zero_copy.release()
        if (self.pinned_max_possible_score_zero_copy is not None):
            self.pinned_max_possible_score_zero_copy.release()

    def _compile_ocl_code(self):
        """Compile the OpenCL code with current settings"""
        self.logger.debug('Compiling NVIDIA OpenCL code.')
        code = self.oclcode.get_code(self.score, self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences)
        self.program = cl.Program(self.ctx, code).build(options=['-D', 'NVIDIA'])
Fixed staging error
import pyopencl as cl
import numpy
from pyPaSWAS.Core.SmithWaterman import SmithWaterman
from pyPaSWAS.Core import STOP_DIRECTION, LEFT_DIRECTION, NO_DIRECTION, UPPER_DIRECTION, UPPER_LEFT_DIRECTION
from pyPaSWAS.Core.PaSWAS import CPUcode
from pyPaSWAS.Core.PaSWAS import GPUcode
from pyPaSWAS.Core.StartingPoint import StartingPoint

# NOTE(review): everything below duplicates the module above — this chunk
# appears to concatenate the pre-commit copy (with merge conflicts) and the
# post-"Fixed staging error" copy. Confirm and keep only one copy.
class SmithWatermanOcl(SmithWaterman):
    '''
    classdocs
''' def __init__(self, logger, score, settings): ''' Constructor ''' SmithWaterman.__init__(self, logger, score, settings) #self.oclcode = OCLcode(self.logger) # platforms: A single ICD on a computer self.platform = None # device: device which will perform computation (for example a CPU or GPU) self.device = None # context: manages a command-queue, memory, program and kernel objects self.ctx = None # queue: stores instructions for the device self.queue = None # program: the compiled kernel program self.program = None # device_type: type of device to run computations on self.device_type = 0 self._set_device_type(self.settings.device_type) self._set_platform(self.settings.platform_name) self._initialize_device(int(self.settings.device_number)) def _init_oclcode(self): # Compiling part of the CUDA code in advance self.oclcode.set_shared_xy_code(self.shared_x, self.shared_y) self.oclcode.set_direction_code(NO_DIRECTION, UPPER_LEFT_DIRECTION, UPPER_DIRECTION, LEFT_DIRECTION, STOP_DIRECTION) def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy): ''' Executes a single run of the calculate score kernel''' pass def _execute_traceback_kernel(self, number_of_blocks, idx, idy): ''' Executes a single run of the traceback kernel''' pass def _get_direction_byte_array(self): ''' Get the resulting directions @return gives the resulting direction array as byte array ''' pass def __del__(self): '''Destructor. 
Removes the current running context''' del self.program del self.queue del self.ctx del self.device del self.platform self.device_type = 0 def _set_device_type(self, device_type): '''Sets the device type''' if device_type.upper() == 'ACCELERATOR': self.device_type = cl.device_type.ACCELERATOR elif device_type.upper() == 'GPU': self.device_type = cl.device_type.GPU elif device_type.upper() == 'CPU': self.device_type = cl.device_type.CPU else: self.logger.debug("Warning: device type is set to default: CPU") self.device_type = cl.device_type.CPU def _set_platform(self, platform_name): found_platform = False for platform in cl.get_platforms(): for device in platform.get_devices(): if (platform_name.upper() in str(platform).upper() and device.get_info(cl.device_info.TYPE) == self.device_type): self.platform = platform found_platform = True break if(found_platform): self.logger.debug("Found platform {}".format(str(self.platform))) break if not (self.platform): for platform in cl.get_platforms(): for device in platform.get_devices(): if (device.get_info(cl.device_info.TYPE) == self.device_type): self.platform = platform found_platform = True break if(found_platform): self.logger.debug('Found platform {}, however this is not the platform indicated by the user'.format(str(self.platform))) break if not (self.platform): raise RuntimeError('Failed to find platform') def _initialize_device(self, device_number): ''' Initalizes a device and verifies its computational abilities. 
@param device_number: int value representing the device to use ''' self.logger.debug('Initializing device {0}'.format(device_number)) self.device = self.platform.get_devices(device_type=self.device_type)[device_number] self.ctx = cl.Context(devices=[self.device]) self.queue = cl.CommandQueue(self.ctx) #self.logger.debug("context:{}".format(self.ctx) ) def _device_global_mem_size(self): #return clCharacterize.usable_local_mem_size(self.device) # GLOBAL_MEM_SIZE return self.device.get_info(cl.device_info.MAX_MEM_ALLOC_SIZE) def _clear_memory(self): '''Clears the claimed memory on the device.''' self.logger.debug('Clearing device memory.') self._clear_normal_memory() self._clear_zero_copy_memory() def _clear_normal_memory(self): self.logger.debug('Clearing normal device memory.') if (self.d_sequences is not None): self.d_sequences.release() if (self.d_targets is not None): self.d_targets.release() if (self.d_matrix is not None): self.d_matrix.release() if (self.d_global_maxima is not None): self.d_global_maxima.release() def _clear_zero_copy_memory(self): self.logger.debug('Clearing zero-copy device memory.') if (self.d_starting_points_zero_copy is not None): self.d_starting_points_zero_copy.release() if (self.d_global_direction_zero_copy is not None): self.d_global_direction_zero_copy.release() if (self.d_max_possible_score_zero_copy is not None): self.d_max_possible_score_zero_copy.release() def _init_normal_memory(self): ''' #_init_memory will initialize all required memory on the device based on the current settings. Make sure to initialize these values! 
''' # Sequence device memory self.logger.debug('Initializing normal device memory.') memory = self.length_of_x_sequences * self.number_of_sequences self.d_sequences = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory) mem_size = memory # Target device memory memory = self.length_of_y_sequences * self.number_targets self.d_targets = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory) mem_size += memory return mem_size def _init_zero_copy_memory(self): self.logger.debug('Initializing zero-copy memory.') # Starting points host memory allocation and device copy memory = (self.size_of_startingpoint * self.maximum_number_starting_points * self.number_of_sequences * self.number_targets) self.d_starting_points_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory) mem_size = memory # Global directions host memory allocation and device copy memory = (self.length_of_x_sequences * self.number_of_sequences * self.length_of_y_sequences * self.number_targets) self.d_global_direction_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory) mem_size += memory # Maximum zero copy memory allocation and device copy memory = (self.number_of_sequences * self.number_of_targets * SmithWaterman.float_size) self.d_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.ALLOC_HOST_PTR, size=memory) mem_size += memory return mem_size def _init_memory(self): mem_size = self._init_normal_memory() mem_size += self._init_zero_copy_memory() self.logger.debug('Allocated: {}MB of memory'.format(str(mem_size / 1024.0 / 1024.00))) def _init_zero_copy(self): ''' Initializes the index used for the 'zero copy' of the found starting points ''' self.d_index_increment = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=SmithWaterman.int_size) index = numpy.zeros((1), dtype=numpy.int32) cl.enqueue_write_buffer(self.queue, self.d_index_increment, index).wait() def 
_compile_code(self): """Compile the device code with current settings""" self.logger.debug('Compiling OpenCL code.') code = self.oclcode.get_code(self.score, self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences) #self.logger.debug('Code: \n{}'.format(code)) self.program = cl.Program(self.ctx, code).build() def copy_sequences(self, h_sequences, h_targets): ''' Copy the sequences and targets to the device @param h_sequences: the sequences to be copied. Should be a single string containing all sequences @param h_targets: the targets to be copied. Should be a single string containing all sequences ''' cl.enqueue_copy(self.queue, self.d_sequences, h_sequences).wait() cl.enqueue_copy(self.queue, self.d_targets, h_targets).wait() def _get_number_of_starting_points(self): ''' Returns the number of startingpoints. ''' self.logger.debug('Getting number of starting points.') self.index = numpy.zeros((1), dtype=numpy.int32) cl.enqueue_copy(self.queue, self.index, self.d_index_increment) return self.index[0] def _fill_max_possible_score(self, target_index, targets, i, index, records_seqs): for tI in range(self.number_of_targets): if tI+target_index < len(targets) and i+index < len(records_seqs): self.set_minimum_score(tI*self.max_sequences + i, float(self.score.highest_score) * (len(records_seqs[i+index]) if len(records_seqs[i+index]) < len(targets[tI+target_index]) else len(targets[tI+target_index])) * float(self.filter_factor)) def _set_max_possible_score(self, target_index, targets, i, index, records_seqs): '''fills the max_possible_score datastructure on the host''' self.h_max_possible_score_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_max_possible_score_zero_copy, cl.map_flags.WRITE, 0, self.number_of_sequences * self.number_targets , dtype=numpy.float32)[0] self._fill_max_possible_score(target_index, targets, i, index, records_seqs) #Unmap memory object del self.h_max_possible_score_zero_copy def 
_get_starting_point_byte_array(self): ''' Get the resulting starting points @return gives the resulting starting point array as byte array ''' self.h_starting_points_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_starting_points_zero_copy, cl.map_flags.READ, 0, (self.size_of_startingpoint * self.maximum_number_starting_points * self.number_of_sequences * self.number_targets, 1), dtype=numpy.byte)[0] return self.h_starting_points_zero_copy def _print_alignments(self, sequences, targets, start_seq, start_target, hit_list=None): SmithWaterman._print_alignments(self, sequences, targets, start_seq, start_target, hit_list) #unmap memory objects del self.h_global_direction_zero_copy del self.h_starting_points_zero_copy class SmithWatermanCPU(SmithWatermanOcl): ''' classdocs ''' def __init__(self, logger, score, settings): ''' Constructor ''' SmithWatermanOcl.__init__(self, logger, score, settings) self.oclcode = CPUcode(self.logger) self.workload_x = 4 self.workload_y = 4 self.workgroup_x = self.shared_x // self.workload_x self.workgroup_y = self.shared_y // self.workload_y self.d_semaphores = None self._init_oclcode() def _init_normal_memory(self): mem_size = SmithWatermanOcl._init_normal_memory(self) # Input matrix device memory memory = (SmithWaterman.float_size * (self.length_of_x_sequences + 1) * self.number_of_sequences * (self.length_of_y_sequences + 1) * self.number_targets) self.d_matrix = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory) mem_size += memory pattern = numpy.zeros((1),dtype=numpy.float32) cl.enqueue_fill_buffer(self.queue, self.d_matrix, pattern, 0, size = memory) # Maximum global device memory memory = (SmithWaterman.float_size * self.x_div_shared_x * self.number_of_sequences * self.y_div_shared_y * self.number_targets * self.workload_x * self.workload_y) self.d_global_maxima = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory) mem_size += memory memory = (SmithWaterman.int_size * self.length_of_x_sequences * 
self.number_of_sequences * self.length_of_y_sequences * self.number_targets) self.d_semaphores = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory) pattern = numpy.zeros((1),dtype=numpy.int32) cl.enqueue_fill_buffer(self.queue, self.d_semaphores, pattern, 0, size=memory) mem_size += memory return mem_size def _get_direction_byte_array(self): ''' Get the resulting directions @return gives the resulting direction array as byte array ''' self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_global_direction_zero_copy, cl.map_flags.READ, 0, (self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences), dtype=numpy.byte)[0] return self.h_global_direction_zero_copy def _get_direction(self, direction_array, sequence, target, block_x, block_y, value_x, value_y): return direction_array[sequence][target][block_x*self.shared_x + value_x][block_y*self.shared_y + value_y] def _set_direction(self, direction, direction_array, sequence, target, block_x, block_y, value_x, value_y): direction_array[sequence][target][block_x*self.shared_x + value_x][block_y*self.shared_y + value_y] = direction def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy): ''' Executes a single run of the calculate score kernel''' dim_block = (self.workgroup_x, self.workgroup_y) dim_grid_sw = (self.number_of_sequences * self.workgroup_x, self.number_targets * number_of_blocks * self.workgroup_y) self.program.calculateScore(self.queue, dim_grid_sw, dim_block, self.d_matrix, numpy.int32(idx), numpy.int32(idy), numpy.int32(number_of_blocks), self.d_sequences, self.d_targets, self.d_global_maxima, self.d_global_direction_zero_copy).wait() def _execute_traceback_kernel(self, number_of_blocks, idx, idy): ''' Executes a single run of the traceback kernel''' dim_block = (self.workgroup_x, self.workgroup_y) dim_grid_sw = (self.number_of_sequences * self.workgroup_x, self.number_targets * number_of_blocks * self.workgroup_y) 
self.program.traceback(self.queue, dim_grid_sw, dim_block, self.d_matrix, numpy.int32(idx), numpy.int32(idy), numpy.int32(number_of_blocks), self.d_global_maxima, self.d_global_direction_zero_copy, self.d_index_increment, self.d_starting_points_zero_copy, self.d_max_possible_score_zero_copy, self.d_semaphores).wait() def _clear_memory(self): SmithWatermanOcl._clear_memory(self) if (self.d_semaphores is not None): self.d_semaphores.release() class SmithWatermanGPU(SmithWatermanOcl): ''' classdocs ''' def __init__(self, logger, score, settings): ''' Constructor ''' SmithWatermanOcl.__init__(self, logger, score, settings) self.oclcode = GPUcode(self.logger) self._init_oclcode() def _init_normal_memory(self): mem_size = SmithWatermanOcl._init_normal_memory(self) # Input matrix device memory memory = (SmithWaterman.float_size * self.length_of_x_sequences * self.number_of_sequences * self.length_of_y_sequences * self.number_targets) self.d_matrix = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory) mem_size += memory # Maximum global device memory memory = (SmithWaterman.float_size * self.x_div_shared_x * self.number_of_sequences * self.y_div_shared_y * self.number_targets) self.d_global_maxima = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, size=memory) mem_size += memory return mem_size def _get_direction_byte_array(self): ''' Get the resulting directions @return gives the resulting direction array as byte array ''' self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_global_direction_zero_copy, cl.map_flags.READ, 0, (self.number_of_sequences, self.number_targets, self.x_div_shared_x, self.y_div_shared_y, self.shared_x, self.shared_y), dtype=numpy.byte)[0] return self.h_global_direction_zero_copy def _execute_calculate_score_kernel(self, number_of_blocks, idx, idy): ''' Executes a single run of the calculate score kernel''' dim_block = (self.shared_x, self.shared_y) dim_grid_sw = (self.number_of_sequences * self.shared_x, 
self.number_targets * number_of_blocks * self.shared_y) self.program.calculateScore(self.queue, dim_grid_sw, dim_block, self.d_matrix, numpy.int32(idx), numpy.int32(idy), numpy.int32(number_of_blocks), self.d_sequences, self.d_targets, self.d_global_maxima, self.d_global_direction_zero_copy).wait() def _execute_traceback_kernel(self, number_of_blocks, idx, idy): ''' Executes a single run of the traceback kernel''' dim_block = (self.shared_x, self.shared_y) dim_grid_sw = (self.number_of_sequences * self.shared_x, self.number_targets * number_of_blocks * self.shared_y) self.program.traceback(self.queue, dim_grid_sw, dim_block, self.d_matrix, numpy.int32(idx), numpy.int32(idy), numpy.int32(number_of_blocks), self.d_global_maxima, self.d_global_direction_zero_copy, self.d_index_increment, self.d_starting_points_zero_copy, self.d_max_possible_score_zero_copy).wait() class SmithWatermanNVIDIA(SmithWatermanGPU): ''' classdocs ''' def __init__(self, logger, score, settings): ''' Constructor ''' SmithWatermanGPU.__init__(self, logger, score, settings) self.pinned_starting_points_zero_copy = None self.pinned_max_possible_score_zero_copy = None self.pinned_global_direction_zero_copy = None self._init_oclcode() def _init_zero_copy_memory(self): self.logger.debug('Initializing NVIDIA zero-copy memory.') # Starting points host memory allocation and device copy memory = (self.size_of_startingpoint * self.maximum_number_starting_points * self.number_of_sequences * self.number_targets) self.pinned_starting_points_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.ALLOC_HOST_PTR, size=memory) self.d_starting_points_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=memory) self.h_starting_points_zero_copy = cl.enqueue_map_buffer(self.queue, self.pinned_starting_points_zero_copy, cl.map_flags.READ, 0, (memory, 1), dtype=numpy.byte)[0] mem_size = memory # Global directions host memory allocation and device copy memory = (self.length_of_x_sequences * self.number_of_sequences * 
                  self.length_of_y_sequences * self.number_targets)
        self.pinned_global_direction_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        self.d_global_direction_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.WRITE_ONLY, size=memory)
        self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.pinned_global_direction_zero_copy, cl.map_flags.READ, 0, (memory, 1), dtype=numpy.byte)[0]
        mem_size += memory

        # Maximum zero copy memory allocation and device copy
        # NOTE(review): this uses self.number_of_targets while the rest of
        # these classes use self.number_targets — confirm both attributes
        # exist and are intended here.
        memory = (self.number_of_sequences * self.number_of_targets * SmithWaterman.float_size)
        self.pinned_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.ALLOC_HOST_PTR, size=memory)
        self.d_max_possible_score_zero_copy = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY, size=memory)
        self.h_max_possible_score_zero_copy = cl.enqueue_map_buffer(self.queue, self.pinned_max_possible_score_zero_copy, cl.map_flags.WRITE, 0, (self.number_of_sequences * self.number_of_targets, 1), dtype=numpy.float32)[0]
        mem_size += memory

        # Zero copy buffers are allocated twice in NVIDIA
        return 2*mem_size

    def _set_max_possible_score(self, target_index, targets, i, index, records_seqs):
        """Fill the host-side max-possible-score array, then push it to the
        device buffer so the traceback kernel can read it."""
        cl.enqueue_copy(self.queue, self.d_max_possible_score_zero_copy, self.h_max_possible_score_zero_copy).wait()
        self._fill_max_possible_score(target_index, targets, i, index, records_seqs)

    def _get_direction_byte_array(self):
        """Map the device direction buffer into a 6-D host byte array
        (sequences, targets, x-tiles, y-tiles, shared_x, shared_y)."""
#        cl.enqueue_copy(self.queue, self.h_global_direction_zero_copy, self.d_global_direction_zero_copy).wait()
        self.h_global_direction_zero_copy = cl.enqueue_map_buffer(self.queue, self.d_global_direction_zero_copy, cl.map_flags.READ, 0,
                                                                  (self.number_of_sequences, self.number_targets, self.x_div_shared_x, self.y_div_shared_y, self.shared_x, self.shared_y),
                                                                  dtype=numpy.byte)[0]
        return self.h_global_direction_zero_copy

    def _get_starting_point_byte_array(self):
        """Copy the starting points from the device into the mapped pinned
        host array and return it."""
        cl.enqueue_copy(self.queue, self.h_starting_points_zero_copy, self.d_starting_points_zero_copy).wait()
        return self.h_starting_points_zero_copy

    def _clear_zero_copy_memory(self):
        """Release the zero-copy buffers: the device-side ones via the base
        class, then the pinned host-side ones allocated here."""
        SmithWatermanGPU._clear_zero_copy_memory(self)
        if (self.pinned_starting_points_zero_copy is not None):
            self.pinned_starting_points_zero_copy.release()
        if (self.pinned_global_direction_zero_copy is not None):
            self.pinned_global_direction_zero_copy.release()
        if (self.pinned_max_possible_score_zero_copy is not None):
            self.pinned_max_possible_score_zero_copy.release()

    def _compile_ocl_code(self):
        """Compile the OpenCL code with current settings.

        Builds with the NVIDIA preprocessor define so the kernel source can
        take NVIDIA-specific paths.
        """
        self.logger.debug('Compiling NVIDIA OpenCL code.')
        code = self.oclcode.get_code(self.score, self.number_of_sequences, self.number_targets, self.length_of_x_sequences, self.length_of_y_sequences)
        self.program = cl.Program(self.ctx, code).build(options=['-D', 'NVIDIA'])
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import unittest from collections import defaultdict from contextlib import contextmanager from tempfile import mkdtemp from textwrap import dedent from pants.backend.core.targets.dependencies import Dependencies from pants.base.address import SyntheticAddress from pants.base.build_configuration import BuildConfiguration from pants.base.build_file import FilesystemBuildFile from pants.base.build_file_address_mapper import BuildFileAddressMapper from pants.base.build_file_aliases import BuildFileAliases from pants.base.build_file_parser import BuildFileParser from pants.base.build_graph import BuildGraph from pants.base.build_root import BuildRoot from pants.base.cmd_line_spec_parser import CmdLineSpecParser from pants.base.config import Config from pants.base.exceptions import TaskError from pants.base.source_root import SourceRoot from pants.base.target import Target from pants.goal.goal import Goal from pants.goal.products import MultipleRootedProducts, UnionProducts from pants.option.options import Options from pants.option.options_bootstrapper import OptionsBootstrapper, register_bootstrap_options from pants.subsystem.subsystem import Subsystem from pants.util.contextutil import pushd, temporary_dir from pants.util.dirutil import safe_mkdir, safe_open, safe_rmtree, touch from pants_test.base.context_utils import create_context # TODO: Rename to 'TestBase', for uniformity, and also for logic: This is a baseclass # for tests, not a test of a thing called 'Base'. 
class BaseTest(unittest.TestCase): """A baseclass useful for tests requiring a temporary buildroot.""" def build_path(self, relpath): """Returns the canonical BUILD file path for the given relative build path.""" if os.path.basename(relpath).startswith('BUILD'): return relpath else: return os.path.join(relpath, 'BUILD') def create_dir(self, relpath): """Creates a directory under the buildroot. relpath: The relative path to the directory from the build root. """ path = os.path.join(self.build_root, relpath) safe_mkdir(path) return path def create_file(self, relpath, contents='', mode='wb'): """Writes to a file under the buildroot. relpath: The relative path to the file from the build root. contents: A string containing the contents of the file - '' by default.. mode: The mode to write to the file in - over-write by default. """ path = os.path.join(self.build_root, relpath) with safe_open(path, mode=mode) as fp: fp.write(contents) return path def add_to_build_file(self, relpath, target): """Adds the given target specification to the BUILD file at relpath. relpath: The relative path to the BUILD file from the build root. target: A string containing the target definition as it would appear in a BUILD file. 
""" self.create_file(self.build_path(relpath), target, mode='a') cls = self.address_mapper._build_file_type return cls(root_dir=self.build_root, relpath=self.build_path(relpath)) def make_target(self, spec='', target_type=Target, dependencies=None, resources = None, derived_from=None, **kwargs): address = SyntheticAddress.parse(spec) target = target_type(name=address.target_name, address=address, build_graph=self.build_graph, **kwargs) dependencies = dependencies or [] dependencies.extend(resources or []) self.build_graph.inject_target(target, dependencies=[dep.address for dep in dependencies], derived_from=derived_from) return target @property def alias_groups(self): return BuildFileAliases.create(targets={'target': Dependencies}) def setUp(self): super(BaseTest, self).setUp() Goal.clear() self.real_build_root = BuildRoot().path self.build_root = os.path.realpath(mkdtemp(suffix='_BUILD_ROOT')) self.pants_workdir = os.path.join(self.build_root, '.pants.d') safe_mkdir(self.pants_workdir) self.options = defaultdict(dict) # scope -> key-value mapping. 
self.options[''] = { 'pants_workdir': self.pants_workdir, 'pants_supportdir': os.path.join(self.build_root, 'build-support'), 'pants_distdir': os.path.join(self.build_root, 'dist'), 'pants_configdir': os.path.join(self.build_root, 'config'), 'cache_key_gen_version': '0-test', } BuildRoot().path = self.build_root Subsystem.reset() self.create_file('pants.ini') build_configuration = BuildConfiguration() build_configuration.register_aliases(self.alias_groups) self.build_file_parser = BuildFileParser(build_configuration, self.build_root) self.address_mapper = BuildFileAddressMapper(self.build_file_parser, FilesystemBuildFile) self.build_graph = BuildGraph(address_mapper=self.address_mapper) self.bootstrap_option_values = OptionsBootstrapper().get_bootstrap_options().for_global_scope() def reset_build_graph(self): """Start over with a fresh build graph with no targets in it.""" self.address_mapper = BuildFileAddressMapper(self.build_file_parser, FilesystemBuildFile) self.build_graph = BuildGraph(address_mapper=self.address_mapper) def set_options_for_scope(self, scope, **kwargs): self.options[scope].update(kwargs) def context(self, for_task_types=None, options=None, target_roots=None, console_outstream=None, workspace=None): for_task_types = for_task_types or [] options = options or {} option_values = defaultdict(dict) registered_global_subsystems = set() # Get default values for all options registered by the tasks in for_task_types. # TODO: This is clunky and somewhat repetitive of the real registration code. for task_type in for_task_types: scope = task_type.options_scope if scope is None: raise TaskError('You must set a scope on your task type before using it in tests.') # We provide our own test-only registration implementation, bypassing argparse. # When testing we set option values directly, so we don't care about cmd-line flags, config, # env vars etc. In fact, for test isolation we explicitly don't want to look at those. 
      def register_func(on_scope):
        # Test-only option registration: instead of wiring options through
        # argparse, record each registered option's default straight into
        # option_values[on_scope].
        def register(*rargs, **rkwargs):
          scoped_options = option_values[on_scope]
          default = rkwargs.get('default')
          # append-actions default to an empty list, mirroring argparse.
          if default is None and rkwargs.get('action') == 'append':
            default = []
          for flag_name in rargs:
            # '--foo-bar' -> 'foo_bar'
            option_name = flag_name.lstrip('-').replace('-', '_')
            scoped_options[option_name] = default
        # Registration code expects these attributes on the register callable.
        register.bootstrap = self.bootstrap_option_values
        register.scope = on_scope
        return register

      # Register global/bootstrap options, the task's own options, and each
      # subsystem's options (global subsystems only once).
      register_bootstrap_options(register_func(Options.GLOBAL_SCOPE), self.build_root)
      task_type.register_options(register_func(scope))
      for subsystem in task_type.global_subsystems():
        if subsystem not in registered_global_subsystems:
          subsystem.register_options(register_func(subsystem.qualify_scope(Options.GLOBAL_SCOPE)))
          registered_global_subsystems.add(subsystem)
      for subsystem in task_type.task_subsystems():
        subsystem.register_options(register_func(subsystem.qualify_scope(scope)))

    # Now override with any caller-specified values.
    # TODO(benjy): Get rid of the options arg, and require tests to call set_options.
    for scope, opts in options.items():
      for key, val in opts.items():
        option_values[scope][key] = val

    for scope, opts in self.options.items():
      for key, val in opts.items():
        option_values[scope][key] = val

    # Make inner scopes inherit option values from their enclosing scopes.
    # Iterating in sorted order guarantees that we see outer scopes before inner scopes,
    # and therefore only have to inherit from our immediately enclosing scope.
    for scope in sorted(option_values.keys()):
      if scope != Options.GLOBAL_SCOPE:
        enclosing_scope = scope.rpartition('.')[0]
        opts = option_values[scope]
        for key, val in option_values.get(enclosing_scope, {}).items():
          if key not in opts:  # Inner scope values override the inherited ones.
opts[key] = val context = create_context(options=option_values, target_roots=target_roots, build_graph=self.build_graph, build_file_parser=self.build_file_parser, address_mapper=self.address_mapper, console_outstream=console_outstream, workspace=workspace) Subsystem._options = context.options return context def tearDown(self): BuildRoot().reset() SourceRoot.reset() safe_rmtree(self.build_root) FilesystemBuildFile.clear_cache() def target(self, spec): """Resolves the given target address to a Target object. address: The BUILD target address to resolve. Returns the corresponding Target or else None if the address does not point to a defined Target. """ address = SyntheticAddress.parse(spec) self.build_graph.inject_address_closure(address) return self.build_graph.get_target(address) def targets(self, spec): """Resolves a target spec to one or more Target objects. spec: Either BUILD target address or else a target glob using the siblings ':' or descendants '::' suffixes. Returns the set of all Targets found. """ spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper) addresses = list(spec_parser.parse_addresses(spec)) for address in addresses: self.build_graph.inject_address_closure(address) targets = [self.build_graph.get_target(address) for address in addresses] return targets def create_files(self, path, files): """Writes to a file under the buildroot with contents same as file name. path: The relative path to the file from the build root. files: List of file names. """ for f in files: self.create_file(os.path.join(path, f), contents=f) def create_library(self, path, target_type, name, sources=None, **kwargs): """Creates a library target of given type at the BUILD file at path with sources path: The relative path to the BUILD file from the build root. target_type: valid pants target type. name: Name of the library target. sources: List of source file at the path relative to path. **kwargs: Optional attributes that can be set for any library target. 
Currently it includes support for resources, java_sources, provides and dependencies. """ if sources: self.create_files(path, sources) self.add_to_build_file(path, dedent(''' %(target_type)s(name='%(name)s', %(sources)s %(resources)s %(java_sources)s %(provides)s %(dependencies)s ) ''' % dict(target_type=target_type, name=name, sources=('sources=%s,' % repr(sources) if sources else ''), resources=('resources=["%s"],' % kwargs.get('resources') if 'resources' in kwargs else ''), java_sources=('java_sources=[%s],' % ','.join(map(lambda str_target: '"%s"' % str_target, kwargs.get('java_sources'))) if 'java_sources' in kwargs else ''), provides=('provides=%s,' % kwargs.get('provides') if 'provides' in kwargs else ''), dependencies=('dependencies=%s,' % kwargs.get('dependencies') if 'dependencies' in kwargs else ''), ))) return self.target('%s:%s' % (path, name)) def create_resources(self, path, name, *sources): return self.create_library(path, 'resources', name, sources) @contextmanager def workspace(self, *buildfiles): with temporary_dir() as root_dir: with BuildRoot().temporary(root_dir): with pushd(root_dir): for buildfile in buildfiles: touch(os.path.join(root_dir, buildfile)) yield os.path.realpath(root_dir) def populate_compile_classpath(self, context, classpath=None): """ Helps actual test cases to populate the 'compile_classpath' products data mapping in the context, which holds the classpath value for targets. :param context: The execution context where the products data mapping lives. :param classpath: a list of classpath strings. If not specified, ['none'] will be used. 
""" compile_classpaths = context.products.get_data('compile_classpath', lambda: UnionProducts()) compile_classpaths.add_for_targets(context.targets(), [('default', entry) for entry in classpath or ['none']]) @contextmanager def add_data(self, context_products, data_type, target, *products): make_products = lambda: defaultdict(MultipleRootedProducts) data_by_target = context_products.get_data(data_type, make_products) with temporary_dir() as outdir: def create_product(product): abspath = os.path.join(outdir, product) with safe_open(abspath, mode='w') as fp: fp.write(product) return abspath data_by_target[target].add_abs_paths(outdir, map(create_product, products)) yield temporary_dir @contextmanager def add_products(self, context_products, product_type, target, *products): product_mapping = context_products.get(product_type) with temporary_dir() as outdir: def create_product(product): with safe_open(os.path.join(outdir, product), mode='w') as fp: fp.write(product) return product product_mapping.add(target, outdir, map(create_product, products)) yield temporary_dir def set_bootstrap_options(self, **values): """Override some of the bootstrap option values.""" self.bootstrap_option_values.update(values) self.set_options_for_scope(Options.GLOBAL_SCOPE, **values) Restore Config caching in tests. It turns out that some tests still use it, subtly. Things were working before because of the order in which some tests happened to be run :( TODO: Figure out that test interdependency issue. Testing Done: CI passed: https://travis-ci.org/pantsbuild/pants/builds/61229496. Bugs closed: 1487 Reviewed at https://rbcommons.com/s/twitter/r/2160/ # coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). 
from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import unittest from collections import defaultdict from contextlib import contextmanager from tempfile import mkdtemp from textwrap import dedent from pants.backend.core.targets.dependencies import Dependencies from pants.base.address import SyntheticAddress from pants.base.build_configuration import BuildConfiguration from pants.base.build_file import FilesystemBuildFile from pants.base.build_file_address_mapper import BuildFileAddressMapper from pants.base.build_file_aliases import BuildFileAliases from pants.base.build_file_parser import BuildFileParser from pants.base.build_graph import BuildGraph from pants.base.build_root import BuildRoot from pants.base.cmd_line_spec_parser import CmdLineSpecParser from pants.base.config import Config from pants.base.exceptions import TaskError from pants.base.source_root import SourceRoot from pants.base.target import Target from pants.goal.goal import Goal from pants.goal.products import MultipleRootedProducts, UnionProducts from pants.option.options import Options from pants.option.options_bootstrapper import OptionsBootstrapper, register_bootstrap_options from pants.subsystem.subsystem import Subsystem from pants.util.contextutil import pushd, temporary_dir from pants.util.dirutil import safe_mkdir, safe_open, safe_rmtree, touch from pants_test.base.context_utils import create_context # TODO: Rename to 'TestBase', for uniformity, and also for logic: This is a baseclass # for tests, not a test of a thing called 'Base'. class BaseTest(unittest.TestCase): """A baseclass useful for tests requiring a temporary buildroot.""" @classmethod def setUpClass(cls): """Ensure that all code has a config to read from the cache. TODO: Yuck. Get rid of this after plumbing options through in the right places. 
""" super(BaseTest, cls).setUpClass() Config.cache(Config.load()) def build_path(self, relpath): """Returns the canonical BUILD file path for the given relative build path.""" if os.path.basename(relpath).startswith('BUILD'): return relpath else: return os.path.join(relpath, 'BUILD') def create_dir(self, relpath): """Creates a directory under the buildroot. relpath: The relative path to the directory from the build root. """ path = os.path.join(self.build_root, relpath) safe_mkdir(path) return path def create_file(self, relpath, contents='', mode='wb'): """Writes to a file under the buildroot. relpath: The relative path to the file from the build root. contents: A string containing the contents of the file - '' by default.. mode: The mode to write to the file in - over-write by default. """ path = os.path.join(self.build_root, relpath) with safe_open(path, mode=mode) as fp: fp.write(contents) return path def add_to_build_file(self, relpath, target): """Adds the given target specification to the BUILD file at relpath. relpath: The relative path to the BUILD file from the build root. target: A string containing the target definition as it would appear in a BUILD file. 
""" self.create_file(self.build_path(relpath), target, mode='a') cls = self.address_mapper._build_file_type return cls(root_dir=self.build_root, relpath=self.build_path(relpath)) def make_target(self, spec='', target_type=Target, dependencies=None, resources = None, derived_from=None, **kwargs): address = SyntheticAddress.parse(spec) target = target_type(name=address.target_name, address=address, build_graph=self.build_graph, **kwargs) dependencies = dependencies or [] dependencies.extend(resources or []) self.build_graph.inject_target(target, dependencies=[dep.address for dep in dependencies], derived_from=derived_from) return target @property def alias_groups(self): return BuildFileAliases.create(targets={'target': Dependencies}) def setUp(self): super(BaseTest, self).setUp() Goal.clear() self.real_build_root = BuildRoot().path self.build_root = os.path.realpath(mkdtemp(suffix='_BUILD_ROOT')) self.pants_workdir = os.path.join(self.build_root, '.pants.d') safe_mkdir(self.pants_workdir) self.options = defaultdict(dict) # scope -> key-value mapping. 
self.options[''] = { 'pants_workdir': self.pants_workdir, 'pants_supportdir': os.path.join(self.build_root, 'build-support'), 'pants_distdir': os.path.join(self.build_root, 'dist'), 'pants_configdir': os.path.join(self.build_root, 'config'), 'cache_key_gen_version': '0-test', } BuildRoot().path = self.build_root Subsystem.reset() self.create_file('pants.ini') build_configuration = BuildConfiguration() build_configuration.register_aliases(self.alias_groups) self.build_file_parser = BuildFileParser(build_configuration, self.build_root) self.address_mapper = BuildFileAddressMapper(self.build_file_parser, FilesystemBuildFile) self.build_graph = BuildGraph(address_mapper=self.address_mapper) self.bootstrap_option_values = OptionsBootstrapper().get_bootstrap_options().for_global_scope() def reset_build_graph(self): """Start over with a fresh build graph with no targets in it.""" self.address_mapper = BuildFileAddressMapper(self.build_file_parser, FilesystemBuildFile) self.build_graph = BuildGraph(address_mapper=self.address_mapper) def set_options_for_scope(self, scope, **kwargs): self.options[scope].update(kwargs) def context(self, for_task_types=None, options=None, target_roots=None, console_outstream=None, workspace=None): for_task_types = for_task_types or [] options = options or {} option_values = defaultdict(dict) registered_global_subsystems = set() # Get default values for all options registered by the tasks in for_task_types. # TODO: This is clunky and somewhat repetitive of the real registration code. for task_type in for_task_types: scope = task_type.options_scope if scope is None: raise TaskError('You must set a scope on your task type before using it in tests.') # We provide our own test-only registration implementation, bypassing argparse. # When testing we set option values directly, so we don't care about cmd-line flags, config, # env vars etc. In fact, for test isolation we explicitly don't want to look at those. 
def register_func(on_scope): def register(*rargs, **rkwargs): scoped_options = option_values[on_scope] default = rkwargs.get('default') if default is None and rkwargs.get('action') == 'append': default = [] for flag_name in rargs: option_name = flag_name.lstrip('-').replace('-', '_') scoped_options[option_name] = default register.bootstrap = self.bootstrap_option_values register.scope = on_scope return register register_bootstrap_options(register_func(Options.GLOBAL_SCOPE), self.build_root) task_type.register_options(register_func(scope)) for subsystem in task_type.global_subsystems(): if subsystem not in registered_global_subsystems: subsystem.register_options(register_func(subsystem.qualify_scope(Options.GLOBAL_SCOPE))) registered_global_subsystems.add(subsystem) for subsystem in task_type.task_subsystems(): subsystem.register_options(register_func(subsystem.qualify_scope(scope))) # Now override with any caller-specified values. # TODO(benjy): Get rid of the options arg, and require tests to call set_options. for scope, opts in options.items(): for key, val in opts.items(): option_values[scope][key] = val for scope, opts in self.options.items(): for key, val in opts.items(): option_values[scope][key] = val # Make inner scopes inherit option values from their enclosing scopes. # Iterating in sorted order guarantees that we see outer scopes before inner scopes, # and therefore only have to inherit from our immediately enclosing scope. for scope in sorted(option_values.keys()): if scope != Options.GLOBAL_SCOPE: enclosing_scope = scope.rpartition('.')[0] opts = option_values[scope] for key, val in option_values.get(enclosing_scope, {}).items(): if key not in opts: # Inner scope values override the inherited ones. 
opts[key] = val context = create_context(options=option_values, target_roots=target_roots, build_graph=self.build_graph, build_file_parser=self.build_file_parser, address_mapper=self.address_mapper, console_outstream=console_outstream, workspace=workspace) Subsystem._options = context.options return context def tearDown(self): BuildRoot().reset() SourceRoot.reset() safe_rmtree(self.build_root) FilesystemBuildFile.clear_cache() def target(self, spec): """Resolves the given target address to a Target object. address: The BUILD target address to resolve. Returns the corresponding Target or else None if the address does not point to a defined Target. """ address = SyntheticAddress.parse(spec) self.build_graph.inject_address_closure(address) return self.build_graph.get_target(address) def targets(self, spec): """Resolves a target spec to one or more Target objects. spec: Either BUILD target address or else a target glob using the siblings ':' or descendants '::' suffixes. Returns the set of all Targets found. """ spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper) addresses = list(spec_parser.parse_addresses(spec)) for address in addresses: self.build_graph.inject_address_closure(address) targets = [self.build_graph.get_target(address) for address in addresses] return targets def create_files(self, path, files): """Writes to a file under the buildroot with contents same as file name. path: The relative path to the file from the build root. files: List of file names. """ for f in files: self.create_file(os.path.join(path, f), contents=f) def create_library(self, path, target_type, name, sources=None, **kwargs): """Creates a library target of given type at the BUILD file at path with sources path: The relative path to the BUILD file from the build root. target_type: valid pants target type. name: Name of the library target. sources: List of source file at the path relative to path. **kwargs: Optional attributes that can be set for any library target. 
Currently it includes support for resources, java_sources, provides and dependencies. """ if sources: self.create_files(path, sources) self.add_to_build_file(path, dedent(''' %(target_type)s(name='%(name)s', %(sources)s %(resources)s %(java_sources)s %(provides)s %(dependencies)s ) ''' % dict(target_type=target_type, name=name, sources=('sources=%s,' % repr(sources) if sources else ''), resources=('resources=["%s"],' % kwargs.get('resources') if 'resources' in kwargs else ''), java_sources=('java_sources=[%s],' % ','.join(map(lambda str_target: '"%s"' % str_target, kwargs.get('java_sources'))) if 'java_sources' in kwargs else ''), provides=('provides=%s,' % kwargs.get('provides') if 'provides' in kwargs else ''), dependencies=('dependencies=%s,' % kwargs.get('dependencies') if 'dependencies' in kwargs else ''), ))) return self.target('%s:%s' % (path, name)) def create_resources(self, path, name, *sources): return self.create_library(path, 'resources', name, sources) @contextmanager def workspace(self, *buildfiles): with temporary_dir() as root_dir: with BuildRoot().temporary(root_dir): with pushd(root_dir): for buildfile in buildfiles: touch(os.path.join(root_dir, buildfile)) yield os.path.realpath(root_dir) def populate_compile_classpath(self, context, classpath=None): """ Helps actual test cases to populate the 'compile_classpath' products data mapping in the context, which holds the classpath value for targets. :param context: The execution context where the products data mapping lives. :param classpath: a list of classpath strings. If not specified, ['none'] will be used. 
""" compile_classpaths = context.products.get_data('compile_classpath', lambda: UnionProducts()) compile_classpaths.add_for_targets(context.targets(), [('default', entry) for entry in classpath or ['none']]) @contextmanager def add_data(self, context_products, data_type, target, *products): make_products = lambda: defaultdict(MultipleRootedProducts) data_by_target = context_products.get_data(data_type, make_products) with temporary_dir() as outdir: def create_product(product): abspath = os.path.join(outdir, product) with safe_open(abspath, mode='w') as fp: fp.write(product) return abspath data_by_target[target].add_abs_paths(outdir, map(create_product, products)) yield temporary_dir @contextmanager def add_products(self, context_products, product_type, target, *products): product_mapping = context_products.get(product_type) with temporary_dir() as outdir: def create_product(product): with safe_open(os.path.join(outdir, product), mode='w') as fp: fp.write(product) return product product_mapping.add(target, outdir, map(create_product, products)) yield temporary_dir def set_bootstrap_options(self, **values): """Override some of the bootstrap option values.""" self.bootstrap_option_values.update(values) self.set_options_for_scope(Options.GLOBAL_SCOPE, **values)
from collections import OrderedDict

from django.http import Http404, HttpResponseForbidden
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.db import IntegrityError
from django.db.models import Q
from django.forms.formsets import formset_factory
from django.shortcuts import render, redirect, get_object_or_404

from .forms import RegisterUserForm, AddMonsterInstanceForm, EditMonsterInstanceForm, AwakenMonsterInstanceForm, \
    PowerUpMonsterInstanceForm, EditEssenceStorageForm, EditSummonerForm, EditUserForm, EditTeamForm, AddTeamGroupForm, \
    DeleteTeamGroupForm
from .models import Monster, Summoner, MonsterInstance, Fusion, TeamGroup, Team
from .fusion import essences_missing, total_awakening_cost


def register(request):
    """Create a User plus its linked Summoner profile, then log the new user
    in and redirect to their profile list view.

    On a duplicate username (IntegrityError) the form is re-shown with an
    error attached to the username field.
    """
    form = RegisterUserForm(request.POST or None)

    if request.method == 'POST':
        if form.is_valid():
            try:
                # Create the user
                new_user = User.objects.create_user(
                    username=form.cleaned_data['username'],
                    password=form.cleaned_data['password'],
                    email=form.cleaned_data['email'],
                )
                new_user.save()
                new_summoner = Summoner.objects.create(
                    user=new_user,
                    summoner_name=form.cleaned_data['summoner_name'],
                    public=form.cleaned_data['is_public'],
                )
                new_summoner.save()

                # Automatically log them in
                user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
                if user is not None:
                    if user.is_active:
                        login(request, user)
                        return redirect('herders:profile', profile_name=user.username, view_mode='list')
            except IntegrityError:
                form.add_error('username', 'Username already taken')

    context = {'form': form}

    return render(request, 'herders/register.html', context)


def profile(request, profile_name=None, view_mode='list', sort_method='grade'):
    """Display a summoner's monster collection.

    :param profile_name: username to show; defaults to the logged-in user.
    :param view_mode: 'list' (flat table) or 'box' (grouped buckets).
    :param sort_method: bucket key for box mode: 'grade', 'level' or 'attribute'.
    :raises Http404: unknown user, unknown view mode, or invalid sort method.

    Non-owners only see the collection when the summoner profile is public.
    """
    if profile_name is None:
        if request.user.is_authenticated():
            profile_name = request.user.username
        else:
            raise Http404('No user profile specified and not logged in. ')

    summoner = get_object_or_404(Summoner, user__username=profile_name)

    # Determine if the person logged in is the one requesting the view
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    context = {
        'add_monster_form': AddMonsterInstanceForm(),
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
        'view_mode': view_mode,
        'sort_method': sort_method,
        'return_path': request.path,
        'view': 'profile',
    }

    if is_owner or summoner.public:
        if view_mode.lower() == 'list':
            context['monster_stable'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner)
            return render(request, 'herders/profile/profile_view.html', context)
        elif view_mode.lower() == 'box':
            if sort_method == 'grade':
                # One bucket per star rating, 6* first.
                monster_stable = OrderedDict()
                monster_stable['6*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=6).order_by('-level', 'monster__name')
                monster_stable['5*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=5).order_by('-level', 'monster__name')
                monster_stable['4*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=4).order_by('-level', 'monster__name')
                monster_stable['3*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=3).order_by('-level', 'monster__name')
                monster_stable['2*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=2).order_by('-level', 'monster__name')
                monster_stable['1*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=1).order_by('-level', 'monster__name')
            elif sort_method == 'level':
                # Level ranges: 40, 31-39, 21-30, 11-20, 1-10.
                monster_stable = OrderedDict()
                monster_stable['40'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level=40).order_by('-level', '-stars', 'monster__name')
                monster_stable['39-31'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__gt=30).filter(level__lt=40).order_by('-level', '-stars', 'monster__name')
                monster_stable['30-21'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__gt=20).filter(level__lte=30).order_by('-level', '-stars', 'monster__name')
                monster_stable['20-11'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__gt=10).filter(level__lte=20).order_by('-level', '-stars', 'monster__name')
                monster_stable['10-1'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__lte=10).order_by('-level', '-stars', 'monster__name')
            elif sort_method == 'attribute':
                # One bucket per element.
                monster_stable = OrderedDict()
                monster_stable['water'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name')
                monster_stable['fire'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name')
                monster_stable['wind'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name')
                monster_stable['light'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name')
                monster_stable['dark'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name')
            else:
                raise Http404('Invalid sort method')

            context['monster_stable'] = monster_stable
            return render(request, 'herders/profile/profile_box.html', context)
        else:
            raise Http404('Unknown profile view mode')
    else:
        return render(request, 'herders/profile/not_public.html')


@login_required
def profile_edit(request, profile_name):
    return_path = 
request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) user_form = EditUserForm(request.POST or None, instance=request.user) summoner_form = EditSummonerForm(request.POST or None, instance=request.user.summoner) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'user_form': user_form, 'summoner_form': summoner_form, } if is_owner: if request.method == 'POST' and summoner_form.is_valid() and user_form.is_valid(): summoner_form.save() user_form.save() messages.success(request, 'Your profile has been updated.') return redirect(return_path) else: return render(request, 'herders/profile/profile_edit.html', context) else: return HttpResponseForbidden() @login_required def profile_storage(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = EditEssenceStorageForm(request.POST or None, instance=request.user.summoner) form.helper.form_action = request.path + '?next=' + return_path context = { 'is_owner': is_owner, 'profile_name': request.user.username, 'summoner': summoner, 'storage_form': form, 'view': 'storage', 'profile_view': 'materials', } if request.method == 'POST' and form.is_valid(): form.save() return redirect(return_path) else: return render(request, 'herders/essence_storage.html', context) @login_required() def monster_instance_add(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) 
is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddMonsterInstanceForm(request.POST or None) if form.is_valid() and request.method == 'POST': # Create the monster instance new_monster = form.save(commit=False) new_monster.owner = request.user.summoner new_monster.save() messages.success(request, 'Added %s to your collection.' % new_monster) return redirect(return_path) else: # Re-show same page but with form filled in and errors shown context = { 'profile_name': profile_name, 'summoner': summoner, 'add_monster_form': form, 'return_path': return_path, 'is_owner': is_owner, 'view': 'profile', } return render(request, 'herders/profile/profile_monster_add.html', context) @login_required() def monster_instance_quick_add(request, profile_name, monster_id, stars, level): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_to_add = get_object_or_404(Monster, pk=monster_id) if is_owner: MonsterInstance.objects.create(owner=summoner, monster=monster_to_add, stars=stars, level=level, fodder=True, notes='', priority=MonsterInstance.PRIORITY_DONE) return redirect(return_path) else: return HttpResponseForbidden() def monster_instance_view(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) context = { 'profile_name': request.user.username, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'view': 'profile', } return render(request, 'herders/profile/profile_monster_view.html', 
context) @login_required() def monster_instance_edit(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) form = EditMonsterInstanceForm(request.POST or None, instance=monster) form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': request.user.username, 'summoner': summoner, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'edit_monster_form': form, 'view': 'profile', } if is_owner: if request.method == 'POST': if form.is_valid(): monster = form.save(commit=False) monster.save() messages.success(request, 'Saved changes to %s.' % monster) return redirect(return_path) else: # Redisplay form with validation error messages context['validation_errors'] = form.non_field_errors() else: raise PermissionDenied() return render(request, 'herders/profile/profile_monster_edit.html', context) @login_required() def monster_instance_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == monster.owner: monster.delete() return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_power_up(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = 
get_object_or_404(MonsterInstance, pk=instance_id) PowerUpFormset = formset_factory(PowerUpMonsterInstanceForm, extra=5, max_num=5) if request.method == 'POST': formset = PowerUpFormset(request.POST) else: formset = PowerUpFormset() context = { 'profile_name': request.user.username, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'power_up_formset_action': request.path + '?next=' + return_path, 'power_up_formset': formset, 'view': 'profile', } food_monsters = [] validation_errors = {} if is_owner: if request.method == 'POST': # return render(request, 'herders/view_post_data.html', {'post_data': request.POST}) if formset.is_valid(): # Create list of submitted food monsters for instance in formset.cleaned_data: # Some fields may be blank if user skipped a form input or didn't fill in all 5 if instance: food_monsters.append(instance['monster']) # Check that all food monsters are unique - This is done whether or not user bypassed evolution checks if len(food_monsters) != len(set(food_monsters)): validation_errors['food_monster_unique'] = "You submitted duplicate food monsters. Please select unique monsters for each slot." # Check that monster is not being fed to itself for food in food_monsters: if food == monster: validation_errors['base_food_same'] = "You can't feed a monster to itself. " is_evolution = request.POST.get('evolve', False) # Perform validation checks for evolve action if is_evolution: # Check constraints on evolving (or not, if form element was set) if not request.POST.get('ignore_errors', False): # Check monster level and stars if monster.stars >= 6: validation_errors['base_monster_stars'] = "%s is already at 6 stars." % monster.monster.name if monster.level != monster.max_level_from_stars(): validation_errors['base_monster_level'] = "%s is not at max level for the current star rating (Lvl %s)." 
% (monster.monster.name, monster.monster.max_level_from_stars()) # Check number of fodder monsters if len(food_monsters) < monster.stars: validation_errors['food_monster_quantity'] = "Evolution requres %s food monsters." % monster.stars # Check fodder star ratings - must be same as monster for food in food_monsters: if food.stars != monster.stars: if 'food_monster_stars' not in validation_errors: validation_errors['food_monster_stars'] = "All food monsters must be %s stars." % monster.stars else: # Record state of ignore evolve rules for form redisplay context['ignore_evolve_checked'] = True # Perform the stars++ if no errors if not validation_errors: # Level up stars monster.stars += 1 monster.level = 1 monster.save() messages.success(request, 'Successfully evolved %s to %s<span class="glyphicon glyphicon-star"></span>' % (monster.monster.name, monster.stars), extra_tags='safe') if not validation_errors: # Delete the submitted monsters for food in food_monsters: if food.owner == request.user.summoner: messages.success(request, 'Deleted %s' % food) food.delete() else: raise PermissionDenied("Trying to delete a monster you don't own") # Redirect back to return path if evolved, or go to edit screen if power up if is_evolution: return redirect(return_path) else: return redirect( reverse('herders:monster_instance_edit', kwargs={'profile_name':profile_name, 'instance_id': instance_id}) + '?next=' + return_path ) else: context['form_errors'] = formset.errors else: raise PermissionDenied("Trying to power up or evolve a monster you don't own") # Any errors in the form will fall through to here and be displayed context['validation_errors'] = validation_errors return render(request, 'herders/profile/profile_power_up.html', context) @login_required() def monster_instance_awaken(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = 
get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) if monster.monster.is_awakened: return redirect(return_path) form = AwakenMonsterInstanceForm(request.POST or None) form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': request.user.username, 'summoner': summoner, 'is_owner': is_owner, # Because of @login_required decorator 'return_path': return_path, 'monster': monster, 'awaken_monster_form': form, } if request.method == 'POST' and form.is_valid() and is_owner: # Subtract essences from inventory if requested if form.cleaned_data['subtract_materials']: summoner = Summoner.objects.get(user=request.user) if monster.monster.awaken_magic_mats_high: summoner.storage_magic_high -= monster.monster.awaken_magic_mats_high if monster.monster.awaken_magic_mats_mid: summoner.storage_magic_mid -= monster.monster.awaken_magic_mats_mid if monster.monster.awaken_magic_mats_low: summoner.storage_magic_low -= monster.monster.awaken_magic_mats_low if monster.monster.element == Monster.ELEMENT_FIRE: if monster.monster.awaken_ele_mats_high: summoner.storage_fire_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_fire_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_fire_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WATER: if monster.monster.awaken_ele_mats_high: summoner.storage_water_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_water_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_water_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WIND: if monster.monster.awaken_ele_mats_high: summoner.storage_wind_high -= 
monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_wind_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_wind_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_DARK: if monster.monster.awaken_ele_mats_high: summoner.storage_dark_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_dark_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_dark_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_LIGHT: if monster.monster.awaken_ele_mats_high: summoner.storage_light_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_light_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_light_low -= monster.monster.awaken_ele_mats_low summoner.save() # Perform the awakening by instance's monster source ID monster.monster = monster.monster.awakens_to monster.save() return redirect(return_path) else: storage = summoner.get_storage() available_essences = OrderedDict() for element, essences in monster.monster.get_awakening_materials().iteritems(): available_essences[element] = OrderedDict() for size, cost in essences.iteritems(): available_essences[element][size] = dict() available_essences[element][size]['qty'] = storage[element][size] available_essences[element][size]['sufficient'] = storage[element][size] >= cost print available_essences context['available_essences'] = available_essences return render(request, 'herders/profile/profile_monster_awaken.html', context) @login_required() def monster_instance_duplicate(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, 
pk=instance_id) # Check for proper owner before copying if request.user.summoner == monster.owner: newmonster = monster newmonster.pk = None newmonster.save() return redirect(return_path) else: return HttpResponseForbidden() def fusion_progress(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:fusion', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'view': 'fusion', 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'is_owner': is_owner, } fusions = Fusion.objects.all().select_related() progress = OrderedDict() if is_owner or summoner.public: for fusion in fusions: level = 10 + fusion.stars * 5 ingredients = [] # Check if fusion has been completed already fusion_complete = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=fusion.product) | Q(monster=fusion.product.awakens_to) ).count() > 0 # Scan summoner's collection for instances each ingredient for ingredient in fusion.ingredients.all(): owned_ingredients = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=ingredient) | Q(monster=ingredient.awakens_from), ).order_by('-stars', '-level', '-monster__is_awakened') sub_fusion_available = Fusion.objects.filter(product=ingredient.awakens_from).exists() # Determine if each individual requirement is met using highest evolved/leveled monster that is not ignored for fusion for owned_ingredient in owned_ingredients: if not owned_ingredient.ignore_for_fusion: acquired = True evolved = owned_ingredient.stars >= fusion.stars leveled = owned_ingredient.level >= level awakened = owned_ingredient.monster.is_awakened complete = acquired & evolved & leveled & awakened break else: acquired = False evolved = False leveled = False awakened = False complete = False ingredient_progress = { 'instance': ingredient, 'sub_fusion_available': 
sub_fusion_available, 'owned': owned_ingredients, 'complete': complete, 'acquired': acquired, 'evolved': evolved, 'leveled': leveled, 'awakened': awakened, } ingredients.append(ingredient_progress) fusion_ready = True for i in ingredients: if not i['complete']: fusion_ready = False total_cost = total_awakening_cost(ingredients) total_missing = essences_missing(summoner.get_storage(), total_cost) progress[fusion.product.name] = { 'instance': fusion.product, 'acquired': fusion_complete, 'stars': fusion.stars, 'level': level, 'cost': fusion.cost, 'ingredients': ingredients, 'awakening_materials': { 'total_cost': total_cost, 'missing': total_missing, }, 'ready': fusion_ready, } # Iterate through again and find any sub-fusions that are possible. Add their missing essences together for a total count from copy import deepcopy for monster, fusion in progress.iteritems(): # print 'Checking sub-fusions for ' + monster combined_total_cost = deepcopy(fusion['awakening_materials']['total_cost']) sub_fusions_found = False # Check if ingredients for this fusion are fuseable themselves for ingredient in fusion['ingredients']: if ingredient['sub_fusion_available'] and not ingredient['acquired']: sub_fusions_found = True # print ' Found sub-fusion for ' + str(ingredient['instance']) # Get the totals for the sub-fusions and add to the current fusion cost sub_fusion = progress.get(ingredient['instance'].awakens_from.name, None) for element, sizes in fusion['awakening_materials']['total_cost'].iteritems(): # print ' element: ' + str(element) if element not in combined_total_cost: combined_total_cost[element] = OrderedDict() # print sub_fusion['awakening_materials']['missing'] for size in set(sizes.keys() + sub_fusion['awakening_materials']['missing'][element].keys()): # print ' size: ' + size # print ' sub fusion: ' + str(sub_fusion['awakening_materials']['total_cost'][element].get(size, 0)) # print ' current combined_total_cost: ' + str(combined_total_cost[element].get(size, 0)) 
combined_total_cost[element][size] = combined_total_cost[element].get(size, 0) + sub_fusion['awakening_materials']['total_cost'][element].get(size, 0) # print ' new combined_total_cost: ' + str(combined_total_cost[element][size]) if sub_fusions_found: fusion['awakening_materials']['combined'] = essences_missing(summoner.get_storage(), combined_total_cost) context['fusions'] = progress return render(request, 'herders/profile/profile_fusion.html', context) else: return render(request, 'herders/profile/not_public.html', context) def teams(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) add_team_group_form = AddTeamGroupForm() context = { 'view': 'teams', 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'is_owner': is_owner, 'add_team_group_form': add_team_group_form, } if is_owner or summoner.public: return render(request, 'herders/profile/teams/teams_base.html', context) else: return render(request, 'herders/profile/not_public.html', context) def team_list(request, profile_name): summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Get team objects for the summoner team_groups = TeamGroup.objects.filter(owner=summoner) context = { 'profile_name': profile_name, 'is_owner': is_owner, 'team_groups': team_groups, } return render(request, 'herders/profile/teams/team_list.html', context) @login_required def team_group_add(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = 
AddTeamGroupForm(request.POST or None) if is_owner: if form.is_valid() and request.method == 'POST': # Create the monster instance new_group = form.save(commit=False) new_group.owner = request.user.summoner new_group.save() return redirect(return_path) else: return PermissionDenied("Attempting to add group to profile you don't own.") @login_required def team_group_delete(request, profile_name, group_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) team_group = get_object_or_404(TeamGroup, pk=group_id) form = DeleteTeamGroupForm(request.POST or None) form.helper.form_action = request.path form.fields['reassign_group'].queryset = TeamGroup.objects.filter(owner=summoner).exclude(pk=group_id) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): list_of_teams = Team.objects.filter(group__pk=group_id) if request.POST.get('delete', False): list_of_teams.delete() else: new_group = form.cleaned_data['reassign_group'] if new_group: for team in list_of_teams: team.group = new_group team.save() else: context['validation_errors'] = 'Please specify a group to reassign to.' 
if team_group.team_set.count() > 0: return render(request, 'herders/profile/teams/team_group_delete.html', context) else: messages.success(request, 'Deleted team group %s' % team_group.name) team_group.delete() return redirect(return_path) else: return PermissionDenied() def team_detail(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) team = get_object_or_404(Team, pk=team_id) team_effects = [] if team.leader.monster.all_skill_effects(): for effect in team.leader.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) for team_member in team.roster.all(): if team_member.monster.all_skill_effects(): for effect in team_member.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'team': team, 'team_buffs': team_effects, } return render(request, 'herders/profile/teams/team_detail.html', context) @login_required def team_edit(request, profile_name, team_id=None): return_path = reverse('herders:teams', kwargs={'profile_name': profile_name}) if team_id: team = Team.objects.get(pk=team_id) edit_form = EditTeamForm(request.POST or None, instance=team) else: edit_form = EditTeamForm(request.POST or None) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Limit form choices to objects owned by the current user. 
edit_form.fields['group'].queryset = TeamGroup.objects.filter(owner=summoner) edit_form.fields['leader'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.fields['roster'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': request.user.username, 'return_path': return_path, 'is_owner': is_owner, 'view': 'teams', } if is_owner: if request.method == 'POST' and edit_form.is_valid(): team = edit_form.save() messages.success(request, 'Saved changes to %s - %s.' % (team.group, team)) return team_detail(request, profile_name, team.pk.hex) else: raise PermissionDenied() context['edit_team_form'] = edit_form return render(request, 'herders/profile/teams/team_edit.html', context) @login_required def team_delete(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) team = get_object_or_404(Team, pk=team_id) # Check for proper owner before deleting if request.user.summoner == team.group.owner: team.delete() messages.success(request, 'Deleted team %s - %s.' 
% (team.group, team))
        return redirect(return_path)
    else:
        return HttpResponseForbidden()


def bestiary(request):
    """Render the full monster bestiary, caching the queryset for 900 seconds
    under the 'bestiary_data' cache key."""
    context = {
        'view': 'bestiary',
    }

    monster_list = cache.get('bestiary_data')

    if monster_list is None:
        monster_list = Monster.objects.select_related('awakens_from', 'awakens_to').all()
        cache.set('bestiary_data', monster_list, 900)

    context['monster_list'] = monster_list

    return render(request, 'herders/bestiary.html', context)


def bestiary_detail(request, monster_id):
    """Show one monster's detail page with base and awakened stats plus the
    percentage/absolute deltas between the two forms."""
    monster = get_object_or_404(Monster, pk=monster_id)
    context = {
        'view': 'bestiary',
    }

    # Normalize to (base, awakened) regardless of which form was requested.
    if monster.is_awakened and monster.awakens_from is not None:
        base_monster = monster.awakens_from
        awakened_monster = monster
    else:
        base_monster = monster
        awakened_monster = monster.awakens_to

    # Run some calcs to provide stat deltas between awakened and unawakened
    base_stats = base_monster.get_stats()
    context['base_monster'] = base_monster
    context['base_monster_stats'] = base_stats
    context['base_monster_leader_skill'] = base_monster.leader_skill
    context['base_monster_skills'] = base_monster.skills.all().order_by('slot')

    if base_monster.awakens_to:
        awakened_stats = awakened_monster.get_stats()

        # Calculate change in stats as monster undergoes awakening
        # NOTE(review): awakened_stats_deltas is only bound inside this branch;
        # if base_stats['6']['1']['HP'] is None the later context assignment
        # appears to reference it unbound -- verify against the continuation.
        if base_stats['6']['1']['HP'] is not None:
            awakened_stats_deltas = dict()
            # Percentage change of each 6*/lvl-40 stat, rounded to an int.
            for stat, value in base_stats['6']['40'].iteritems():
                if awakened_stats['6']['40'][stat] != value:
                    awakened_stats_deltas[stat] = int(round((awakened_stats['6']['40'][stat] / float(value)) * 100 - 100))
            # Flat (absolute) deltas for the secondary stats.
            if base_monster.speed != awakened_monster.speed:
                awakened_stats_deltas['SPD'] = awakened_monster.speed - base_monster.speed
            if base_monster.crit_rate != awakened_monster.crit_rate:
                awakened_stats_deltas['CRIT_Rate'] = awakened_monster.crit_rate - base_monster.crit_rate
            if base_monster.crit_damage != awakened_monster.crit_damage:
                awakened_stats_deltas['CRIT_DMG'] = awakened_monster.crit_damage - base_monster.crit_damage
            if base_monster.accuracy != awakened_monster.accuracy:
awakened_stats_deltas['Accuracy'] = awakened_monster.accuracy - base_monster.accuracy if base_monster.resistance != awakened_monster.resistance: awakened_stats_deltas['Resistance'] = awakened_monster.resistance - base_monster.resistance context['awakened_monster_stats_deltas'] = awakened_stats_deltas context['awakened_monster'] = awakened_monster context['awakened_monster_stats'] = awakened_stats context['awakened_monster_leader_skill'] = awakened_monster.leader_skill context['awakened_monster_skills'] = awakened_monster.skills.all().order_by('slot') return render(request, 'herders/bestiary_detail.html', context) Fixed error if team has no leader. from collections import OrderedDict from django.http import Http404, HttpResponseForbidden from django.core.urlresolvers import reverse from django.core.exceptions import PermissionDenied from django.contrib import messages from django.contrib.auth import authenticate, login from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.core.cache import cache from django.db import IntegrityError from django.db.models import Q from django.forms.formsets import formset_factory from django.shortcuts import render, redirect, get_object_or_404 from .forms import RegisterUserForm, AddMonsterInstanceForm, EditMonsterInstanceForm, AwakenMonsterInstanceForm, \ PowerUpMonsterInstanceForm, EditEssenceStorageForm, EditSummonerForm, EditUserForm, EditTeamForm, AddTeamGroupForm, \ DeleteTeamGroupForm from .models import Monster, Summoner, MonsterInstance, Fusion, TeamGroup, Team from .fusion import essences_missing, total_awakening_cost def register(request): form = RegisterUserForm(request.POST or None) if request.method == 'POST': if form.is_valid(): try: # Create the user new_user = User.objects.create_user( username=form.cleaned_data['username'], password=form.cleaned_data['password'], email=form.cleaned_data['email'], ) new_user.save() new_summoner = Summoner.objects.create( 
user=new_user, summoner_name=form.cleaned_data['summoner_name'], public=form.cleaned_data['is_public'], ) new_summoner.save() # Automatically log them in user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password']) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile', profile_name=user.username, view_mode='list') except IntegrityError: form.add_error('username', 'Username already taken') context = {'form': form} return render(request, 'herders/register.html', context) def profile(request, profile_name=None, view_mode='list', sort_method='grade'): if profile_name is None: if request.user.is_authenticated(): profile_name = request.user.username else: raise Http404('No user profile specified and not logged in. ') summoner = get_object_or_404(Summoner, user__username=profile_name) # Determine if the person logged in is the one requesting the view is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'add_monster_form': AddMonsterInstanceForm(), 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, 'view_mode': view_mode, 'sort_method': sort_method, 'return_path': request.path, 'view': 'profile', } if is_owner or summoner.public: if view_mode.lower() == 'list': context['monster_stable'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner) return render(request, 'herders/profile/profile_view.html', context) elif view_mode.lower() == 'box': if sort_method == 'grade': monster_stable = OrderedDict() monster_stable['6*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=6).order_by('-level', 'monster__name') monster_stable['5*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=5).order_by('-level', 'monster__name') monster_stable['4*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=4).order_by('-level', 'monster__name') 
monster_stable['3*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=3).order_by('-level', 'monster__name') monster_stable['2*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=2).order_by('-level', 'monster__name') monster_stable['1*'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, stars=1).order_by('-level', 'monster__name') elif sort_method == 'level': monster_stable = OrderedDict() monster_stable['40'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level=40).order_by('-level', '-stars', 'monster__name') monster_stable['39-31'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__gt=30).filter(level__lt=40).order_by('-level', '-stars', 'monster__name') monster_stable['30-21'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__gt=20).filter(level__lte=30).order_by('-level', '-stars', 'monster__name') monster_stable['20-11'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__gt=10).filter(level__lte=20).order_by('-level', '-stars', 'monster__name') monster_stable['10-1'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, level__lte=10).order_by('-level', '-stars', 'monster__name') elif sort_method == 'attribute': monster_stable = OrderedDict() monster_stable['water'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name') monster_stable['fire'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name') monster_stable['wind'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name') monster_stable['light'] = 
MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name') monster_stable['dark'] = MonsterInstance.objects.select_related('monster').filter(owner=summoner, monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name') else: raise Http404('Invalid sort method') context['monster_stable'] = monster_stable return render(request, 'herders/profile/profile_box.html', context) else: raise Http404('Unknown profile view mode') else: return render(request, 'herders/profile/not_public.html') @login_required def profile_edit(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) user_form = EditUserForm(request.POST or None, instance=request.user) summoner_form = EditSummonerForm(request.POST or None, instance=request.user.summoner) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'user_form': user_form, 'summoner_form': summoner_form, } if is_owner: if request.method == 'POST' and summoner_form.is_valid() and user_form.is_valid(): summoner_form.save() user_form.save() messages.success(request, 'Your profile has been updated.') return redirect(return_path) else: return render(request, 'herders/profile/profile_edit.html', context) else: return HttpResponseForbidden() @login_required def profile_storage(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = EditEssenceStorageForm(request.POST or None, 
instance=request.user.summoner) form.helper.form_action = request.path + '?next=' + return_path context = { 'is_owner': is_owner, 'profile_name': request.user.username, 'summoner': summoner, 'storage_form': form, 'view': 'storage', 'profile_view': 'materials', } if request.method == 'POST' and form.is_valid(): form.save() return redirect(return_path) else: return render(request, 'herders/essence_storage.html', context) @login_required() def monster_instance_add(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddMonsterInstanceForm(request.POST or None) if form.is_valid() and request.method == 'POST': # Create the monster instance new_monster = form.save(commit=False) new_monster.owner = request.user.summoner new_monster.save() messages.success(request, 'Added %s to your collection.' 
% new_monster) return redirect(return_path) else: # Re-show same page but with form filled in and errors shown context = { 'profile_name': profile_name, 'summoner': summoner, 'add_monster_form': form, 'return_path': return_path, 'is_owner': is_owner, 'view': 'profile', } return render(request, 'herders/profile/profile_monster_add.html', context) @login_required() def monster_instance_quick_add(request, profile_name, monster_id, stars, level): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_to_add = get_object_or_404(Monster, pk=monster_id) if is_owner: MonsterInstance.objects.create(owner=summoner, monster=monster_to_add, stars=stars, level=level, fodder=True, notes='', priority=MonsterInstance.PRIORITY_DONE) return redirect(return_path) else: return HttpResponseForbidden() def monster_instance_view(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) context = { 'profile_name': request.user.username, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'view': 'profile', } return render(request, 'herders/profile/profile_monster_view.html', context) @login_required() def monster_instance_edit(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) 
monster = get_object_or_404(MonsterInstance, pk=instance_id) form = EditMonsterInstanceForm(request.POST or None, instance=monster) form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': request.user.username, 'summoner': summoner, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'edit_monster_form': form, 'view': 'profile', } if is_owner: if request.method == 'POST': if form.is_valid(): monster = form.save(commit=False) monster.save() messages.success(request, 'Saved changes to %s.' % monster) return redirect(return_path) else: # Redisplay form with validation error messages context['validation_errors'] = form.non_field_errors() else: raise PermissionDenied() return render(request, 'herders/profile/profile_monster_edit.html', context) @login_required() def monster_instance_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == monster.owner: monster.delete() return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_power_up(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) PowerUpFormset = formset_factory(PowerUpMonsterInstanceForm, extra=5, max_num=5) if request.method == 'POST': formset = PowerUpFormset(request.POST) else: formset = PowerUpFormset() context = { 'profile_name': request.user.username, 'return_path': return_path, 'monster': monster, 'is_owner': is_owner, 'power_up_formset_action': 
request.path + '?next=' + return_path, 'power_up_formset': formset, 'view': 'profile', } food_monsters = [] validation_errors = {} if is_owner: if request.method == 'POST': # return render(request, 'herders/view_post_data.html', {'post_data': request.POST}) if formset.is_valid(): # Create list of submitted food monsters for instance in formset.cleaned_data: # Some fields may be blank if user skipped a form input or didn't fill in all 5 if instance: food_monsters.append(instance['monster']) # Check that all food monsters are unique - This is done whether or not user bypassed evolution checks if len(food_monsters) != len(set(food_monsters)): validation_errors['food_monster_unique'] = "You submitted duplicate food monsters. Please select unique monsters for each slot." # Check that monster is not being fed to itself for food in food_monsters: if food == monster: validation_errors['base_food_same'] = "You can't feed a monster to itself. " is_evolution = request.POST.get('evolve', False) # Perform validation checks for evolve action if is_evolution: # Check constraints on evolving (or not, if form element was set) if not request.POST.get('ignore_errors', False): # Check monster level and stars if monster.stars >= 6: validation_errors['base_monster_stars'] = "%s is already at 6 stars." % monster.monster.name if monster.level != monster.max_level_from_stars(): validation_errors['base_monster_level'] = "%s is not at max level for the current star rating (Lvl %s)." % (monster.monster.name, monster.monster.max_level_from_stars()) # Check number of fodder monsters if len(food_monsters) < monster.stars: validation_errors['food_monster_quantity'] = "Evolution requres %s food monsters." % monster.stars # Check fodder star ratings - must be same as monster for food in food_monsters: if food.stars != monster.stars: if 'food_monster_stars' not in validation_errors: validation_errors['food_monster_stars'] = "All food monsters must be %s stars." 
% monster.stars else: # Record state of ignore evolve rules for form redisplay context['ignore_evolve_checked'] = True # Perform the stars++ if no errors if not validation_errors: # Level up stars monster.stars += 1 monster.level = 1 monster.save() messages.success(request, 'Successfully evolved %s to %s<span class="glyphicon glyphicon-star"></span>' % (monster.monster.name, monster.stars), extra_tags='safe') if not validation_errors: # Delete the submitted monsters for food in food_monsters: if food.owner == request.user.summoner: messages.success(request, 'Deleted %s' % food) food.delete() else: raise PermissionDenied("Trying to delete a monster you don't own") # Redirect back to return path if evolved, or go to edit screen if power up if is_evolution: return redirect(return_path) else: return redirect( reverse('herders:monster_instance_edit', kwargs={'profile_name':profile_name, 'instance_id': instance_id}) + '?next=' + return_path ) else: context['form_errors'] = formset.errors else: raise PermissionDenied("Trying to power up or evolve a monster you don't own") # Any errors in the form will fall through to here and be displayed context['validation_errors'] = validation_errors return render(request, 'herders/profile/profile_power_up.html', context) @login_required() def monster_instance_awaken(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) if monster.monster.is_awakened: return redirect(return_path) form = AwakenMonsterInstanceForm(request.POST or None) form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': request.user.username, 'summoner': summoner, 'is_owner': is_owner, # Because of @login_required 
decorator 'return_path': return_path, 'monster': monster, 'awaken_monster_form': form, } if request.method == 'POST' and form.is_valid() and is_owner: # Subtract essences from inventory if requested if form.cleaned_data['subtract_materials']: summoner = Summoner.objects.get(user=request.user) if monster.monster.awaken_magic_mats_high: summoner.storage_magic_high -= monster.monster.awaken_magic_mats_high if monster.monster.awaken_magic_mats_mid: summoner.storage_magic_mid -= monster.monster.awaken_magic_mats_mid if monster.monster.awaken_magic_mats_low: summoner.storage_magic_low -= monster.monster.awaken_magic_mats_low if monster.monster.element == Monster.ELEMENT_FIRE: if monster.monster.awaken_ele_mats_high: summoner.storage_fire_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_fire_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_fire_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WATER: if monster.monster.awaken_ele_mats_high: summoner.storage_water_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_water_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_water_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_WIND: if monster.monster.awaken_ele_mats_high: summoner.storage_wind_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_wind_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_wind_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_DARK: if monster.monster.awaken_ele_mats_high: summoner.storage_dark_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_dark_mid -= 
monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_dark_low -= monster.monster.awaken_ele_mats_low elif monster.monster.element == Monster.ELEMENT_LIGHT: if monster.monster.awaken_ele_mats_high: summoner.storage_light_high -= monster.monster.awaken_ele_mats_high if monster.monster.awaken_ele_mats_mid: summoner.storage_light_mid -= monster.monster.awaken_ele_mats_mid if monster.monster.awaken_ele_mats_low: summoner.storage_light_low -= monster.monster.awaken_ele_mats_low summoner.save() # Perform the awakening by instance's monster source ID monster.monster = monster.monster.awakens_to monster.save() return redirect(return_path) else: storage = summoner.get_storage() available_essences = OrderedDict() for element, essences in monster.monster.get_awakening_materials().iteritems(): available_essences[element] = OrderedDict() for size, cost in essences.iteritems(): available_essences[element][size] = dict() available_essences[element][size]['qty'] = storage[element][size] available_essences[element][size]['sufficient'] = storage[element][size] >= cost context['available_essences'] = available_essences return render(request, 'herders/profile/profile_monster_awaken.html', context) @login_required() def monster_instance_duplicate(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile', kwargs={'profile_name': profile_name, 'view_mode': 'list'}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before copying if request.user.summoner == monster.owner: newmonster = monster newmonster.pk = None newmonster.save() return redirect(return_path) else: return HttpResponseForbidden() def fusion_progress(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:fusion', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user 
== request.user) context = { 'view': 'fusion', 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'is_owner': is_owner, } fusions = Fusion.objects.all().select_related() progress = OrderedDict() if is_owner or summoner.public: for fusion in fusions: level = 10 + fusion.stars * 5 ingredients = [] # Check if fusion has been completed already fusion_complete = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=fusion.product) | Q(monster=fusion.product.awakens_to) ).count() > 0 # Scan summoner's collection for instances each ingredient for ingredient in fusion.ingredients.all(): owned_ingredients = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=ingredient) | Q(monster=ingredient.awakens_from), ).order_by('-stars', '-level', '-monster__is_awakened') sub_fusion_available = Fusion.objects.filter(product=ingredient.awakens_from).exists() # Determine if each individual requirement is met using highest evolved/leveled monster that is not ignored for fusion for owned_ingredient in owned_ingredients: if not owned_ingredient.ignore_for_fusion: acquired = True evolved = owned_ingredient.stars >= fusion.stars leveled = owned_ingredient.level >= level awakened = owned_ingredient.monster.is_awakened complete = acquired & evolved & leveled & awakened break else: acquired = False evolved = False leveled = False awakened = False complete = False ingredient_progress = { 'instance': ingredient, 'sub_fusion_available': sub_fusion_available, 'owned': owned_ingredients, 'complete': complete, 'acquired': acquired, 'evolved': evolved, 'leveled': leveled, 'awakened': awakened, } ingredients.append(ingredient_progress) fusion_ready = True for i in ingredients: if not i['complete']: fusion_ready = False total_cost = total_awakening_cost(ingredients) total_missing = essences_missing(summoner.get_storage(), total_cost) progress[fusion.product.name] = { 'instance': fusion.product, 'acquired': fusion_complete, 'stars': fusion.stars, 'level': 
level, 'cost': fusion.cost, 'ingredients': ingredients, 'awakening_materials': { 'total_cost': total_cost, 'missing': total_missing, }, 'ready': fusion_ready, } # Iterate through again and find any sub-fusions that are possible. Add their missing essences together for a total count from copy import deepcopy for monster, fusion in progress.iteritems(): # print 'Checking sub-fusions for ' + monster combined_total_cost = deepcopy(fusion['awakening_materials']['total_cost']) sub_fusions_found = False # Check if ingredients for this fusion are fuseable themselves for ingredient in fusion['ingredients']: if ingredient['sub_fusion_available'] and not ingredient['acquired']: sub_fusions_found = True # print ' Found sub-fusion for ' + str(ingredient['instance']) # Get the totals for the sub-fusions and add to the current fusion cost sub_fusion = progress.get(ingredient['instance'].awakens_from.name, None) for element, sizes in fusion['awakening_materials']['total_cost'].iteritems(): # print ' element: ' + str(element) if element not in combined_total_cost: combined_total_cost[element] = OrderedDict() # print sub_fusion['awakening_materials']['missing'] for size in set(sizes.keys() + sub_fusion['awakening_materials']['missing'][element].keys()): # print ' size: ' + size # print ' sub fusion: ' + str(sub_fusion['awakening_materials']['total_cost'][element].get(size, 0)) # print ' current combined_total_cost: ' + str(combined_total_cost[element].get(size, 0)) combined_total_cost[element][size] = combined_total_cost[element].get(size, 0) + sub_fusion['awakening_materials']['total_cost'][element].get(size, 0) # print ' new combined_total_cost: ' + str(combined_total_cost[element][size]) if sub_fusions_found: fusion['awakening_materials']['combined'] = essences_missing(summoner.get_storage(), combined_total_cost) context['fusions'] = progress return render(request, 'herders/profile/profile_fusion.html', context) else: return render(request, 'herders/profile/not_public.html', 
context) def teams(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) add_team_group_form = AddTeamGroupForm() context = { 'view': 'teams', 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'is_owner': is_owner, 'add_team_group_form': add_team_group_form, } if is_owner or summoner.public: return render(request, 'herders/profile/teams/teams_base.html', context) else: return render(request, 'herders/profile/not_public.html', context) def team_list(request, profile_name): summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Get team objects for the summoner team_groups = TeamGroup.objects.filter(owner=summoner) context = { 'profile_name': profile_name, 'is_owner': is_owner, 'team_groups': team_groups, } return render(request, 'herders/profile/teams/team_list.html', context) @login_required def team_group_add(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddTeamGroupForm(request.POST or None) if is_owner: if form.is_valid() and request.method == 'POST': # Create the monster instance new_group = form.save(commit=False) new_group.owner = request.user.summoner new_group.save() return redirect(return_path) else: return PermissionDenied("Attempting to add group to profile you don't own.") @login_required def team_group_delete(request, profile_name, group_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = 
get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) team_group = get_object_or_404(TeamGroup, pk=group_id) form = DeleteTeamGroupForm(request.POST or None) form.helper.form_action = request.path form.fields['reassign_group'].queryset = TeamGroup.objects.filter(owner=summoner).exclude(pk=group_id) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): list_of_teams = Team.objects.filter(group__pk=group_id) if request.POST.get('delete', False): list_of_teams.delete() else: new_group = form.cleaned_data['reassign_group'] if new_group: for team in list_of_teams: team.group = new_group team.save() else: context['validation_errors'] = 'Please specify a group to reassign to.' if team_group.team_set.count() > 0: return render(request, 'herders/profile/teams/team_group_delete.html', context) else: messages.success(request, 'Deleted team group %s' % team_group.name) team_group.delete() return redirect(return_path) else: return PermissionDenied() def team_detail(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) team = get_object_or_404(Team, pk=team_id) team_effects = [] if team.leader and team.leader.monster.all_skill_effects(): for effect in team.leader.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) for team_member in team.roster.all(): if team_member.monster.all_skill_effects(): for effect in team_member.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': 
is_owner, 'team': team, 'team_buffs': team_effects, } return render(request, 'herders/profile/teams/team_detail.html', context) @login_required def team_edit(request, profile_name, team_id=None): return_path = reverse('herders:teams', kwargs={'profile_name': profile_name}) if team_id: team = Team.objects.get(pk=team_id) edit_form = EditTeamForm(request.POST or None, instance=team) else: edit_form = EditTeamForm(request.POST or None) summoner = get_object_or_404(Summoner, user__username=profile_name) is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Limit form choices to objects owned by the current user. edit_form.fields['group'].queryset = TeamGroup.objects.filter(owner=summoner) edit_form.fields['leader'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.fields['roster'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': request.user.username, 'return_path': return_path, 'is_owner': is_owner, 'view': 'teams', } if is_owner: if request.method == 'POST' and edit_form.is_valid(): team = edit_form.save() messages.success(request, 'Saved changes to %s - %s.' % (team.group, team)) return team_detail(request, profile_name, team.pk.hex) else: raise PermissionDenied() context['edit_team_form'] = edit_form return render(request, 'herders/profile/teams/team_edit.html', context) @login_required def team_delete(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) team = get_object_or_404(Team, pk=team_id) # Check for proper owner before deleting if request.user.summoner == team.group.owner: team.delete() messages.success(request, 'Deleted team %s - %s.' 
def bestiary(request):
    """Render the full monster bestiary, backed by a 15-minute cache."""
    monsters = cache.get('bestiary_data')
    if monsters is None:
        # Cache miss: fetch every monster with awakening links prefetched,
        # then keep the queryset around for 900 s.
        monsters = Monster.objects.select_related('awakens_from', 'awakens_to').all()
        cache.set('bestiary_data', monsters, 900)
    context = {
        'view': 'bestiary',
        'monster_list': monsters,
    }
    return render(request, 'herders/bestiary.html', context)


def bestiary_detail(request, monster_id):
    """Render one monster's detail page, comparing base vs awakened stats."""
    monster = get_object_or_404(Monster, pk=monster_id)
    context = {'view': 'bestiary'}

    # Resolve which record is the base form and which the awakened form.
    if monster.is_awakened and monster.awakens_from is not None:
        base_monster = monster.awakens_from
        awakened_monster = monster
    else:
        base_monster = monster
        awakened_monster = monster.awakens_to

    base_stats = base_monster.get_stats()
    context['base_monster'] = base_monster
    context['base_monster_stats'] = base_stats
    context['base_monster_leader_skill'] = base_monster.leader_skill
    context['base_monster_skills'] = base_monster.skills.all().order_by('slot')

    if base_monster.awakens_to:
        awakened_stats = awakened_monster.get_stats()
        # Stat deltas are only computable when the 6-star data is present.
        if base_stats['6']['1']['HP'] is not None:
            deltas = dict()
            # Percentage change of each 6-star / level-40 core stat.
            for stat, value in base_stats['6']['40'].iteritems():
                if awakened_stats['6']['40'][stat] != value:
                    deltas[stat] = int(round(
                        (awakened_stats['6']['40'][stat] / float(value)) * 100 - 100))
            # Flat deltas for the secondary stats.
            for attr, key in (('speed', 'SPD'),
                              ('crit_rate', 'CRIT_Rate'),
                              ('crit_damage', 'CRIT_DMG'),
                              ('accuracy', 'Accuracy'),
                              ('resistance', 'Resistance')):
                base_val = getattr(base_monster, attr)
                awakened_val = getattr(awakened_monster, attr)
                if base_val != awakened_val:
                    deltas[key] = awakened_val - base_val
            context['awakened_monster_stats_deltas'] = deltas

        context['awakened_monster'] = awakened_monster
        context['awakened_monster_stats'] = awakened_stats
        context['awakened_monster_leader_skill'] = awakened_monster.leader_skill
        context['awakened_monster_skills'] = awakened_monster.skills.all().order_by('slot')

    return render(request, 'herders/bestiary_detail.html', context)
class CreateForm(forms.SelfHandlingForm):
    """Form for creating a Neutron router.

    Fix for Neutron bug #1535707: when the external gateway is passed to
    router_create via external_gateway_info, the router namespace is not
    created on the controller. The gateway is therefore attached with a
    separate router_add_gateway call after the router exists, and the
    router is rolled back if that call fails.
    """
    name = forms.CharField(max_length=255, label=_("Router Name"))
    admin_state_up = forms.ChoiceField(label=_("Admin State"),
                                       choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       required=False)
    external_network = forms.ChoiceField(label=_("External Network"),
                                         required=False)
    mode = forms.ChoiceField(label=_("Router Type"))
    ha = forms.ChoiceField(label=_("High Availability Mode"))
    failure_url = 'horizon:project:routers:index'

    def __init__(self, request, *args, **kwargs):
        super(CreateForm, self).__init__(request, *args, **kwargs)
        # Offer DVR / HA choices only when the Neutron extensions allow it;
        # otherwise remove the fields entirely so they are never submitted.
        self.dvr_allowed = api.neutron.get_feature_permission(self.request,
                                                              "dvr", "create")
        if self.dvr_allowed:
            mode_choices = [('server_default', _('Use Server Default')),
                            ('centralized', _('Centralized')),
                            ('distributed', _('Distributed'))]
            self.fields['mode'].choices = mode_choices
        else:
            del self.fields['mode']
        self.ha_allowed = api.neutron.get_feature_permission(self.request,
                                                             "l3-ha", "create")
        if self.ha_allowed:
            ha_choices = [('server_default', _('Use Server Default')),
                          ('enabled', _('Enable HA mode')),
                          ('disabled', _('Disable HA mode'))]
            self.fields['ha'].choices = ha_choices
        else:
            del self.fields['ha']
        networks = self._get_network_list(request)
        if networks:
            self.fields['external_network'].choices = networks
        else:
            del self.fields['external_network']

    def _get_network_list(self, request):
        """Return choice tuples for all external networks (may be empty)."""
        search_opts = {'router:external': True}
        try:
            networks = api.neutron.network_list(request, **search_opts)
        except Exception:
            msg = _('Failed to get network list.')
            LOG.info(msg)
            messages.warning(request, msg)
            networks = []
        choices = [(network.id, network.name or network.id)
                   for network in networks]
        if choices:
            choices.insert(0, ("", _("Select network")))
        return choices

    def handle(self, request, data):
        """Create the router, then attach the external gateway separately.

        Returns the router dict on success, False on any failure (after
        reporting the error and redirecting).
        """
        try:
            params = {'name': data['name']}
            if 'admin_state_up' in data and data['admin_state_up']:
                params['admin_state_up'] = data['admin_state_up']
            if (self.dvr_allowed and data['mode'] != 'server_default'):
                params['distributed'] = (data['mode'] == 'distributed')
            if (self.ha_allowed and data['ha'] != 'server_default'):
                params['ha'] = (data['ha'] == 'enabled')
            router = api.neutron.router_create(request, **params)
        except Exception as exc:
            if exc.status_code == 409:
                msg = _('Quota exceeded for resource router.')
            else:
                msg = _('Failed to create router "%s".') % data['name']
            LOG.info(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
            return False
        # Workaround for Neutron bug #1535707: setting the gateway with
        # router_add_gateway (instead of external_gateway_info in the
        # create call) makes the L3 agent create the router namespace.
        try:
            if 'external_network' in data and data['external_network']:
                api.neutron.router_add_gateway(request, router['id'],
                                               data['external_network'])
            message = _('Router %s was successfully created.') % data['name']
            messages.success(request, message)
            return router
        except Exception:
            # Roll back the half-created router so it is not leaked.
            try:
                api.neutron.router_delete(request, router['id'])
                message = _('Router %s was created but connecting to'
                            ' an external network failed. The created'
                            ' router has been deleted, as the overall'
                            ' operation failed.') % data['name']
            except Exception:
                message = _('Router %(name)s was created but connecting to'
                            ' an external network failed. Attempts to'
                            ' delete the new router also failed.'
                            ' Router %(name)s still exists but is not'
                            ' connected to the desired external'
                            ' network.') % {'name': data['name']}
            LOG.info(message)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, message, redirect=redirect)
            return False
By calling the correct APIs, the namespace gets created.
To check the namespace list, run the below command on the controller node.
self.fields['mode'] self.ha_allowed = api.neutron.get_feature_permission(self.request, "l3-ha", "create") if self.ha_allowed: ha_choices = [('server_default', _('Use Server Default')), ('enabled', _('Enable HA mode')), ('disabled', _('Disable HA mode'))] self.fields['ha'].choices = ha_choices else: del self.fields['ha'] networks = self._get_network_list(request) if networks: self.fields['external_network'].choices = networks else: del self.fields['external_network'] def _get_network_list(self, request): search_opts = {'router:external': True} try: networks = api.neutron.network_list(request, **search_opts) except Exception: msg = _('Failed to get network list.') LOG.info(msg) messages.warning(request, msg) networks = [] choices = [(network.id, network.name or network.id) for network in networks] if choices: choices.insert(0, ("", _("Select network"))) return choices def handle(self, request, data): try: params = {'name': data['name']} if 'admin_state_up' in data and data['admin_state_up']: params['admin_state_up'] = data['admin_state_up'] if (self.dvr_allowed and data['mode'] != 'server_default'): params['distributed'] = (data['mode'] == 'distributed') if (self.ha_allowed and data['ha'] != 'server_default'): params['ha'] = (data['ha'] == 'enabled') router = api.neutron.router_create(request, **params) except Exception as exc: if exc.status_code == 409: msg = _('Quota exceeded for resource router.') else: msg = _('Failed to create router "%s".') % data['name'] LOG.info(msg) redirect = reverse(self.failure_url) exceptions.handle(request, msg, redirect=redirect) return False # workaround for neutron bug #1535707 try: if ('external_network' in data and data['external_network']): api.neutron.router_add_gateway(request, router['id'], data['external_network']) message = _('Router %s was successfully created.') % data['name'] messages.success(request, message) return router except Exception: try: api.neutron.router_delete(request, router['id']) message = _('Router %s was 
created but connecting to' ' an external network failed. The created' ' router has been deleted, as the overall' ' operation failed.') % data['name'] LOG.info(message) redirect = reverse(self.failure_url) exceptions.handle(request, message, redirect=redirect) return False except Exception: message = _('Router %(name)s was created but connecting to' ' an external network failed. Attempts to' ' delete the new router also failed.' ' Router %(name)s still exists but is not connect' ' to the desired external network.') % { 'name': data['name']} LOG.info(message) redirect = reverse(self.failure_url) exceptions.handle(request, message, redirect=redirect) return False class UpdateForm(forms.SelfHandlingForm): name = forms.CharField(label=_("Name"), required=False) admin_state = forms.ChoiceField(choices=[(True, _('UP')), (False, _('DOWN'))], label=_("Admin State")) router_id = forms.CharField(label=_("ID"), widget=forms.TextInput( attrs={'readonly': 'readonly'})) mode = forms.ChoiceField(label=_("Router Type")) ha = forms.BooleanField(label=_("High Availability Mode"), required=False) redirect_url = reverse_lazy('horizon:project:routers:index') def __init__(self, request, *args, **kwargs): super(UpdateForm, self).__init__(request, *args, **kwargs) self.dvr_allowed = api.neutron.get_feature_permission(self.request, "dvr", "update") if not self.dvr_allowed: del self.fields['mode'] elif kwargs.get('initial', {}).get('mode') == 'distributed': # Neutron supports only changing from centralized to # distributed now. mode_choices = [('distributed', _('Distributed'))] self.fields['mode'].widget = forms.TextInput(attrs={'readonly': 'readonly'}) self.fields['mode'].choices = mode_choices else: mode_choices = [('centralized', _('Centralized')), ('distributed', _('Distributed'))] self.fields['mode'].choices = mode_choices # TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables # PUT operation. It will be fixed in Kilo cycle. 
# self.ha_allowed = api.neutron.get_feature_permission( # self.request, "l3-ha", "update") self.ha_allowed = False if not self.ha_allowed: del self.fields['ha'] def handle(self, request, data): try: params = {'admin_state_up': (data['admin_state'] == 'True'), 'name': data['name']} if self.dvr_allowed: params['distributed'] = (data['mode'] == 'distributed') if self.ha_allowed: params['ha'] = data['ha'] router = api.neutron.router_update(request, data['router_id'], **params) msg = _('Router %s was successfully updated.') % data['name'] LOG.debug(msg) messages.success(request, msg) return router except Exception: msg = _('Failed to update router %s') % data['name'] LOG.info(msg) exceptions.handle(request, msg, redirect=self.redirect_url)
def test_load_bragg_files_no_files(qtbot, rietveld_event_handler):
    """load_bragg_files with nothing to load leaves only the root tree row."""
    main_window = MainWindow()
    rietveld_event_handler.load_bragg_files(main_window, None)
    bragg_tree = main_window.rietveld_ui.treeWidget_braggWSList
    assert bragg_tree.model().rowCount() == 1


def test_load_bragg_files(qtbot, rietveld_event_handler):
    """load_bragg_files with two GSAS files adds one tree row per file.

    Bug fix: the original passed ``bragg_file_names`` (bare filenames, not
    full paths under DATA_DIR), so nothing loadable was ever handed to the
    handler, and it asserted nothing. Load ``bragg_file_list`` instead and
    check the row count: 3 = the empty 'workspaces' root row plus one row
    per loaded file.
    """
    main_window = MainWindow()
    rietveld_event_handler.load_bragg_files(main_window, bragg_file_list)
    bragg_tree = main_window.rietveld_ui.treeWidget_braggWSList
    assert bragg_tree.model().rowCount() == 3
filename in files: wksp, angles = rietveld_event_handler.load_bragg_by_filename(filename) wksp == os.path.basename(filename).split('.')[0] angles_exp = [15.1, 31., 65., 120.4, 150.1, 8.6] # copied from file by hand for obs, expected in zip(angles, angles_exp): assert obs == pytest.approx(expected, rel=0.1) assert AnalysisDataService.doesExist(wksp) Add real test for addie.rietveld.event_handler.load_bragg_files from __future__ import absolute_import, print_function import os import pytest from tests import DATA_DIR from mantid.api import AnalysisDataService from addie.rietveld import event_handler from addie.main import MainWindow bragg_file_names = ['GSAS_NaNO3_230C.gsa', 'GSAS_NaNO3_275C.gsa'] bragg_file_list = [os.path.join(DATA_DIR, name) for name in bragg_file_names] @pytest.fixture def rietveld_event_handler(qtbot): return event_handler def test_load_bragg_files_no_files(qtbot, rietveld_event_handler): """Test load_bragg_files when no files to load""" main_window = MainWindow() rietveld_event_handler.load_bragg_files(main_window, None) bragg_tree = main_window.rietveld_ui.treeWidget_braggWSList assert bragg_tree.model().rowCount() == 1 def test_load_bragg_files(qtbot, rietveld_event_handler): """Test load_bragg_files when we load in 2 files Will have a row count == 3 since there is the empty 'workspaces' row """ main_window = MainWindow() rietveld_event_handler.load_bragg_files(main_window, bragg_file_list) bragg_tree = main_window.rietveld_ui.treeWidget_braggWSList assert bragg_tree.model().rowCount() == 3 def test_plot_bragg_bank_for_multi_bank(qtbot, rietveld_event_handler): """Test plot_bragg_bank for Multi-Bank mode""" main_window = MainWindow() rietveld_event_handler.load_bragg_files(main_window, bragg_file_list) main_window.rietveld_ui.graphicsView_bragg.set_to_single_gss(True) main_window.rietveld_ui.radioButton_multiBank.setChecked(True) main_window.rietveld_ui.radioButton_multiGSS.setChecked(False) rietveld_event_handler.plot_bragg_bank(main_window) 
def test_load_bragg_by_filename(qtbot, rietveld_event_handler):
    """Test that we can load Bragg *.gsa (GSAS) files.

    Bug fix: the workspace-name check was written as a bare comparison
    (``wksp == ...``) whose result was silently discarded; it is now a
    real assertion.
    """
    filename = 'NOM_127827.gsa'
    files = [os.path.join(DATA_DIR, filename)]
    for filename in files:
        wksp, angles = rietveld_event_handler.load_bragg_by_filename(filename)
        # Workspace name is the file's basename without its extension.
        assert wksp == os.path.basename(filename).split('.')[0]
        angles_exp = [15.1, 31., 65., 120.4, 150.1, 8.6]  # copied from file by hand
        for obs, expected in zip(angles, angles_exp):
            assert obs == pytest.approx(expected, rel=0.1)
    assert AnalysisDataService.doesExist(wksp)
def RandomizedBenchmarkingLeakage(numCliff, pu, pd, p0):
    """Leakage randomized-benchmarking model.

    From https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.116.020501:
    steady-state leakage buildup plus exponential decay of the initial
    leakage population p0 with total rate pd + pu.
    """
    rate = pd + pu
    decay = np.exp(-rate * numCliff)
    return (pu / rate) * (1 - decay) + p0 * decay


def RandomizedBenchmarkingDecay(numCliff, Amplitude, p, offset):
    """Standard RB decay curve: Amplitude * p**numCliff + offset."""
    return offset + Amplitude * p ** numCliff


def DoubleExpDampOscFunc(t, tau_1, tau_2, freq_1, freq_2, phase_1, phase_2,
                         amp_1, amp_2, osc_offset):
    """Sum of two exponentially damped cosines plus a common offset."""
    def _damped(amp, tau, freq, phase):
        # One damped-cosine component.
        return amp * np.cos(2 * np.pi * freq * t + phase) * np.exp(-(t / tau))

    return (_damped(amp_1, tau_1, freq_1, phase_1) +
            _damped(amp_2, tau_2, freq_2, phase_2) + osc_offset)


def double_RandomizedBenchmarkingDecay(numCliff, p, offset, invert=1):
    """RB curve fitting both the inverting and non-inverting exponential.

    The amplitude is constrained so the inverting curve starts at 1 and
    the flipped curve at 0; both converge to the common `offset`.
    `invert` is 0 or 1 and selects the branch.
    """
    # Inverting Clifford curve, pinned to 1 at numCliff = 0.
    val_inv = offset + (1 - offset) * p ** numCliff
    # Flipped Clifford curve, pinned to 0 at numCliff = 0.
    val_flip = offset * (1 - p ** numCliff)
    # Blend arithmetically rather than branching so array inputs work.
    return invert * val_inv + (1 - invert) * val_flip


def LorentzFunc(f, amplitude, center, sigma):
    """Lorentzian line shape with no background offset."""
    return (amplitude / np.pi) * sigma / ((f - center) ** 2 + sigma ** 2)


def Lorentzian(f, A, offset, f0, kappa):
    """Lorentzian line shape on a constant background `offset`."""
    return offset + (A / np.pi) * kappa / ((f - f0) ** 2 + kappa ** 2)
def Qubit_dac_to_freq(dac_voltage, f_max, dac_sweet_spot=None,
                      V_per_phi0=None, phi_park=None,
                      dac_flux_coefficient=None, E_c=0, asymmetry=0.5):
    """Cosine-arc model for uncalibrated flux of an asymmetric qubit.

    dac_voltage (V)
    f_max (Hz): sweet-spot frequency of the qubit
    E_c (Hz): charging energy of the qubit (fitting E_c is numerically
        unstable; keep it at 0 unless known)
    V_per_phi0 (V): volts per flux quantum (voltage-to-flux conversion)
    dac_sweet_spot (V): voltage at which the sweet spot is found
    asymmetry: dimensionless junction asymmetry |(EJ1-EJ2)/(EJ1+EJ2)|
    """
    # Exactly one way of specifying the flux conversion / sweet spot is
    # accepted; anything else is a caller error.
    if V_per_phi0 is None and dac_flux_coefficient is None:
        raise ValueError('Please specify "V_per_phi0".')
    if dac_sweet_spot is None and phi_park is None:
        raise ValueError('Please specify "phi_park".')
    elif dac_sweet_spot is not None and phi_park is not None:
        raise ValueError('"phi_park" and "dac_sweet_spot" cannot '
                         'be used simultaneously.')
    if dac_flux_coefficient is not None:
        log.warning('"dac_flux_coefficient" deprecated. Please use the '
                    'physically meaningful "V_per_phi0" instead.')
        V_per_phi0 = np.pi / dac_flux_coefficient
    if phi_park is not None:
        dac_sweet_spot = phi_park * V_per_phi0

    reduced_flux = np.pi * (dac_voltage - dac_sweet_spot) / V_per_phi0
    d_sq = asymmetry ** 2
    # Fourth-root envelope of the asymmetric-SQUID Josephson energy.
    envelope = (d_sq + (1 - d_sq) * np.cos(reduced_flux) ** 2) ** 0.25
    return (f_max + E_c) * envelope - E_c
def Qubit_dac_to_freq_res(dac_voltage, Ej_max, E_c, asymmetry, coupling, fr,
                          dac_sweet_spot=0.0, V_per_phi0=None,
                          dac_flux_coefficient=None, phi_park=None,
                          dim_charge=31,
                          ):
    """Cosine-arc flux model with full transmon-resonator diagonalization.

    dac_voltage (V)
    Ej_max (Hz): maximum Josephson energy of the qubit
    E_c (Hz): charging energy of the qubit
    asymmetry: d = |(EJ1-EJ2)/(EJ1+EJ2)|
    coupling (Hz): qubit-resonator coupling
    fr (Hz): bare resonator frequency
    dac_sweet_spot (V): voltage at which the sweet spot is found
    V_per_phi0 (V): volts per flux quantum (voltage-to-flux conversion)
    dim_charge: charge-basis dimension for the diagonalization
    """
    scalar_input = np.ndim(dac_voltage) == 0
    if scalar_input:
        dac_voltage = np.array([dac_voltage])
    if V_per_phi0 is None and dac_flux_coefficient is None:
        raise ValueError('Please specify "V_per_phi0".')
    if dac_flux_coefficient is not None:
        log.warning('"dac_flux_coefficient" deprecated. Please use the '
                    'physically meaningful "V_per_phi0" instead.')
        V_per_phi0 = np.pi / dac_flux_coefficient
    if phi_park is not None:
        dac_sweet_spot = phi_park * V_per_phi0

    reduced_flux = np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot)
    # Flux-dependent Josephson energy of the asymmetric SQUID.
    Ej = Ej_max * np.cos(reduced_flux) * np.sqrt(
        1 + asymmetry ** 2 * np.tan(reduced_flux) ** 2)
    with Timer('fitmod.loop', verbose=False):
        # First entry of the requested level list for each Ej on the arc.
        freqs = np.array([
            transmon.transmon_resonator_levels(
                E_c, ej, fr, coupling, states=[(1, 0), (2, 0)],
                dim_charge=dim_charge)[0]
            for ej in Ej])
    return freqs[0] if scalar_input else freqs
def Resonator_dac_to_freq(dac_voltage, f_max_qubit, f_0_res, E_c,
                          dac_sweet_spot, coupling,
                          V_per_phi0=None, dac_flux_coefficient=None,
                          asymmetry=0):
    """Resonator frequency vs dac voltage in the dispersive regime.

    The dressed resonator frequency is the bare frequency f_0_res minus
    the Lamb shift g^2 / Delta, with Delta the qubit-resonator detuning
    given by the cosine-arc qubit model.
    """
    f_qubit = Qubit_dac_to_freq(dac_voltage=dac_voltage,
                                f_max=f_max_qubit,
                                E_c=E_c,
                                dac_sweet_spot=dac_sweet_spot,
                                V_per_phi0=V_per_phi0,
                                dac_flux_coefficient=dac_flux_coefficient,
                                asymmetry=asymmetry)
    detuning = f_qubit - f_0_res
    return f_0_res - coupling ** 2 / detuning
def Qubit_freq_to_dac(frequency, f_max, dac_sweet_spot=None,
                      V_per_phi0=None, dac_flux_coefficient=None,
                      asymmetry=0, E_c=0, phi_park=None,
                      branch='smallest'):
    """Inverse of Qubit_dac_to_freq: dac voltage for a target frequency.

    frequency (Hz)
    f_max (Hz): sweet-spot frequency of the qubit
    E_c (Hz): charging energy of the qubit
    V_per_phi0 (V): volts per flux quantum (voltage-to-flux conversion)
    asymmetry: dimensionless |(EJ1-EJ2)/(EJ1+EJ2)|
    dac_sweet_spot (V): voltage at which the sweet spot is found
    branch: 'positive', 'negative', or 'smallest' (least |voltage|)
    """
    if V_per_phi0 is None and dac_flux_coefficient is None:
        raise ValueError('Please specify "V_per_phi0".')
    if dac_sweet_spot is None and phi_park is None:
        raise ValueError('Please specify "phi_park".')
    elif dac_sweet_spot is not None and phi_park is not None:
        raise ValueError('"phi_park" and "dac_sweet_spot" cannot '
                         'be used simultaneously.')
    if phi_park is not None:
        dac_sweet_spot = phi_park * V_per_phi0

    # Invert the fourth-root cosine-arc envelope for the reduced flux.
    ratio = (frequency + E_c) / (f_max + E_c)
    dac_term = np.arccos(np.sqrt(
        (ratio ** 4 - asymmetry ** 2) / (1 - asymmetry ** 2)))
    if dac_flux_coefficient is not None:
        log.warning('"dac_flux_coefficient" deprecated. Please use the '
                    'physically meaningful "V_per_phi0" instead.')
        V_per_phi0 = np.pi / dac_flux_coefficient

    positive = dac_term * V_per_phi0 / np.pi + dac_sweet_spot
    negative = -dac_term * V_per_phi0 / np.pi + dac_sweet_spot
    if branch == 'positive':
        return positive
    if branch == 'negative':
        return negative
    if branch == 'smallest':
        if np.ndim(dac_term) != 0:
            # Element-wise pick of whichever branch has the smaller |V|.
            both = np.array([positive, negative])
            rows = np.argmin(np.abs(both), 0)
            return both[rows, np.arange(len(positive))]
        return positive if abs(positive) < abs(negative) else negative
    raise ValueError('branch {} not recognized'.format(branch))
def ResidZZFuncJoint(t, amplitude, amplitude1, tau, alpha, t11,
                     frequency, phase, offset):
    '''
    Joint residual-ZZ model: damped oscillation without and with the
    spectator pulse (alpha is the ZZ frequency shift, t11 the spectator
    decay time). Returns the tuple (without_pulse, with_pulse).
    '''
    x = t11 * alpha
    envelope = np.exp(-t / tau)
    without_pulse = amplitude * envelope * np.cos(
        2 * np.pi * frequency * t + phase) + offset
    with_pulse = amplitude1 * envelope * (
        x * np.exp(-t * alpha / x) * np.cos(
            2 * np.pi * (frequency + alpha) * t + phase)
        - np.sin(2 * np.pi * frequency * t + phase)
    ) / np.sqrt(1 + x ** 2) + offset
    return without_pulse, with_pulse


def ResidZZFunc(t, amplitude, tau, alpha, x, frequency, phase, offset):
    '''Single-trace residual-ZZ damped oscillation (see ResidZZFuncJoint).'''
    osc = (x * np.exp(-t * alpha / x) *
           np.cos(2 * np.pi * (frequency + alpha) * t + phase)
           - np.sin(2 * np.pi * frequency * t + phase))
    return amplitude * np.exp(-t / tau) * osc / np.sqrt(1 + x ** 2) + offset


def ExpDecayFunc(t, tau, amplitude, offset, n):
    '''Stretched exponential decay: offset + amplitude * exp(-(t/tau)**n).'''
    return offset + amplitude * np.exp(-(t / tau) ** n)


def ExpDecayPmod(t, T2echo, delta, n0, chi, kappa, phase, amplitude, offset):
    """
    specific form of exponential decay used for residual resonator photon
    readout
    """
    photon_term = (2 * n0 * chi * (1 - np.exp(-t * (kappa + 2j * chi))) /
                   (kappa + 2j * chi))
    decay = np.exp(-t * (1 / T2echo + 2j * np.pi * delta) +
                   1j * (phase - photon_term))
    return amplitude * (1 - np.imag(decay)) + offset


def CombinedOszExpDecayFunc(t, tau, tau_gauss, phase, n0, chi, delta,
                            amplitude, oscillation_offset, offset):
    """
    Combination of gaussian and exponential decay
    """
    # Note: both decay terms enter linearly in t in this parametrization.
    envelope = np.exp(-(t / tau) - (t / tau_gauss))
    osc = np.cos(2 * np.pi * (n0 * 2 * chi / (2 * np.pi) + delta) * t + phase)
    return amplitude * envelope * (osc + oscillation_offset) + offset


def idle_error_rate_exp_decay(N, N1, N2, A, offset):
    """
    exponential decay consisting of two components
    """
    return A * np.exp(-N / N1 - (N / N2) ** 2) + offset


def gain_corr_ExpDecayFunc(t, tau, amp, gc):
    """
    Specific form of an exponential decay used for flux corrections.
    Includes a "gain correction" parameter that is ignored when correcting
    the distortions.
    """
    return gc * (1 + amp * np.exp(-t / tau))
def gain_corr_double_ExpDecayFunc(t, tau_A, tau_B, amp_A, amp_B, gc):
    """
    Specific form of an exponential decay used for flux corrections.
    Includes a "gain correction" parameter that is ignored when correcting
    the distortions.
    """
    correction = amp_A * np.exp(-t / tau_A) + amp_B * np.exp(-t / tau_B)
    return gc * (1 + correction)


def ExpDampOscFunc(t, tau, n, frequency, phase, amplitude,
                   oscillation_offset, exponential_offset):
    '''Cosine with a stretched-exponential (exponent n) decay envelope.'''
    envelope = np.exp(-(t / tau) ** n)
    osc = np.cos(2 * np.pi * frequency * t + phase) + oscillation_offset
    return amplitude * envelope * osc + exponential_offset


def GaussExpDampOscFunc(t, tau, tau_2, frequency, phase, amplitude,
                        oscillation_offset, exponential_offset):
    '''Cosine damped by combined gaussian (tau_2) and exponential (tau) decay.'''
    envelope = np.exp(-(t / tau_2) ** 2 - (t / tau))
    osc = np.cos(2 * np.pi * frequency * t + phase) + oscillation_offset
    return amplitude * envelope * osc + exponential_offset


def ExpDampDblOscFunc(t, tau, n, freq_1, freq_2, phase_1, phase_2,
                      amp_1, amp_2, osc_offset_1, osc_offset_2,
                      exponential_offset):
    '''
    Exponential decay with double cosine modulation
    '''
    envelope = np.exp(-(t / tau) ** n)
    osc_1 = np.cos(2 * np.pi * freq_1 * t + phase_1) + osc_offset_1
    osc_2 = np.cos(2 * np.pi * freq_2 * t + phase_2) + osc_offset_2
    return (amp_1 * envelope * osc_1 +
            amp_2 * envelope * osc_2 + exponential_offset)


def HangerFuncAmplitude(f, f0, Q, Qe, A, theta):
    '''
    This is the function for a hanger which does not take into account
    a possible slope.
    This function may be preferred over SlopedHangerFunc if the area around
    the hanger is small. In this case it may misjudge the slope.
    Theta is the asymmetry parameter.

    Note! units are inconsistent: f is in Hz, f0 is in GHz.
    '''
    return abs(A * (1. - Q / Qe * np.exp(1.j * theta) /
                    (1. + 2.j * Q * (f / 1.e9 - f0) / f0)))
def HangerFuncComplex(f, pars):
    '''
    This is the complex function for a hanger which DOES NOT take into
    account a possible slope.
    Input:
        f = frequency
        pars = parameters dictionary
               f0, Q, Qe, A, theta, phi_v, phi_0

    Author: Stefano Poletto
    '''
    f0 = pars['f0']
    Q = pars['Q']
    Qe = pars['Qe']
    A = pars['A']
    theta = pars['theta']
    phi_v = pars['phi_v']
    phi_0 = pars['phi_0']

    S21 = A * (1 - Q / Qe * np.exp(1j * theta) /
               (1 + 2.j * Q * (f / 1.e9 - f0) / f0)) * \
        np.exp(1j * (phi_v * f + phi_0))
    return S21


def PolyBgHangerFuncAmplitude(f, f0, Q, Qe, A, theta, poly_coeffs):
    # This is the function for a hanger (lambda/4 resonator) which takes into
    # account a possible polynomial background
    # NOT DEBUGGED
    return np.abs((1. + np.polyval(poly_coeffs, (f / 1.e9 - f0) / f0)) *
                  HangerFuncAmplitude(f, f0, Q, Qe, A, theta))


def SlopedHangerFuncAmplitude(f, f0, Q, Qe, A, theta, slope):
    # This is the function for a hanger (lambda/4 resonator) which takes into
    # account a possible linear background slope
    return np.abs((1. + slope * (f / 1.e9 - f0) / f0) *
                  HangerFuncAmplitude(f, f0, Q, Qe, A, theta))


def SlopedHangerFuncComplex(f, f0, Q, Qe, A, theta, phi_v, phi_0, slope):
    # Complex hanger (lambda/4 resonator) with a linear background slope.
    # Bug fix: HangerFuncComplex takes (f, pars-dict); the previous code
    # passed the scalars positionally, which raised TypeError on every call.
    # The phase factor exp(1j*(phi_v*f + phi_0 - phi_v*f[0])) is applied
    # here, so phi_v/phi_0 are zeroed inside the dict to avoid applying the
    # propagation phase twice.
    hanger_pars = {'f0': f0, 'Q': Q, 'Qe': Qe, 'A': A, 'theta': theta,
                   'phi_v': 0, 'phi_0': 0}
    return (1. + slope * (f / 1.e9 - f0) / f0) * \
        np.exp(1.j * (phi_v * f + phi_0 - phi_v * f[0])) * \
        HangerFuncComplex(f, hanger_pars)


def linear_with_offset(x, a, b):
    '''
    A linear signal with a fixed offset.
    '''
    return a * x + b


def linear_with_background(x, a, b):
    '''
    A linear signal with a fixed background.
    '''
    return np.sqrt((a * x) ** 2 + b ** 2)


def linear_with_background_and_offset(x, a, b, c):
    '''
    A linear signal with a fixed background and offset.
    '''
    return np.sqrt((a * x) ** 2 + b ** 2) + c
def gaussianCDF(x, amplitude, mu, sigma):
    """
    CDF of gaussian is P(X<=x) = .5 erfc((mu-x)/(sqrt(2)sig))
    """
    return 0.5 * amplitude * scipy.special.erfc(
        (mu - x) / (np.sqrt(2) * sigma))


def double_gaussianCDF(x, A_amplitude, A_mu, A_sigma,
                       B_amplitude, B_mu, B_sigma):
    """
    CDF of two gaussians added on top of each other.
    uses "gaussianCDF"
    """
    return (gaussianCDF(x, amplitude=A_amplitude, mu=A_mu, sigma=A_sigma) +
            gaussianCDF(x, amplitude=B_amplitude, mu=B_mu, sigma=B_sigma))


def TwoErrorFunc(x, amp, mu_A, mu_B, sigma, offset):
    '''
    Flat-top window: rising error function at mu_A, falling one at mu_B,
    both with the same sigma, on top of a constant offset.
    '''
    return offset + double_gaussianCDF(x, amp, mu_A, sigma, -amp, mu_B, sigma)


def ro_gauss(x, A_center, B_center, A_sigma, B_sigma, A_amplitude,
             B_amplitude, A_spurious, B_spurious):
    '''
    Two double-gaussians with sigma and mu/center of the residuals equal to
    the according state.
    '''
    gauss = lmfit.lineshapes.gaussian
    A_gauss = gauss(x=x[0], center=A_center, sigma=A_sigma,
                    amplitude=A_amplitude)
    B_gauss = gauss(x=x[1], center=B_center, sigma=B_sigma,
                    amplitude=B_amplitude)
    # Each state's histogram is its own gaussian mixed with a spurious
    # fraction of the other state's gaussian.
    gauss0 = (1 - A_spurious) * A_gauss + A_spurious * B_gauss
    gauss1 = (1 - B_spurious) * B_gauss + B_spurious * A_gauss
    return [gauss0, gauss1]


def ro_CDF(x, A_center, B_center, A_sigma, B_sigma, A_amplitude,
           B_amplitude, A_spurious, B_spurious):
    '''CDF counterpart of ro_gauss, built from gaussianCDF.'''
    cdf = gaussianCDF
    A_gauss = cdf(x=x[0], mu=A_center, sigma=A_sigma, amplitude=A_amplitude)
    B_gauss = cdf(x=x[1], mu=B_center, sigma=B_sigma, amplitude=B_amplitude)
    gauss0 = (1 - A_spurious) * A_gauss + A_spurious * B_gauss
    gauss1 = (1 - B_spurious) * B_gauss + B_spurious * A_gauss
    return [gauss0, gauss1]


def ro_CDF_discr(x, A_center, B_center, A_sigma, B_sigma, A_amplitude,
                 B_amplitude, A_spurious, B_spurious):
    # Discrimination variant: evaluate ro_CDF with the spurious-excitation
    # weights forced to zero (the spurious arguments are accepted but unused).
    return ro_CDF(x, A_center, B_center, A_sigma, B_sigma,
                  A_amplitude, B_amplitude, A_spurious=0, B_spurious=0)


def gaussian_2D(x, y, amplitude=1, center_x=0, center_y=0,
                sigma_x=1, sigma_y=1):
    '''
    A 2D gaussian function. If you want to use this for fitting you need
    to flatten your data first.
    '''
    gauss = lmfit.lineshapes.gaussian
    return (gauss(x, amplitude, center_x, sigma_x) *
            gauss(y, amplitude, center_y, sigma_y))
def DoubleExpDecayFunc(t, tau1, tau2, amp1, amp2, offset, n):
    '''Sum of two stretched-exponential decays with a common exponent n.'''
    return (offset +
            amp1 * np.exp(-(t / tau1) ** n) +
            amp2 * np.exp(-(t / tau2) ** n))


def TripleExpDecayFunc(t, tau1, tau2, tau3, amp1, amp2, amp3, offset, n):
    '''Sum of three stretched-exponential decays with a common exponent n.'''
    return (offset +
            amp1 * np.exp(-(t / tau1) ** n) +
            amp2 * np.exp(-(t / tau2) ** n) +
            amp3 * np.exp(-(t / tau3) ** n))


def avoided_crossing_mediated_coupling(flux, f_bus, f_center1, f_center2,
                                       c1, c2, g, flux_state=0):
    """
    Calculates the frequencies of an avoided crossing for the following
    model.
        [f_b,  g,   g ]
        [g,    f_1, 0 ]
        [g,    0,   f_2]

    f1 = c1*flux + f_center1
    f2 = c2*flux + f_center2
    f_b = constant

    g:  the coupling strength, beware to relabel your variable if using this
        model to fit J1 or J2.
    flux_state:  this is a switch used for fitting. It determines which
        transition to return
    """
    # isinstance (rather than `type(...) == int`) so plain and numpy
    # integer scalars are both broadcast over the whole flux array.
    if isinstance(flux_state, (int, np.integer)):
        flux_state = [flux_state] * len(flux)

    frequencies = np.zeros([len(flux), 2])
    for kk, dac in enumerate(flux):
        f_1 = dac * c1 + f_center1
        f_2 = dac * c2 + f_center2
        matrix = [[f_bus, g, g],
                  [g, f_1, 0.],
                  [g, 0., f_2]]
        # two lowest eigenfrequencies of the coupled three-level system
        frequencies[kk, :] = np.linalg.eigvalsh(matrix)[:2]
    result = np.where(flux_state, frequencies[:, 0], frequencies[:, 1])
    return result


def avoided_crossing_direct_coupling(flux, f_center1, f_center2,
                                     c1, c2, g, flux_state=0):
    """
    Calculates the frequencies of an avoided crossing for the following
    model.
        [f_1, g ]
        [g,   f_2]

    f1 = c1*flux + f_center1
    f2 = c2*flux + f_center2

    g:  the coupling strength, beware to relabel your variable if using this
        model to fit J1 or J2.
    flux_state:  this is a switch used for fitting. It determines which
        transition to return
    """
    if isinstance(flux_state, (int, np.integer)):
        flux_state = [flux_state] * len(flux)

    frequencies = np.zeros([len(flux), 2])
    for kk, dac in enumerate(flux):
        f_1 = dac * c1 + f_center1
        f_2 = dac * c2 + f_center2
        matrix = [[f_1, g],
                  [g, f_2]]
        frequencies[kk, :] = np.linalg.eigvalsh(matrix)[:2]
    result = np.where(flux_state, frequencies[:, 0], frequencies[:, 1])
    return result
def ErfWindow(t, t_start, t_end, t_rise, amplitude, offset):
    '''
    Flat-top window built from two error functions (rising edge at t_start,
    falling edge at t_end).

    parameters:
        t, time in s
        t_start, start of window in s
        t_end, end of window in s
        amplitude a.u.
        offset a.u.
        t_rise in s (rise time)
    '''
    # t_rise/2.6 rescales the rise time to the erf argument — presumably so
    # t_rise matches an observed rise fraction; confirm the convention.
    return offset + amplitude/2*(scipy.special.erf((t - t_start)/(t_rise/2.6)) -
                                 scipy.special.erf((t - t_end)/(t_rise/2.6)))


def hanger_with_pf(f, phi, J, kappa_pf, omega_ro, omega_pf, gamma_ro, A):
    # |S12| of a readout resonator (ro) coupled with strength J to a filter
    # resonator (pf — presumably a Purcell filter); all rates and
    # frequencies share the units of f.
    S12 = A * np.abs(
        np.cos(phi) - np.exp(1j*phi)*kappa_pf*
        (gamma_ro-2j*(f-omega_ro))/
        (4*J*J+(kappa_pf-2j*(f-omega_pf))*
         (gamma_ro-2j*(f-omega_ro)))
    )
    return S12


def simultan_hanger_with_pf(f, phi, J, kappa_pf, omega_ro_0, omega_ro_1,
                            omega_pf, gamma_ro, A):
    # Two hanger_with_pf traces sharing all parameters except the readout
    # resonator frequency. f holds both frequency axes concatenated: the
    # first half is evaluated against omega_ro_0, the second against
    # omega_ro_1; results are concatenated in the same order.
    f0 = f[:int(len(f)/2)]
    f1 = f[int(len(f)/2):]
    S12_0 = A * np.abs(
        np.cos(phi) - np.exp(1j*phi)*kappa_pf*
        (gamma_ro-2j*(f0-omega_ro_0))/
        (4*J*J+(kappa_pf-2j*(f0-omega_pf))*
         (gamma_ro-2j*(f0-omega_ro_0)))
    )
    S12_1 = A * np.abs(
        np.cos(phi) - np.exp(1j*phi)*kappa_pf*
        (gamma_ro-2j*(f1-omega_ro_1))/
        (4*J*J+(kappa_pf-2j*(f1-omega_pf))*
         (gamma_ro-2j*(f1-omega_ro_1)))
    )
    return np.hstack((S12_0, S12_1))


def fit_hanger_with_pf(model, data, simultan=False):
    """
    Fit the hanger-with-filter model, retrying over a brute-force grid of
    starting values if the first fit does not converge.

    model: a lmfit model of the pf_ro_S12 function
    data: of the shape [[freq,|S12|],[freq,|S12|],...]
          where freq should be in GHz
    return: a lmfit model.fit object (a list [fit0, fit1] when simultan)

    NOTE(review): the code multiplies the frequency column by 1e-9 before
    fitting, which suggests the input is actually expected in Hz — confirm
    against callers; the returned parameter values are rescaled back to Hz.
    """
    def hanger_with_pf_guess(model, data):
        # Heuristic starting values from the normalized trace:
        # the deepest dip is taken as the filter frequency, its width at
        # half the background level as kappa_pf, and the deepest feature
        # outside that window as the readout resonator frequency.
        A_guess = 1
        midmax = (data[0, 1]+data[-1, 1])/2
        J_guess = 0.006
        omega_pf_guess = min(data, key = lambda t: t[1])[0]
        pf_filter = [False]*len(data)
        for x in range(0,len(data)):
            if data[x,1] <= midmax:
                pf_filter[x] = True
        kappa_pf_guess = min(2*abs(list(itertools.compress(
                             data[:, 0],pf_filter))[0]- omega_pf_guess),
                             2*abs(list(itertools.compress(
                             data[:, 0],pf_filter))[-1]- omega_pf_guess),
                             0.05)
        omega_ro_filter = [True]*len(data)
        for x in range(0,len(data)):
            if abs(data[x, 0]-omega_pf_guess) <= kappa_pf_guess/2:
                omega_ro_filter[x] = False
        omega_ro_guess = min(list(itertools.compress(
                             data,omega_ro_filter)), key = lambda t: t[1])[0]
        for x in range(0,len(data)):
            if min(omega_pf_guess, omega_ro_guess) <= data[x,0]:
                if data[x,0] <= max(omega_pf_guess, omega_ro_guess):
                    if midmax <= data[x,1]:
                        midmax = data[x,0]
        model.set_param_hint('A', value=A_guess, min = 0, max = 1)
        model.set_param_hint('phi', value=4 , min = 0, max = 2*np.pi)
        model.set_param_hint('J', value=J_guess, min=0, max=0.015)
        model.set_param_hint('kappa_pf', value=kappa_pf_guess, min=0, max=0.05)
        model.set_param_hint('omega_pf', value=omega_pf_guess, min=0)
        model.set_param_hint('gamma_ro', value=0.0001, vary = False)
        model.set_param_hint('omega_ro', value=omega_ro_guess)
        params = model.make_params()
        return params

    if simultan:
        # Simultaneous fit of two traces: first fit each trace on its own
        # (last two points dropped — presumably calibration points; confirm),
        # then use the averaged single-trace results as starting values.
        data_loc_0 = np.copy(data[0][:-2])
        data_loc_1 = np.copy(data[1][:-2])
        fit0 = fit_hanger_with_pf(HangerWithPfModel, np.transpose(
               [data_loc_0[:,0], data_loc_0[:,1]]), simultan=False)
        fit1 = fit_hanger_with_pf(HangerWithPfModel, np.transpose(
               [data_loc_1[:,0], data_loc_1[:,1]]), simultan=False)
        if not len(data_loc_1) == len(data_loc_0):
            # Truncate to equal lengths so the traces can be stacked.
            if len(data_loc_1) < len(data_loc_0):
                data_loc_0 = data_loc_0[:len(data_loc_1)]
            else:
                data_loc_1 = data_loc_1[:len(data_loc_0)]
        data_loc_0[:,0] *= 1e-9
        data_loc_1[:,0] *= 1e-9
        data_loc = np.vstack((data_loc_0, data_loc_1))
        phi_guess = (fit0.params['phi'].value+fit1.params['phi'].value)/2
        J_guess = (fit0.params['J'].value+fit1.params['J'].value)/2*1e-9
        kap_guess = (fit0.params['kappa_pf'].value+
                     fit1.params['kappa_pf'].value)/2*1e-9
        omega_pf_guess = (fit0.params['omega_pf'].value+
                          fit1.params['omega_pf'].value)/2*1e-9
        model.set_param_hint('A', value=1, min = 0, max = 1)
        model.set_param_hint('A', value=1, min = 0, max = 1)
        model.set_param_hint('phi', value=phi_guess , min = 0, max = 2*np.pi)
        model.set_param_hint('J', value=J_guess, min=0, max=0.015)
        model.set_param_hint('kappa_pf', value=kap_guess, min=0, max=0.05)
        model.set_param_hint('omega_pf', value=omega_pf_guess, min=0)
        model.set_param_hint('gamma_ro', value=0.0001, vary = False)
        model.set_param_hint('omega_ro_0', value=
                             fit0.params['omega_ro'].value*1e-9, min=0)
        model.set_param_hint('omega_ro_1', value=
                             fit1.params['omega_ro'].value*1e-9, min=0)
        guess = model.make_params()
        fit_out = model.fit(
                  data_loc[:,1]/max(data_loc[:,1]), guess, f=data_loc[:,0])
        tol = 0.4
    else:
        data_loc = np.copy(data)
        data_loc[:,0] *= 1e-9
        guess = hanger_with_pf_guess(model,np.transpose(
                [data_loc[:,0],data_loc[:,1]/max(data_loc[:,1])]))
        fit_out = model.fit(
                  data_loc[:,1]/ max(data_loc[:,1]),guess,f=data_loc[:,0],)
        tol = 0.1

    if fit_out.chisqr > tol:
        # Brute-force retry: scan a grid of shifted starting values and keep
        # the fit with the smallest chi^2. Note the `break` only leaves the
        # innermost J_shift loop; the outer loops keep running.
        fit_lst = []
        for shift_pf in np.linspace(-0.01, 0.01, 5):
            for shift_ro in np.linspace(-0.01, 0.01, 5):
                for phase in np.linspace(0, 2*np.pi, 6):
                    for kappa_shift in np.linspace(-0.015,0.015,4):
                        for J_shift in np.linspace(-0.002,0.004,4):
                            if simultan:
                                fit_lst.append(model.fit(
                                    data_loc[:,1]/ max(data_loc[:,1]),guess,
                                    f=data_loc[:,0], phi=phase,
                                    omega_pf=float(guess['omega_pf'])+shift_pf,
                                    omega_ro_0=float(guess['omega_ro_0'])+shift_ro,
                                    omega_ro_1=float(guess['omega_ro_1'])+shift_ro,
                                    kappa_pf=float(guess['kappa_pf'])+kappa_shift,
                                    J=float(guess['J'])+J_shift))
                            else:
                                fit_lst.append(model.fit(
                                    data_loc[:,1]/max(
                                    data_loc[:,1]),guess,f=data_loc[:,0],phi=phase,
                                    omega_pf=float(guess['omega_pf'])+shift_pf,
                                    omega_ro=float(guess['omega_ro'])+shift_ro,
                                    kappa_pf=float(guess['kappa_pf'])+kappa_shift,
                                    J=float(guess['J'])+J_shift))
                            if fit_lst[-1].chisqr <= tol:
                                break
        chisqr_lst = [fit.chisqr for fit in fit_lst]
        fit_out = fit_lst[np.argmin(chisqr_lst)]

    # Convert the fitted parameters from the GHz-scaled fit back to Hz and
    # undo the amplitude normalization.
    fit_out.params['J'].max = 15e6
    fit_out.params['kappa_pf'].max = 50e6
    fit_out.params['omega_pf'].value *= 1e9
    fit_out.params['kappa_pf'].value *= 1e9
    fit_out.params['J'].value *= 1e9
    fit_out.params['gamma_ro'].value *= 1e9
    fit_out.params['A'].value *= max(data_loc[:,1])
    if simultan:
        fit_out.params['omega_ro_0'].value *= 1e9
        fit_out.params['omega_ro_1'].value *= 1e9
    else:
        fit_out.params['omega_ro'].value *= 1e9
    if fit_out.chisqr > tol:
        log.warning('The fit did not converge properly: chi^2 = '
                    ''+str(fit_out.chisqr))
    # Unpack the simultaneous result back into the two individual fit
    # objects, copying the shared parameters into each.
    if simultan:
        fit0.params['omega_pf'].value = fit_out.params['omega_pf'].value
        fit0.params['kappa_pf'].value = fit_out.params['kappa_pf'].value
        fit0.params['J'].max = fit_out.params['J'].max
        fit0.params['J'].value = fit_out.params['J'].value
        fit0.params['gamma_ro'].value = fit_out.params['gamma_ro'].value
        fit0.params['omega_ro'].value = fit_out.params['omega_ro_0'].value
        fit1.params['omega_pf'].value = fit_out.params['omega_pf'].value
        fit1.params['kappa_pf'].value = fit_out.params['kappa_pf'].value
        fit1.params['J'].max = fit_out.params['J'].max
        fit1.params['J'].value = fit_out.params['J'].value
        fit1.params['gamma_ro'].value = fit_out.params['gamma_ro'].value
        fit1.params['omega_ro'].value = fit_out.params['omega_ro_1'].value
        fit_out = [fit0, fit1]
    return fit_out


######################
# Residual functions #
######################
def residual_complex_fcn(pars, cmp_fcn, x, y):
    '''
    Residual of a complex function with complex results 'y' and
    real input values 'x'
    For resonators 'x' is the the frequency, 'y' the complex transmission
    Input:
        pars = parameters dictionary (check the corresponding function
               'cmp_fcn' for the parameters to pass)
        cmp_fcn = complex function
        x = input real values to 'cmp_fcn'
        y = output complex values from 'cmp_fcn'

    Author = Stefano Poletto
    '''
    cmp_values = cmp_fcn(x, pars)

    # Stack real and imaginary residuals so least-squares optimizers see a
    # purely real residual vector.
    res = cmp_values - y
    res = np.append(res.real, res.imag)

    return res
####################
# Guess functions #
####################
def exp_dec_guess(model, data, t):
    '''
    Assumes exponential decay in estimating the parameters
    '''
    # Offset ~ value at the latest time, amplitude ~ initial value minus
    # offset; tau from the sample closest to the 1/e level.
    offs_guess = data[np.argmax(t)]
    amp_guess = data[np.argmin(t)] - offs_guess
    # guess tau by looking for value closest to 1/e
    tau_guess = t[np.argmin(abs((amp_guess * (1 / np.e) + offs_guess) - data))]
    model.set_param_hint('amplitude', value=amp_guess)
    model.set_param_hint('tau', value=tau_guess)
    model.set_param_hint('n', value=1, vary=False)
    model.set_param_hint('offset', value=offs_guess)

    params = model.make_params()
    return params


def group_consecutives(vals, step=1):
    """Return list of consecutive lists of numbers from vals (number list)."""
    run = []
    result = [run]
    expect = None
    for v in vals:
        # Start a new run whenever the expected next value is missed.
        if (v == expect) or (expect is None):
            run.append(v)
        else:
            run = [v]
            result.append(run)
        expect = v + step
    return result


def arc_guess(freq, dac, dd=0.1):
    '''
    Estimate (fmax, fmin, sweet-spot dac value, arc length) of a qubit
    frequency-vs-dac arc from sampled data.

    Expects the dac values to be sorted!
    :param freq: qubit frequencies, one per dac value
    :param dac: sorted dac voltages
    :param dd: fraction of the points used to estimate the top/bottom
        frequency bands
    :return: (fmax, fmin, dac[sweet-spot index], arc length)
    '''
    # Bands of "high" and "low" frequencies from the top/bottom dd fraction.
    p = round(max(dd * len(dac), 1))
    f_small = np.average(np.sort(freq)[:p]) + np.std(np.sort(freq)[:p])
    f_big = np.average(np.sort(freq)[-p:]) - np.std(np.sort(freq)[-p:])
    # print(f_small * 1e-9, f_big * 1e-9)
    fmax = np.max(freq)
    fmin = np.min(freq)

    dacs_ss = np.where(freq >= f_big)[0]
    dacs_as = np.where(freq <= f_small)[0]

    # Group the index runs so each sweet spot / anti sweet spot candidate is
    # represented once.
    dacs_ss_groups = group_consecutives(vals=dacs_ss, step=1)
    dacs_as_groups = group_consecutives(vals=dacs_as, step=1)

    dacs_ss_single = []
    for g in dacs_ss_groups:
        ind = g[np.argmax(freq[g])]
        # ind = int(round(np.average(g)))
        dacs_ss_single.append(ind)

    # Sweet spot: the high-frequency candidate closest to zero dac voltage.
    dac_ss_group_index = np.argmin(np.abs(dac[dacs_ss_single]))
    dac_ss_index = dacs_ss_single[dac_ss_group_index]
    min_left = 0
    min_right = len(dac) - 1
    dacs_as_single = []
    for g in dacs_as_groups:
        if 0 in g:
            ind = 0
        elif len(dac) - 1 in g:
            ind = len(dac) - 1
        else:
            ind = int(round(np.average(g)))

        # Track the anti-sweet-spots closest to the sweet spot on each side.
        if ind < dac_ss_index:
            min_left = max(ind, min_left)
        elif ind > dac_ss_index:
            min_right = min(ind, min_right)
        dacs_as_single.append(ind)

    # print('maxs', dacs_ss_single)
    # print('mins', dacs_as_single)

    arc_len = (dac[min_right] - dac[min_left])

    # print('%d to %d = %.5f' % (min_left, min_right, arc_len))
    # Only half an arc is visible if one minimum sits at the data edge;
    # with several sweet spots use their average spacing instead.
    if min_left == 0 or min_right == len(dac) - 1:
        arc_len *= 2
    elif len(dacs_ss_groups) > 1:
        arc_len = np.average(dac[dacs_ss_single[1:]] -
                             dac[dacs_ss_single[:-1]])

    return fmax, fmin, dac[dac_ss_index], arc_len
def Resonator_dac_arch_guess(model, freq, dac_voltage,
                             f_max_qubit: float = None, E_c: float = None):
    """
    Set lmfit parameter hints for the resonator-dac arch model from the
    measured resonator frequencies and return the created Parameters.

    f_max_qubit is only varied in the fit when it is not supplied.
    """
    fmax, fmin, dac_ss, period = arc_guess(freq=freq, dac=dac_voltage)
    coup_guess = 15e6

    # todo make better f_res guess
    f_res = np.mean(freq)  # - (coup_guess ** 2 / (f_max_qubit - fmax))
    f_max_qubit_vary = f_max_qubit is None
    f_max_qubit = f_max_qubit or f_res - 500e6

    model.set_param_hint('f_0_res', value=f_res, min=f_res / 2, max=2 * f_res)
    model.set_param_hint('f_max_qubit', value=f_max_qubit, min=3e9, max=8.5e9,
                         vary=f_max_qubit_vary)
    model.set_param_hint('dac_sweet_spot', value=dac_ss,
                         min=(dac_ss - 0.005) / 2, max=2 * (dac_ss + 0.005))
    model.set_param_hint('V_per_phi0', value=period,
                         min=(period - 0.005) / 3, max=5 * (period + 0.005))
    model.set_param_hint('asymmetry', value=0, max=1, min=-1)
    model.set_param_hint('coupling', value=coup_guess, min=1e6, max=80e6)
    # NOTE(review): `E_c or 260e6` also replaces an explicit E_c of 0 with
    # the default — confirm that is intended.
    E_c = E_c or 260e6
    model.set_param_hint('E_c', value=E_c, min=50e6, max=400e6)
    params = model.make_params()
    return params


def Qubit_dac_arch_guess(model, data, dac_voltage, fixed_params=None):
    """
    Set lmfit parameter hints for the qubit-dac arch model.

    data: qubit frequencies, one per entry of dac_voltage.
    fixed_params: dict of parameter values to fix (not vary) in the fit;
        recognized keys: f_max, V_per_phi0, asymmetry, E_c, phi_park,
        dac_sweet_spot.
    """
    # Data-driven defaults: sweet spot at the frequency maximum, the
    # least-sensitive point at the minimum; one phi0 spans twice that
    # distance in dac voltage.
    f_max, dac_ss = np.max(data), dac_voltage[np.argmax(data)]
    f_min, dac_lss = np.min(data), dac_voltage[np.argmin(data)]
    V_per_phi0 = abs(2*(dac_ss-dac_lss))
    # asymmetry default from the frequency ratio at min/max.
    d = (f_min)**2/(f_max)**2
    if fixed_params is None:
        fixed_params = {"E_c": 0}
    model.set_param_hint('f_max', value=fixed_params.get('f_max', f_max),
                         min=0, vary=not 'f_max' in fixed_params)
    model.set_param_hint('V_per_phi0',
                         value=fixed_params.get('V_per_phi0', V_per_phi0),
                         min=0, vary=not 'V_per_phi0' in fixed_params)
    model.set_param_hint('asymmetry',
                         value=fixed_params.get('asymmetry', d),
                         min=0, max=1, vary=not 'asymmetry' in fixed_params)
    model.set_param_hint('E_c', value=fixed_params.get('E_c', 0),
                         vary=not 'E_c' in fixed_params)
    if "phi_park" in fixed_params:
        model.set_param_hint('phi_park', value=fixed_params['phi_park'],
                             vary=False)
    # dac_sweet_spot should be eligible for fitting only if phi_park is not
    # fixed. We cannot specify both in the model as they refer to the same
    # physical quantity. Note that the current config does not allow to fit
    # phi_park.
    else:
        model.set_param_hint('dac_sweet_spot',
                             value=fixed_params.get('dac_sweet_spot', dac_ss),
                             min=-3, max=3,
                             vary=not 'dac_sweet_spot' in fixed_params)
    params = model.make_params()
    return params
def Qubit_dac_arch_guess_precise(model, data, dac_voltage, fixed_params=None):
    """
    Set lmfit parameter hints for the precise (Ej-based) qubit-dac arch
    model and return the created Parameters.

    data: qubit frequencies, one per entry of dac_voltage.
    fixed_params: dict of parameter values to fix (not vary) in the fit;
        recognized keys: Ej_max, V_per_phi0, asymmetry, E_c, phi_park,
        dac_sweet_spot.
    """
    # Data-driven defaults: sweet spot at the frequency maximum, the
    # least-sensitive point at the minimum; one phi0 spans twice that
    # distance in dac voltage.
    f_max, dac_ss = np.max(data), dac_voltage[np.argmax(data)]
    f_min, dac_lss = np.min(data), dac_voltage[np.argmin(data)]
    V_per_phi0 = abs(2 * (dac_ss - dac_lss))
    d = (f_min) ** 2 / (f_max) ** 2
    if fixed_params is None:
        fixed_params = {"E_c": 0}
    # Bug fix: the vary flag used to test for the key 'f_max' (copied from
    # Qubit_dac_arch_guess), so fixing 'Ej_max' never disabled varying it.
    # NOTE(review): .get('Ej_max') has no default, so the hint value is None
    # unless supplied — confirm callers always pass Ej_max.
    model.set_param_hint('Ej_max', value=fixed_params.get('Ej_max'),
                         min=0, vary=not 'Ej_max' in fixed_params)
    model.set_param_hint('V_per_phi0',
                         value=fixed_params.get('V_per_phi0', V_per_phi0),
                         min=0, vary=not 'V_per_phi0' in fixed_params)
    model.set_param_hint('asymmetry',
                         value=fixed_params.get('asymmetry', d),
                         min=0, max=1, vary=not 'asymmetry' in fixed_params)
    model.set_param_hint('E_c', value=fixed_params.get('E_c', 0),
                         vary=not 'E_c' in fixed_params)
    if "phi_park" in fixed_params:
        model.set_param_hint('phi_park', value=fixed_params['phi_park'],
                             vary=False)
    # dac_sweet_spot should be eligible for fitting only if phi_park is not
    # fixed. We cannot specify both in the model as they refer to the same
    # physical quantity. Note that the current config does not allow to fit
    # phi_park.
    else:
        model.set_param_hint('dac_sweet_spot',
                             value=fixed_params.get('dac_sweet_spot', dac_ss),
                             min=-3, max=3,
                             vary=not 'dac_sweet_spot' in fixed_params)
    params = model.make_params()
    return params


def Qubit_dac_arch_guess_res(model, data, dac_voltage, fixed_params=None):
    """
    Set lmfit parameter hints for the Ej-based qubit-dac arch model read
    out through a resonator (adds 'coupling' and 'fr' hints) and return
    the created Parameters.

    data: qubit frequencies, one per entry of dac_voltage.
    fixed_params: dict of parameter values to fix (not vary) in the fit;
        recognized keys: Ej_max, V_per_phi0, asymmetry, E_c, coupling, fr,
        phi_park, dac_sweet_spot.
    """
    f_max, dac_ss = np.max(data), dac_voltage[np.argmax(data)]
    f_min, dac_lss = np.min(data), dac_voltage[np.argmin(data)]
    V_per_phi0 = abs(2 * (dac_ss - dac_lss))
    d = (f_min) ** 2 / (f_max) ** 2
    if fixed_params is None:
        fixed_params = {"E_c": 0}
    # Bug fix: same 'f_max' vs 'Ej_max' key mismatch as in
    # Qubit_dac_arch_guess_precise (see note there about the None default).
    model.set_param_hint('Ej_max', value=fixed_params.get('Ej_max'),
                         min=0, vary=not 'Ej_max' in fixed_params)
    model.set_param_hint('V_per_phi0',
                         value=fixed_params.get('V_per_phi0', V_per_phi0),
                         min=0, vary=not 'V_per_phi0' in fixed_params)
    model.set_param_hint('asymmetry',
                         value=fixed_params.get('asymmetry', d),
                         min=0, max=1, vary=not 'asymmetry' in fixed_params)
    model.set_param_hint('E_c', value=fixed_params.get('E_c', 0),
                         vary=not 'E_c' in fixed_params)
    model.set_param_hint('coupling',
                         value=fixed_params.get('coupling', 100e6),
                         vary=not 'coupling' in fixed_params)
    model.set_param_hint('fr', value=fixed_params.get('fr', 7e9),
                         vary=not 'fr' in fixed_params)
    if "phi_park" in fixed_params:
        model.set_param_hint('phi_park', value=fixed_params['phi_park'],
                             vary=False)
    # dac_sweet_spot should be eligible for fitting only if phi_park is not
    # fixed. We cannot specify both in the model as they refer to the same
    # physical quantity. Note that the current config does not allow to fit
    # phi_park.
    else:
        model.set_param_hint('dac_sweet_spot',
                             value=fixed_params.get('dac_sweet_spot', dac_ss),
                             min=-3, max=3,
                             vary=not 'dac_sweet_spot' in fixed_params)
    params = model.make_params()
    return params
def idle_err_rate_guess(model, data, N):
    '''
    Assumes exponential decay in estimating the parameters
    '''
    N_mean = np.mean(N)
    return model.make_params(A=0.5, N1=N_mean, N2=N_mean,
                             offset=np.mean(data))


def fft_freq_phase_guess(data, t, freq_guess=None):
    '''
    Guess for a cosine fit using FFT, only works for evenly spaced points
    '''
    # Keep only the positive-frequency half of the spectrum; the second
    # half holds the negative-frequency components (uniform sampling
    # assumed).
    spectrum = np.fft.fft(data)[:len(data) // 2]
    freqs = np.fft.fftfreq(len(data), t[1] - t[0])[:len(spectrum)]
    if freq_guess is None:
        spectrum[0] = 0  # remove the DC component
        magnitude = np.abs(spectrum)
        freq_guess = abs(freqs[magnitude == max(magnitude)][0])

    # Phase reference from the first sample where data is maximal; the
    # condition can have several solutions (e.g. visible discretization),
    # so the first one is used.
    ph_guess = 2 * np.pi - (2 * np.pi * t[data == max(data)] * freq_guess)[0]
    return freq_guess, ph_guess


def Cos_guess(model, data, t, **kwargs):
    """
    Cosine-fit starting values from the data extrema and an FFT.

    Tip: to use this assign this guess function as a method to a model use:
        model.guess = Cos_guess.__get__(model, model.__class__)
    """
    amp_guess = abs(max(data) - min(data)) / 2  # positive by convention
    offs_guess = np.mean(data)
    freq_guess, ph_guess = fft_freq_phase_guess(data, t, **kwargs)

    model.set_param_hint('period', expr='1/frequency')
    params = model.make_params(amplitude=amp_guess, frequency=freq_guess,
                               phase=ph_guess, offset=offs_guess)
    params['amplitude'].min = 0  # ensures positive amp
    params['frequency'].min = 0
    return params


def exp_damp_osc_guess(model, data, t, n_guess=1):
    """
    Makes a guess for an exponentially damped oscillation.
    Uses the fft_freq_phase guess for the oscillation parameters; the
    exponential guess is simpler: the exponent (n) is set to n_guess and
    tau to 2/3 of the total time range.
    """
    amp_guess = abs(max(data) - min(data)) / 2  # positive by convention
    freq_guess, ph_guess = fft_freq_phase_guess(data, t)
    return model.make_params(amplitude=amp_guess,
                             frequency=freq_guess,
                             phase=ph_guess,
                             oscillation_offset=0,
                             exponential_offset=np.mean(data),
                             n=n_guess,
                             tau=2 / 3 * max(t))
def Cos_amp_phase_guess(model, data, f, t):
    '''
    Guess for a cosine fit with fixed frequency f.
    '''
    amp_guess = abs(max(data) - min(data)) / 2  # amp is positive by convention
    offs_guess = np.mean(data)

    ph_guess = (-2 * np.pi * t[data == max(data)] * f)[0]
    # the condition data == max(data) can have several solutions
    # (for example when discretization is visible)
    # to prevent errors we pick the first solution
    # model.set_param_hint('period', expr='1')
    params = model.make_params(amplitude=amp_guess,
                               phase=ph_guess, offset=offs_guess)
    params['amplitude'].min = 0  # Ensures positive amp
    return params


def gauss_2D_guess(model, data, x, y):
    '''
    takes the mean of every row/column and then uses the regular gauss guess
    function to get a guess for the model parameters.

    Assumptions on input data
        * input is a flattened version of a 2D grid.
        * total surface under the gaussians sums up to 1.

    Potential improvements:
        Make the input also accept a 2D grid of data to prevent reshaping.
        Find a way to extract amplitude guess from data itself, note that
        taking the sum of the data (which should correspond to all data
        under the curve) does not do the trick.

    Note: possibly not compatible if the model uses prefixes.
    '''
    # Amplitude from the (approximate) integral over the 2D grid.
    # NOTE(review): this indexes data as 2D while the docstring says the
    # input is flattened — confirm which shape callers actually pass.
    dx = x[1:]-x[:-1]
    dy = y[1:]-y[:-1]
    sums = np.sum(((data[:-1,:-1]*dx).transpose()*dy))
    amp = np.sqrt(sums)

    # 1D projections along each axis, fitted with single gaussians.
    data_grid = data.reshape(-1, len(np.unique(x)))
    x_proj_data = np.mean(data_grid, axis=0)
    y_proj_data = np.mean(data_grid, axis=1)
    # NOTE(review): sorts the caller's x and y arrays in place — confirm
    # callers do not rely on the original ordering.
    x.sort()
    y.sort()
    xm = lmfit.models.GaussianModel()
    ym = lmfit.models.GaussianModel()
    x_guess = xm.guess(data=x_proj_data, x=np.unique(x))
    x_res = xm.fit(data=x_proj_data, x=np.unique(x), params=x_guess)
    y_guess = ym.guess(data=y_proj_data, x=np.unique(y))
    y_res = ym.fit(data=y_proj_data, x=np.unique(y), params=y_guess)
    x_guess = x_res.params
    y_guess = y_res.params

    model.set_param_hint('amplitude', value=amp, min=0.9*amp, max=1.1*amp,
                         vary=True)
    model.set_param_hint('sigma_x', value=x_guess['sigma'].value, min=0,
                         vary=True)
    model.set_param_hint('sigma_y', value=y_guess['sigma'].value, min=0,
                         vary=True)
    params = model.make_params(center_x=x_guess['center'].value,
                               center_y=y_guess['center'].value,)
    return params


def double_gauss_2D_guess(model, data, x, y):
    '''
    takes the mean of every row/column and then uses the guess
    function of the double gauss.

    Assumptions on input data
        * input is a flattened version of a 2D grid.

    Note: possibly not compatible if the model uses prefixes.
    Note 2: see also gauss_2D_guess() for some notes on how to improve this
        function.
    '''
    data_grid = data.reshape(-1, len(np.unique(x)))
    x_proj_data = np.mean(data_grid, axis=0)
    y_proj_data = np.mean(data_grid, axis=1)

    # The syntax here is slightly different than when calling a regular
    # guess function because the class attribute is not overwritten
    # properly; model=None makes double_gauss_guess return the raw dict.
    x_guess = double_gauss_guess(model=None, data=x_proj_data, x=np.unique(x))
    y_guess = double_gauss_guess(model=None, data=y_proj_data, x=np.unique(y))

    if model is not None:
        pars = model.make_params(A_sigma_x=x_guess['A_sigma'],
                                 A_sigma_y=y_guess['A_sigma'],
                                 A_center_x=x_guess['A_center'],
                                 A_center_y=y_guess['A_center'],
                                 A_amplitude=1,
                                 B_sigma_x=x_guess['B_sigma'],
                                 B_sigma_y=y_guess['B_sigma'],
                                 B_center_y=y_guess['B_center'],
                                 B_center_x=x_guess['B_center'],
                                 B_amplitude=1)
        return pars
    else:
        return x_guess, y_guess


def double_gauss_guess(model, data, x=None, **kwargs):
    '''
    Finds a guess for the initial parameters of the double gauss model.
    Guess is based on taking the cumulative sum of the data and
    finding the points corresponding to 25% and 75%;
    it finds sigma by using the property that ~33% of the data
    is contained in the range mu-sigma to mu+sigma.

    Tip: to use this assign this guess function as a method to a model use:
        model.guess = double_gauss_guess.__get__(
            model, model.__class__)
    '''
    if x is None:
        x = np.arange(len(data))
    cdf = np.cumsum(data)
    norm_cdf = cdf / cdf[-1]
    par_dict = {'A_center': x[(np.abs(norm_cdf - 0.25)).argmin()],
                'B_center': x[(np.abs(norm_cdf - 0.75)).argmin()],
                'A_sigma': (x[(np.abs(norm_cdf - 0.25 - .33 / 2)).argmin()] -
                            x[(np.abs(norm_cdf - 0.25 + .33 / 2)).argmin()]),
                'B_sigma': (x[(np.abs(norm_cdf - 0.75 - .33 / 2)).argmin()] -
                            x[(np.abs(norm_cdf - 0.75 + .33 / 2)).argmin()])}

    amp = max(data) * (par_dict['A_sigma'] + par_dict['B_sigma']) / 2.
    if model is not None:
        # Specify explicitly because not all pars are set to those from the
        # par dict
        pars = model.make_params(A_center=par_dict['A_center'],
                                 B_center=par_dict['B_center'],
                                 A_sigma=par_dict['A_sigma'],
                                 B_sigma=par_dict['B_sigma'],
                                 A_amplitude=amp, B_amplitude=amp)
        return pars
    # The else clause is added explicitly to reuse this function for the
    # 2D double gauss model
    else:
        return par_dict


def ro_double_gauss_guess(model, data, x, fixed_p01 = False, fixed_p10 = False):
    # An initial guess is done on the binned data with single gaussians
    # to constrain the fit params and avoid fitting noise if
    # e.g., mmt. ind. rel. is very low
    gmod0 = lmfit.models.GaussianModel()
    guess0 = gmod0.guess(data=data[0], x=x[0])
    gmod1 = lmfit.models.GaussianModel()
    guess1 = gmod1.guess(data=data[1], x=x[1])

    # Centers constrained to within 2 sigma of the single-gaussian fits.
    model.set_param_hint(
        'A_center', vary=True, value=guess0['center'].value,
        min=guess0['center'] - 2 * guess0['sigma'],
        max=guess0['center'] + 2 * guess0['sigma'])
    model.set_param_hint(
        'B_center', vary=True, value=guess1['center'].value,
        min=guess1['center'] - 2 * guess1['sigma'],
        max=guess1['center'] + 2 * guess1['sigma'])
    model.set_param_hint('A_sigma', value=guess0['sigma'].value, vary=True)
    model.set_param_hint('B_sigma', value=guess1['sigma'].value, vary=True)

    # Amplitudes fixed to the integrated histogram areas.
    intarea0 = sum_int(x=x[0], y=data[0])[-1]
    intarea1 = sum_int(x=x[1], y=data[1])[-1]
    model.set_param_hint('A_amplitude', value=intarea0, vary=False)
    model.set_param_hint('B_amplitude', value=intarea1, vary=False)
    model.set_param_hint('SNR', expr='abs(A_center-B_center)*2/(A_sigma+B_sigma)',
                         vary=False)

    # Spurious excitement: fraction of the area not explained by the main
    # gaussian of each state (clipped below at 1e-3).
    f = np.sqrt(2*np.pi)
    amp0 = 0.99 * np.max(data[0]) * guess0['sigma'] * f
    amp1 = 0.99 * np.max(data[1]) * guess1['sigma'] * f
    spurious0 = max(1-(amp0/intarea0), 1e-3)
    spurious1 = max(1-(amp1/intarea1), 1e-3)

    # `is False` (not truthiness) so an explicit fixed value of 0 is kept.
    p01 = fixed_p01 if fixed_p01 is not False else spurious0
    p10 = fixed_p10 if fixed_p10 is not False else spurious1
    model.set_param_hint('A_spurious', value=p01, min=0, max=1,
                         vary=fixed_p01 is False)
    model.set_param_hint('B_spurious', value=p10, min=0, max=1,
                         vary=fixed_p10 is False)

    return model.make_params()
                         vary=fixed_p01 is False)
    model.set_param_hint('B_spurious', value=p10, min=0, max=1,
                         vary=fixed_p10 is False)

    return model.make_params()


def sum_int(x,y):
    # Cumulative left-Riemann-sum integral of y over x; sum_int(x, y)[-1]
    # approximates the total area under the curve (used above to fix the
    # readout-histogram amplitudes).
    return np.cumsum(y[:-1]*(x[1:]-x[:-1]))


def DoubleGaussian(freq, sigma, mu, ampl, sigma0, mu0, ampl0, offset):
    '''
    Double Gaussian function: sum of two area-normalized Gaussians with
    areas ``ampl``/``ampl0``, centers ``mu``/``mu0`` and widths
    ``sigma``/``sigma0``, plus a constant ``offset``.
    '''
    return ampl/(sigma*np.sqrt(2*np.pi))*np.exp(-0.5*((freq - mu)/sigma)**2) + \
        ampl0/(sigma0*np.sqrt(2*np.pi))*np.exp(-0.5*((freq - mu0)/sigma0)**2) + \
        offset


def Gaussian(freq, sigma, mu, ampl, offset):
    '''
    Gaussian function: single area-normalized Gaussian (area ``ampl``,
    center ``mu``, width ``sigma``) plus a constant ``offset``.
    '''
    return ampl/(sigma*np.sqrt(2*np.pi))*np.exp(-0.5*((freq - mu)/sigma)**2) + offset


def Gaussian_guess(model, data, freq, **kwargs):
    """
    Initial-parameter guess for the ``Gaussian`` model: center at the
    maximum of ``data``, offset at the median, width from a weighted
    second moment, amplitude from the peak height.

    Tip: to use this assign this guess function as a method to a model use:
    model.guess = Gaussian_guess.__get__(
        model, model.__class__)
    """
    mu_guess = freq[np.argmax(data)]
    offs_guess = np.median(data)
    # Weights ~ squared deviation from the offset emphasize the peak
    # region when estimating the width.
    p = (data - offs_guess)**2
    p /= p.sum()
    # The /10 narrows the raw second-moment estimate — presumably an
    # empirical choice; TODO confirm.
    sigma_guess = np.sqrt(((freq - mu_guess)**2 * p).sum())/10
    amp_guess = max(data - offs_guess)*sigma_guess*np.sqrt(2*np.pi)
    params = model.make_params(sigma=sigma_guess, mu=mu_guess, ampl=amp_guess,
                               offset=offs_guess)
    # Constrain the center to lie inside the measured frequency window.
    params['mu'].min = np.min(freq)
    params['mu'].max = np.max(freq)
    return params


def half_feed_line_S12_J_func(omega, J, kappaPF, gammaPF, gammaRR, omegaPF,
                              omegaRR, phi, A , B, alpha):
    # |S12| of a Purcell filter (PF) coupled with strength J to a readout
    # resonator (RR) on a half feed line; phi/alpha and A/B presumably
    # parametrize the interfering background — TODO confirm against the
    # derivation this formula was taken from.
    return abs( A+np.exp(-1j*phi)*2*B*((-1+np.exp(1j*alpha))*(4*J**2+(gammaPF-2*1j*(omegaPF-omega))*(gammaRR-2j*omegaRR+2j*omega)))/(16*J**2+(4*gammaPF+(3+np.exp(1j*alpha))*kappaPF-8j*(omegaPF-omega))*(gammaRR-2j*omegaRR+2j*omega)) )


def half_feed_line_S12_J_guess(model,data):
    '''
    Initial-parameter guess for ``half_feed_line_S12_J_func``.

    data should have the frequencies in Hz in the first column and the
    transmission in the second column
    '''
    background_guess = max(data,key=lambda item:item[1])[1]/2
    amp_guess = max(data,key=lambda item:item[1])[0]-min(data,key=lambda item:item[1])[0]
    # The deepest dip is taken as the Purcell-filter (PF) resonance.
    omegaPF_guess = min(data,key=lambda item:item[1])[0]
    #Remove the PF dip to find the second smallest feature at the RR frequency
    kappaPF_guess_data = np.extract(
(data[:,1]<=(min(data,key=lambda item:item[1])[1]+ background_guess/2)),data[:,0]) kappaPF_guess = min(abs( kappaPF_guess_data[0]-omegaPF_guess),abs( kappaPF_guess_data[-1]-omegaPF_guess),5e10) omegaRR_guess = min( np.transpose([np.extract(abs(data[:,0]- omegaPF_guess) >= kappaPF_guess/2,data[:,0]),np.extract(abs(data[:,0]- omegaPF_guess) >= kappaPF_guess/2,data[:,1])]),key=lambda item:item[1])[0] J_guess = kappaPF_guess/4 model.set_param_hint('J',value=J_guess,min=0,max=2e7) model.set_param_hint('kappaPF',value=kappaPF_guess,min=1e6,max=1e8) model.set_param_hint('gammaPF',value=0.001,min=0,max=1e6) model.set_param_hint('gammaRR',value=0.001,min=0,max=1e6) model.set_param_hint('omegaPF',value=omegaPF_guess,min=omegaPF_guess-2e7,max=omegaPF_guess+2e7) model.set_param_hint('omegaRR',value=omegaRR_guess,min=omegaRR_guess-2e7,max=omegaRR_guess+2e7) model.set_param_hint('phi',value=(data[0,1]-data[-1,1])/background_guess) model.set_param_hint('A',value=background_guess,min=(background_guess)-5,max=(2000*background_guess)+5) model.set_param_hint('B',value=background_guess,min=(background_guess)-5,max=(2000*background_guess)+5) model.set_param_hint('alpha',value=3,min=0,max=10) params=model.make_params() return params def TwoErrorFunc_guess(model, delays, data): offset_guess = data[1] amp_guess = data[data.size//2] - data[1] delay_interval = (delays[-1]-delays[1]) mu_A_guess = delays[1] + 0.1*delay_interval mu_B_guess = delays[1] + 0.9*delay_interval sigma_guess = 3e-9 params = model.make_params(amp=amp_guess, mu_A=mu_A_guess, mu_B=mu_B_guess, sigma=sigma_guess, offset = offset_guess) return params def mixer_lo_leakage(vi, vq, li=0.0, lq=0.0, theta_i=0, theta_q=0, offset=0.0): """Model for maximum amplitude of LO leakage of an IQ mixer. Args: vi (:obj:'float'): DC bias voltage applied on the I input of the mixer. vq (:obj:'float'): DC bias voltage applied on the Q input of the mixer. li (float, optional): [TODO:description]. Defaults to 0.0. 
lq (float, optional): [TODO:description]. Defaults to 0.0. theta_i (int, optional): [TODO:description]. Defaults to 0.0. theta_q (int, optional): [TODO:description]. Defaults to 0.0. offset (float, optional): Offset in dBV accounting for losses in the signal chain. Defaults to 0.0 dBV. Returns: :obj:'float': maximum amplitude of the LO leakage for given parameters Model Schematic: I >-- +V_I ---------------- I R -- li*exp(i*theta_i) -------+ LO | | | LO >------------\-/-----------+ | X +----> RF --/-\-----------+ | | | LO | Q >-- +V_Q ---------------- I R -- lq*exp(i*theta_q-pi/2) --+ """ return 20*np.log10(np.abs(vi + li * np.exp(1j*theta_i) - 1j * vq + lq * np.exp(1j*(theta_q-np.pi/2)) )) + offset def mixer_lo_leakage_guess(model, **kwargs): """Prepare and return parameters of an :py:lmfit.model: for the model mixer_lo_leakage. Args: model (:py:lmfit.model:): The model that the parameter hints will be added to and that is used to generate the parameters. this model should have the following parameters: 'li', 'lq', 'theta_i', 'theta_q', 'scale' Returns: :py:lmfit.parameters: Parameters """ pi_half = np.pi/2 model.set_param_hint('li', value=0.0, min=0, max=1) model.set_param_hint('lq', value=0.0, min=0, max=1, vary=False) model.set_param_hint('theta_i', value=0.0, min=-pi_half, max=pi_half) model.set_param_hint('theta_q', value=0.0, min=-pi_half, max=pi_half, vary=False) model.set_param_hint('offset', value=0.0, min=-4.0, max=+4.0) return model.make_params() ################################# # User defined Models # ################################# # NOTE: it is actually better to instantiate the model within your analysis # file, this prevents the model params having a memory. 
# A valid reason to define it here would be exp_dec_guess if you want to add a guess function CosModel = lmfit.Model(CosFunc) CosModel.guess = Cos_guess half_Feed_lineS12_J_Model = lmfit.Model(half_feed_line_S12_J_func) half_Feed_lineS12_J_Model.guess = half_feed_line_S12_J_guess ExpDecayModel = lmfit.Model(ExpDecayFunc) TripleExpDecayModel = lmfit.Model(TripleExpDecayFunc) ExpDecayModel.guess = exp_dec_guess # todo: fix ExpDampOscModel = lmfit.Model(ExpDampOscFunc) GaussExpDampOscModel = lmfit.Model(GaussExpDampOscFunc) ExpDampDblOscModel = lmfit.Model(ExpDampDblOscFunc) DoubleExpDampOscModel = lmfit.Model(DoubleExpDampOscFunc) HangerAmplitudeModel = lmfit.Model(HangerFuncAmplitude) SlopedHangerAmplitudeModel = lmfit.Model(SlopedHangerFuncAmplitude) PolyBgHangerAmplitudeModel = lmfit.Model(PolyBgHangerFuncAmplitude) HangerComplexModel = lmfit.Model(HangerFuncComplex) SlopedHangerComplexModel = lmfit.Model(SlopedHangerFuncComplex) QubitFreqDacModel = lmfit.Model(Qubit_dac_to_freq) QubitFreqFluxModel = lmfit.Model(QubitFreqFlux) TwinLorentzModel = lmfit.Model(TwinLorentzFunc) LorentzianModel = lmfit.Model(Lorentzian) RBModel = lmfit.Model(RandomizedBenchmarkingDecay) LinOModel = lmfit.Model(linear_with_offset) LinBGModel = lmfit.Model(linear_with_background) LinBGOModel = lmfit.Model(linear_with_background_and_offset) ErfWindowModel = lmfit.Model(ErfWindow) GaussianModel_v2 = lmfit.models.GaussianModel GaussianModel = lmfit.Model(Gaussian) ExponentialModel = lmfit.models.ExponentialModel HangerWithPfModel = lmfit.Model(hanger_with_pf) SimHangerWithPfModel = lmfit.Model(simultan_hanger_with_pf, independent_vars=['f']) # 2D models Gaus2D_model = lmfit.Model(gaussian_2D, independent_vars=['x', 'y']) Gaus2D_model.guess = gauss_2D_guess # Note: not proper way to add guess func DoubleGauss2D_model = (lmfit.Model(gaussian_2D, independent_vars=['x', 'y'], prefix='A_') + lmfit.Model(gaussian_2D, independent_vars=['x', 'y'], prefix='B_')) DoubleGauss2D_model.guess = 
double_gauss_2D_guess ################################### # Models based on lmfit functions # ################################### LorentzModel = lmfit.Model(lmfit.models.lorentzian) Lorentz_w_background_Model = lmfit.models.LorentzianModel() + \ lmfit.models.LinearModel() PolyBgHangerAmplitudeModel = (HangerAmplitudeModel * lmfit.models.PolynomialModel(degree=7)) DoubleGaussModel = (lmfit.models.GaussianModel(prefix='A_') + lmfit.models.GaussianModel(prefix='B_')) DoubleGaussModel.guess = double_gauss_guess # defines a guess function def plot_fitres2D_heatmap(fit_res, x, y, axs=None, cmap='viridis'): ''' Convenience function for plotting results of flattened 2D fits. It could be argued this does not belong in fitting models (it is not a model) but I put it here as it is closely related to all the stuff we do with lmfit. If anyone has a better location in mind, let me know (MAR). ''' # fixing the data rotation with [::-1] nr_cols = len(np.unique(x)) data_2D = fit_res.data.reshape(-1, nr_cols, order='C')[::-1] fit_2D = fit_res.best_fit.reshape(-1, nr_cols, order='C')[::-1] guess_2D = fit_res.init_fit.reshape(-1, nr_cols, order='C')[::-1] if axs is None: f, axs = plt.subplots(1, 3, figsize=(14, 6)) axs[0].imshow(data_2D, extent=[x[0], x[-1], y[0], y[-1]], cmap=cmap, vmin=np.min(data_2D), vmax=np.max(data_2D)) axs[1].imshow(fit_2D, extent=[x[0], x[-1], y[0], y[-1]], cmap=cmap, vmin=np.min(data_2D), vmax=np.max(data_2D)) axs[2].imshow(guess_2D, extent=[x[0], x[-1], y[0], y[-1]], cmap=cmap, vmin=np.min(data_2D), vmax=np.max(data_2D)) axs[0].set_title('data') axs[1].set_title('fit-result') axs[2].set_title('initial guess') return axs def mixer_imbalance_sideband(x, y, g=1.0, phi=0, scale=1.0): return np.log10(np.abs((1 - x/g * np.exp(-1j*np.deg2rad(phi + y)) ) * scale)) def mixer_imbalance_sideband_guess(model, **kwargs): model.set_param_hint('g', value=1.0, min=0.5, max=1.5) model.set_param_hint('phi', value=0, min=-20, max=20) model.set_param_hint('scale', value=1, 
min=-1e3, max=1e3) return model.make_params() # Before defining a new model, take a look at the built in models in lmfit. # From http://lmfit.github.io/lmfit-py/builtin_models.html # Built-in Fitting Models in the models module # Peak-like models # GaussianModel # LorentzianModel # VoigtModel # PseudoVoigtModel # Pearson7Model # StudentsTModel # BreitWignerModel # LognormalModel # DampedOcsillatorModel # ExponentialGaussianModel # SkewedGaussianModel # DonaichModel # Linear and Polynomial Models # ConstantModel # LinearModel # QuadraticModel # ParabolicModel # PolynomialModel # Step-like models # StepModel # RectangleModel # Exponential and Power law models # ExponentialModel # PowerLawModel Mixer calib: improvements in model mixer_imbalance_sideband added docstrings, corrected conversion between V and dBV, adopted parameter limits, ... import numpy as np import matplotlib.pyplot as plt import scipy import lmfit import itertools import logging from pycqed.simulations import transmon from pycqed.utilities.timer import Timer log = logging.getLogger(__name__) ################################# # Fitting Functions Library # ################################# def RandomizedBenchmarkingLeakage(numCliff, pu, pd, p0): # from https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.116.020501 return pu/(pd+pu) * (1-np.exp(-(pd+pu)*numCliff)) + \ p0*np.exp(-(pd+pu)*numCliff) def RandomizedBenchmarkingDecay(numCliff, Amplitude, p, offset): val = Amplitude * (p ** numCliff) + offset return val def DoubleExpDampOscFunc(t, tau_1, tau_2, freq_1, freq_2, phase_1, phase_2, amp_1, amp_2, osc_offset): cos_1 = amp_1 * (np.cos(2 * np.pi * freq_1 * t + phase_1)) * \ np.exp(-(t / tau_1)) cos_2 = amp_2 * (np.cos(2 * np.pi * freq_2 * t + phase_2)) * \ np.exp(-(t / tau_2)) return cos_1 + cos_2 + osc_offset def double_RandomizedBenchmarkingDecay(numCliff, p, offset, invert=1): """ A variety of the RB-curve that allows fitting both the inverting and non-inverting exponential. 
The amplitude of the decay curve is constrained to start at 0 or 1. The offset is the common point both curves converge to. pick invert to be 1 or 0 """ # Inverting clifford curve val_inv = (1 - offset) * (p ** numCliff) + offset # flipping clifford curve val_flip = -offset * (p ** numCliff) + offset # Using invert as a boolean but not using if statement to allow for # arrays to be input in the function val = (1 - invert) * val_flip + invert * val_inv return val def LorentzFunc(f, amplitude, center, sigma): val = amplitude / np.pi * (sigma / ((f - center) ** 2 + sigma ** 2)) return val def Lorentzian(f, A, offset, f0, kappa): val = offset + A / np.pi * (kappa / ((f - f0) ** 2 + kappa ** 2)) return val def TwinLorentzFunc(f, A_gf_over_2, A, f0_gf_over_2, f0, kappa_gf_over_2, kappa, background=0): """ Twin lorentz with background. Args: f (float): frequency sweep points in Hz A (float): amplitude of the tallest/deepest Lorentzian structure in the data A_gf_over_2 (float): amplitude of the other Lorentzian structure in the data; since this function is used for high power qubit spectroscopy, this parameter refers to the Lorentzian structure corresponding to the gf/2 transition f0 (float): frequency of the tallest/deepest Lorentzian structure in the data f0_gf_over_2 (float): frequency of the other Lorentzian structure in the data; since this function is used for high power qubit spectroscopy, this parameter refers to the Lorentzian structure corresponding to the gf/2 transition kappa (float): kappa (FWHM) of the tallest/deepest Lorentzian structure in the data kappa_gf_over_2 (float): kappa (FWHM) of the other Lorentzian structure in the data; since this function is used for high power qubit spectroscopy, this parameter refers to the Lorentzian structure corresponding to the gf/2 transition background (float): background offset """ val = (A_gf_over_2 / np.pi * (kappa_gf_over_2 / ((f - f0_gf_over_2) ** 2 + kappa_gf_over_2 ** 2)) + A / np.pi * (kappa / ((f - f0) ** 2 + 
kappa ** 2)) + background) return val def Qubit_dac_to_freq(dac_voltage, f_max, dac_sweet_spot=None, V_per_phi0=None, phi_park=None, dac_flux_coefficient=None, E_c=0, asymmetry=0.5): ''' The cosine Arc model for uncalibrated flux for asymmetric qubit. dac_voltage (V) f_max (Hz): sweet-spot frequency of the qubit E_c (Hz): charging energy of the qubit V_per_phi0 (V): volt per phi0 (convert voltage to flux) dac_sweet_spot (V): voltage at which the sweet-spot is found asym (dimensionless asymmetry param) = abs((EJ1-EJ2)/(EJ1+EJ2)), ''' # using E_c as fit parameter is numerically not stable (changing E_c does only # slightly change the function. Set E_c to zero if V_per_phi0 is None and dac_flux_coefficient is None: raise ValueError('Please specify "V_per_phi0".') if dac_sweet_spot is None and phi_park is None: raise ValueError('Please specify "phi_park".') elif dac_sweet_spot is not None and phi_park is not None: raise ValueError('"phi_park" and "dac_sweet_spot" cannot ' 'be used simultaneously.') if dac_flux_coefficient is not None: log.warning('"dac_flux_coefficient" deprecated. Please use the ' 'physically meaningful "V_per_phi0" instead.') V_per_phi0 = np.pi / dac_flux_coefficient if phi_park is not None: dac_sweet_spot = phi_park * V_per_phi0 qubit_freq = (f_max + E_c) * ( asymmetry ** 2 + (1 - asymmetry ** 2) * np.cos(np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot)) ** 2) ** 0.25 - E_c return qubit_freq def Qubit_dac_to_freq_precise(dac_voltage, Ej_max, E_c, asymmetry, dac_sweet_spot=0.0, V_per_phi0=None, dac_flux_coefficient=None, phi_park=None ): ''' The cosine Arc model for uncalibrated flux for asymmetric qubit. 
dac_voltage (V) Ejmax (Hz): Maximum Ej of qubit Ec (Hz): charging energy of the qubit d = abs((EJ1-EJ2)/(EJ1+EJ2))) dac_sweet_spot (V): voltage at which the sweet-spot is found V_per_phi0 (V): volt per phi0 (convert voltage to flux ''' if np.ndim(dac_voltage) == 0: dac_voltage = np.array([dac_voltage]) if V_per_phi0 is None and dac_flux_coefficient is None: raise ValueError('Please specify "V_per_phi0".') if dac_flux_coefficient is not None: log.warning('"dac_flux_coefficient" deprecated. Please use the ' 'physically meaningful "V_per_phi0" instead.') V_per_phi0 = np.pi / dac_flux_coefficient if phi_park is not None: dac_sweet_spot = phi_park * V_per_phi0 phi = np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot) Ej = 2 * np.pi * Ej_max * np.cos(phi) * np.sqrt(1 + asymmetry ** 2 * np.tan(phi) ** 2) E_c = 2 * np.pi * E_c freqs = [] for ej in Ej: freqs.append((transmon.transmon_levels(E_c, ej) / (2 * np.pi))[0]) qubit_freq = np.array(freqs) return qubit_freq def Qubit_dac_to_freq_res(dac_voltage, Ej_max, E_c, asymmetry, coupling, fr, dac_sweet_spot=0.0, V_per_phi0=None, dac_flux_coefficient=None, phi_park=None, dim_charge=31, ): ''' The cosine Arc model for uncalibrated flux for asymmetric qubit. dac_voltage (V) Ejmax (Hz): Maximum Ej of qubit Ec (Hz): charging energy of the qubit d = abs((EJ1-EJ2)/(EJ1+EJ2))) dac_sweet_spot (V): voltage at which the sweet-spot is found V_per_phi0 (V): volt per phi0 (convert voltage to flux) phi_park ''' return_float = False if np.ndim(dac_voltage) == 0: dac_voltage = np.array([dac_voltage]) return_float = True if V_per_phi0 is None and dac_flux_coefficient is None: raise ValueError('Please specify "V_per_phi0".') if dac_flux_coefficient is not None: log.warning('"dac_flux_coefficient" deprecated. 
Please use the ' 'physically meaningful "V_per_phi0" instead.') V_per_phi0 = np.pi / dac_flux_coefficient if phi_park is not None: dac_sweet_spot = phi_park * V_per_phi0 phi = np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot) Ej = Ej_max * np.cos(phi) * np.sqrt(1 + asymmetry ** 2 * np.tan(phi) ** 2) with Timer('fitmod.loop', verbose=False): freqs = [(transmon.transmon_resonator_levels(E_c, ej, fr, coupling, states=[(1, 0), (2, 0)], dim_charge=dim_charge ))[0] for ej in Ej] qubit_freq = np.array(freqs) return qubit_freq[0] if return_float else qubit_freq def Qubit_freq_to_dac_res(frequency, Ej_max, E_c, asymmetry, coupling, fr, dac_sweet_spot=0.0, V_per_phi0=None, dac_flux_coefficient=None, phi_park=None, branch='smallest'): ''' The cosine Arc model for uncalibrated flux for asymmetric qubit. This function implements the inverse of "Qubit_dac_to_freq" frequency (Hz) Ej_max (Hz): Maximum josephson energy E_c (Hz): charging energy of the qubit V_per_phi0 (V): volt per phi0 (convert voltage to flux) asym (dimensionless asymmetry param) = abs((EJ1-EJ2)/(EJ1+EJ2)) couping (Hz): coupling to resonator fr (Hz): frequency of resonator dac_sweet_spot (V): voltage at which the sweet-spot is found branch (enum: 'positive' 'negative' or "smallest") ''' if V_per_phi0 is None and dac_flux_coefficient is None: raise ValueError('Please specify "V_per_phi0".') if dac_sweet_spot is None and phi_park is None: raise ValueError('Please specify "phi_park".') elif dac_sweet_spot is not None and phi_park is not None: raise ValueError('"phi_park" and "dac_sweet_spot" cannot ' 'be used simultaneously.') if phi_park is not None: dac_sweet_spot = phi_park * V_per_phi0 return_float = False if np.ndim(frequency) == 0: frequency = [frequency] return_float = True E_j = [transmon.transmon_resonator_ej_anh_frg_chi( f, ec=E_c, frb=fr, gb=coupling)[0] for f in frequency] E_j = np.array(E_j) r = E_j / Ej_max if np.any(r > 1): log.warning(f'Ratio Ej/Ej_max is larger than 1 at ' f'indices {np.argwhere(r 
> 1)}.Truncating to 1.') r[r>1] = 1 phi = np.arccos(np.sqrt((r**2 - asymmetry**2)/(1-asymmetry**2))) if dac_flux_coefficient is not None: log.warning('"dac_flux_coefficient" deprecated. Please use the ' 'physically meaningful "V_per_phi0" instead.') V_per_phi0 = np.pi / dac_flux_coefficient dac_voltage_pos = phi * V_per_phi0 / np.pi + dac_sweet_spot dac_voltage_neg = -phi * V_per_phi0 / np.pi + dac_sweet_spot if branch == 'positive': dac_voltage = dac_voltage_pos elif branch == 'negative': dac_voltage = dac_voltage_neg elif branch == 'smallest': if np.ndim(phi) != 0: dac_voltage = np.array([dac_voltage_pos, dac_voltage_neg]) idxs0 = np.argmin(np.abs(dac_voltage), 0) idxs1 = np.arange(len(dac_voltage_pos)) dac_voltage = dac_voltage[idxs0, idxs1] else: dac_voltage = dac_voltage_pos \ if abs(dac_voltage_pos) < abs(dac_voltage_neg) \ else dac_voltage_neg else: raise ValueError('branch {} not recognized'.format(branch)) return dac_voltage[0] if return_float else dac_voltage def Resonator_dac_to_freq(dac_voltage, f_max_qubit, f_0_res, E_c, dac_sweet_spot, coupling, V_per_phi0=None, dac_flux_coefficient=None, asymmetry=0): qubit_freq = Qubit_dac_to_freq(dac_voltage=dac_voltage, f_max=f_max_qubit, E_c=E_c, dac_sweet_spot=dac_sweet_spot, V_per_phi0=V_per_phi0, dac_flux_coefficient=dac_flux_coefficient, asymmetry=asymmetry) delta_qr = (qubit_freq - f_0_res) lamb_shift = (coupling ** 2 / delta_qr) resonator_freq = f_0_res - lamb_shift return resonator_freq def Qubit_dac_to_detun(dac_voltage, f_max, E_c, dac_sweet_spot, V_per_phi0, asymmetry=0): ''' The cosine Arc model for uncalibrated flux for asymmetric qubit. 
dac_voltage (V) f_max (Hz): sweet-spot frequency of the qubit E_c (Hz): charging energy of the qubit V_per_phi0 (V): volt per phi0 (convert voltage to flux) dac_sweet_spot (V): voltage at which the sweet-spot is found asymmetry (dimensionless asymmetry param) = abs((EJ1-EJ2)/(EJ1+EJ2)) ''' return f_max - Qubit_dac_to_freq(dac_voltage, f_max=f_max, E_c=E_c, dac_sweet_spot=dac_sweet_spot, V_per_phi0=V_per_phi0, asymmetry=asymmetry) def Qubit_freq_to_dac(frequency, f_max, dac_sweet_spot=None, V_per_phi0=None, dac_flux_coefficient=None, asymmetry=0, E_c=0, phi_park=None, branch='smallest'): ''' The cosine Arc model for uncalibrated flux for asymmetric qubit. This function implements the inverse of "Qubit_dac_to_freq" frequency (Hz) f_max (Hz): sweet-spot frequency of the qubit E_c (Hz): charging energy of the qubit V_per_phi0 (V): volt per phi0 (convert voltage to flux) asym (dimensionless asymmetry param) = abs((EJ1-EJ2)/(EJ1+EJ2)) dac_sweet_spot (V): voltage at which the sweet-spot is found branch (enum: 'positive' 'negative') ''' if V_per_phi0 is None and dac_flux_coefficient is None: raise ValueError('Please specify "V_per_phi0".') if dac_sweet_spot is None and phi_park is None: raise ValueError('Please specify "phi_park".') elif dac_sweet_spot is not None and phi_park is not None: raise ValueError('"phi_park" and "dac_sweet_spot" cannot ' 'be used simultaneously.') if phi_park is not None: dac_sweet_spot = phi_park * V_per_phi0 # asymm_term = (asymmetry**2 + (1-asymmetry**2)) # dac_term = np.arccos(((frequency+E_c)/((f_max+E_c) * asymm_term))**2) dac_term = np.arccos(np.sqrt( (((frequency + E_c) / (f_max + E_c)) ** 4 - asymmetry ** 2) / (1 - asymmetry ** 2))) if dac_flux_coefficient is not None: log.warning('"dac_flux_coefficient" deprecated. 
Please use the ' 'physically meaningful "V_per_phi0" instead.') V_per_phi0 = np.pi / dac_flux_coefficient dac_voltage_pos = dac_term * V_per_phi0 / np.pi + dac_sweet_spot dac_voltage_neg = -dac_term * V_per_phi0 / np.pi + dac_sweet_spot if branch == 'positive': dac_voltage = dac_voltage_pos elif branch == 'negative': dac_voltage = dac_voltage_neg elif branch == 'smallest': if np.ndim(dac_term) != 0: dac_voltage = np.array([dac_voltage_pos, dac_voltage_neg]) idxs0 = np.argmin(np.abs(dac_voltage), 0) idxs1 = np.arange(len(dac_voltage_pos)) dac_voltage = dac_voltage[idxs0, idxs1] else: dac_voltage = dac_voltage_pos \ if abs(dac_voltage_pos) < abs(dac_voltage_neg) \ else dac_voltage_neg else: raise ValueError('branch {} not recognized'.format(branch)) return dac_voltage def Qubit_dac_sensitivity(dac_voltage, f_max: float, E_c: float, dac_sweet_spot: float, V_per_phi0: float, asymmetry: float = 0): ''' Derivative of the qubit detuning vs dac at dac_voltage. The returned quantity is "dfreq/dPhi (dac_voltage)" ''' cos_term = np.cos(np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot)) sin_term = np.sin(np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot)) return ((f_max + E_c) * (1 - asymmetry ** 2) * np.pi / (2 * V_per_phi0) * cos_term * sin_term * (asymmetry ** 2 + (1 - asymmetry ** 2) * cos_term ** 2) ** (-0.75)) def QubitFreqDac(dac_voltage, f_max, E_c, dac_sweet_spot, dac_flux_coefficient, asymmetry=0): log.warning('deprecated, replace QubitFreqDac with Qubit_dac_to_freq') return Qubit_dac_to_freq(dac_voltage, f_max, E_c, dac_sweet_spot, dac_flux_coefficient, asymmetry) def QubitFreqFlux(flux, f_max, E_c, flux_zero, dac_offset=0): 'The cosine Arc model for calibrated flux.' calculated_frequency = (f_max + E_c) * np.sqrt(np.abs( np.cos(np.pi * (flux - dac_offset) / flux_zero))) - E_c return calculated_frequency def CosFunc(t, amplitude, frequency, phase, offset): ''' parameters: t, time in s amplitude a.u. frequency in Hz (f, not omega!) phase in rad offset a.u. 
    '''
    return amplitude * np.cos(2 * np.pi * frequency * t + phase) + offset


def ResidZZFuncJoint(t, amplitude, amplitude1, tau, alpha, t11,
                     frequency, phase, offset):
    '''
    Pair of damped-oscillation curves for a joint residual-ZZ fit.

    Returns a tuple ``(without_pulse, with_pulse)``: the first is a plain
    exponentially damped cosine; the second adds the frequency shift
    ``alpha`` and quadrature mixing with weight ``x = t11 * alpha``
    — presumably the case with the spectator excited; TODO confirm
    against the measurement code that uses this model.
    '''
    x = t11*alpha
    without_pulse = amplitude * np.exp(-t/tau)*np.cos(
        2*np.pi*frequency*t + phase) + offset
    with_pulse = amplitude1 * np.exp(-t/tau)*(x*np.exp(-t*alpha/x)*np.cos(
        2*np.pi*(frequency+alpha)*t + phase) - np.sin(
        2*np.pi*frequency*t + phase))/np.sqrt(1+x**2) + offset
    return without_pulse, with_pulse


def ResidZZFunc(t, amplitude, tau, alpha, x, frequency, phase, offset):
    # Single-curve version of ResidZZFuncJoint's "with_pulse" branch, with
    # the mixing weight x passed in directly instead of t11*alpha.
    return amplitude * np.exp(-t/tau)*(x*np.exp(-t*alpha/x)*np.cos(
        2*np.pi*(frequency+alpha)*t + phase) - np.sin(
        2*np.pi*frequency*t + phase))/np.sqrt(1+x**2) + offset


def ExpDecayFunc(t, tau, amplitude, offset, n):
    # Stretched-exponential decay; n=1 gives a plain exponential.
    return amplitude * np.exp(-(t / tau) ** n) + offset


def ExpDecayPmod(t, T2echo, delta, n0, chi, kappa, phase, amplitude, offset):
    """
    specific form of exponential decay used for residual resonator photon
    readout

    By the parameter names, n0 is presumably the residual photon number,
    chi the dispersive shift, kappa the resonator linewidth and delta a
    detuning — verify against the calling analysis.
    """
    return amplitude * (1 - np.imag(np.exp(-t*(1/T2echo + 2j*np.pi*delta)+1j*(
        phase-2*n0*chi*(1-np.exp(-t*(kappa + 2j*chi)))/(kappa + 2j*chi)))))+offset


def CombinedOszExpDecayFunc(t, tau, tau_gauss, phase, n0, chi, delta,
                            amplitude, oscillation_offset, offset):
    """
    Combination of gaussian and exponential decay
    """
    # NOTE(review): despite the docstring, the tau_gauss term is linear in
    # t, not (t / tau_gauss)**2 — confirm whether a Gaussian envelope was
    # intended.
    return amplitude * np.exp(-(t / tau)-(t / tau_gauss))*(np.cos(
        2 * np.pi * (n0*2*chi/(2*np.pi)+delta) * t + phase) + oscillation_offset) + offset


def idle_error_rate_exp_decay(N, N1, N2, A, offset):
    """
    exponential decay consisting of two components: a term exponential in
    N (scale N1) and a term Gaussian in N (scale N2).
    """
    return A * np.exp(-N / N1 - (N / N2) ** 2) + offset


def gain_corr_ExpDecayFunc(t, tau, amp, gc):
    """
    Specific form of an exponential decay used for flux corrections.
    Includes a "gain correction" parameter that is ignored when correcting
    the distortions.
""" y = gc * (1 + amp * np.exp(-t / tau)) return y def gain_corr_double_ExpDecayFunc(t, tau_A, tau_B, amp_A, amp_B, gc): """ Specific form of an exponential decay used for flux corrections. Includes a "gain correction" parameter that is ignored when correcting the distortions. """ y = gc * (1 + amp_A * np.exp(-t / tau_A) + amp_B * np.exp(-t / tau_B)) return y def ExpDampOscFunc(t, tau, n, frequency, phase, amplitude, oscillation_offset, exponential_offset): return amplitude * np.exp(-(t / tau) ** n) * (np.cos( 2 * np.pi * frequency * t + phase) + oscillation_offset) + \ exponential_offset def GaussExpDampOscFunc(t, tau, tau_2, frequency, phase, amplitude, oscillation_offset, exponential_offset): return amplitude * np.exp(-(t / tau_2) ** 2 - (t / tau)) * (np.cos( 2 * np.pi * frequency * t + phase) + oscillation_offset) + exponential_offset def ExpDampDblOscFunc(t, tau, n, freq_1, freq_2, phase_1, phase_2, amp_1, amp_2, osc_offset_1, osc_offset_2, exponential_offset): ''' Exponential decay with double cosine modulation ''' exp_decay = np.exp(-(t / tau) ** n) cos_1 = (np.cos( 2 * np.pi * freq_1 * t + phase_1) + osc_offset_1) cos_2 = (np.cos( 2 * np.pi * freq_2 * t + phase_2) + osc_offset_2) return amp_1 * exp_decay * cos_1 + amp_2 * exp_decay * cos_2 + exponential_offset def HangerFuncAmplitude(f, f0, Q, Qe, A, theta): ''' This is the function for a hanger which does not take into account a possible slope. This function may be preferred over SlopedHangerFunc if the area around the hanger is small. In this case it may misjudge the slope Theta is the asymmetry parameter Note! units are inconsistent f is in Hz f0 is in GHz ''' return abs(A * (1. - Q / Qe * np.exp(1.j * theta) / (1. 
+ 2.j * Q * (f / 1.e9 - f0) / f0))) def HangerFuncComplex(f, pars): ''' This is the complex function for a hanger which DOES NOT take into account a possible slope Input: f = frequency pars = parameters dictionary f0, Q, Qe, A, theta, phi_v, phi_0 Author: Stefano Poletto ''' f0 = pars['f0'] Q = pars['Q'] Qe = pars['Qe'] A = pars['A'] theta = pars['theta'] phi_v = pars['phi_v'] phi_0 = pars['phi_0'] S21 = A * (1 - Q / Qe * np.exp(1j * theta) / (1 + 2.j * Q * (f / 1.e9 - f0) / f0)) * \ np.exp(1j * (phi_v * f + phi_0)) return S21 def PolyBgHangerFuncAmplitude(f, f0, Q, Qe, A, theta, poly_coeffs): # This is the function for a hanger (lambda/4 resonator) which takes into # account a possible polynomial background # NOT DEBUGGED return np.abs((1. + np.polyval(poly_coeffs, (f / 1.e9 - f0) / f0)) * HangerFuncAmplitude(f, f0, Q, Qe, A, theta)) def SlopedHangerFuncAmplitude(f, f0, Q, Qe, A, theta, slope): # This is the function for a hanger (lambda/4 resonator) which takes into # account a possible slope df return np.abs((1. + slope * (f / 1.e9 - f0) / f0) * HangerFuncAmplitude(f, f0, Q, Qe, A, theta)) def SlopedHangerFuncComplex(f, f0, Q, Qe, A, theta, phi_v, phi_0, slope): # This is the function for a hanger (lambda/4 resonator) which takes into # account a possible slope df return (1. + slope * (f / 1.e9 - f0) / f0) * np.exp(1.j * (phi_v * f + phi_0 - phi_v * f[0])) * \ HangerFuncComplex(f, f0, Q, Qe, A, theta) def linear_with_offset(x, a, b): ''' A linear signal with a fixed offset. ''' return a * x + b def linear_with_background(x, a, b): ''' A linear signal with a fixed background. ''' return np.sqrt((a * x) ** 2 + b ** 2) def linear_with_background_and_offset(x, a, b, c): ''' A linear signal with a fixed background. 
    '''
    return np.sqrt((a * x) ** 2 + b ** 2) + c


def gaussianCDF(x, amplitude, mu, sigma):
    """
    CDF of gaussian is P(X<=x) = .5 erfc((mu-x)/(sqrt(2)sig))

    The result is scaled by ``amplitude``, so it saturates at
    ``amplitude`` rather than 1.
    """
    return 0.5 * amplitude * scipy.special.erfc((mu - x) / (np.sqrt(2)*sigma))


def double_gaussianCDF(x, A_amplitude, A_mu, A_sigma,
                       B_amplitude, B_mu, B_sigma):
    """
    CDF of two gaussians added on top of each other.
    uses "gaussianCDF"
    """
    CDF_A = gaussianCDF(x, amplitude=A_amplitude, mu=A_mu, sigma=A_sigma)
    CDF_B = gaussianCDF(x, amplitude=B_amplitude, mu=B_mu, sigma=B_sigma)
    return CDF_A + CDF_B


def TwoErrorFunc(x, amp, mu_A, mu_B, sigma, offset):
    '''
    Window built from two error functions: a step of height ``amp`` up at
    ``mu_A`` and an equal step down at ``mu_B`` (both of width ``sigma``),
    on top of a constant ``offset``.
    '''
    return offset + double_gaussianCDF(x, amp, mu_A, sigma, -amp, mu_B, sigma)


def ro_gauss(x, A_center, B_center, A_sigma, B_sigma, A_amplitude,
             B_amplitude, A_spurious, B_spurious):
    '''
    Two double-gaussians with sigma and mu/center of the residuals equal
    to the according state.

    ``x`` is a pair of axes (one per prepared state); each returned
    histogram is a mixture of its own peak and a "spurious" fraction of
    the other state's peak.
    '''
    gauss = lmfit.lineshapes.gaussian
    A_gauss = gauss(x=x[0], center=A_center, sigma=A_sigma,
                    amplitude=A_amplitude)
    B_gauss = gauss(x=x[1], center=B_center, sigma=B_sigma,
                    amplitude=B_amplitude)
    gauss0 = ((1-A_spurious)*A_gauss + A_spurious*B_gauss)
    gauss1 = ((1-B_spurious)*B_gauss + B_spurious*A_gauss)
    return [gauss0, gauss1]


def ro_CDF(x, A_center, B_center, A_sigma, B_sigma, A_amplitude,
           B_amplitude, A_spurious, B_spurious):
    # CDF counterpart of ro_gauss, built from gaussianCDF with the same
    # spurious-population mixing.
    cdf = gaussianCDF
    A_gauss = cdf(x=x[0], mu=A_center, sigma=A_sigma, amplitude=A_amplitude)
    B_gauss = cdf(x=x[1], mu=B_center, sigma=B_sigma, amplitude=B_amplitude)
    gauss0 = ((1-A_spurious)*A_gauss + A_spurious*B_gauss)
    gauss1 = ((1-B_spurious)*B_gauss + B_spurious*A_gauss)
    return [gauss0, gauss1]


def ro_CDF_discr(x, A_center, B_center, A_sigma, B_sigma, A_amplitude,
                 B_amplitude, A_spurious, B_spurious):
    # Same as ro_CDF but with the spurious fractions forced to zero; the
    # A_spurious/B_spurious arguments are accepted for interface
    # compatibility and deliberately ignored.
    return ro_CDF(x, A_center, B_center, A_sigma, B_sigma,
                  A_amplitude, B_amplitude, A_spurious=0, B_spurious=0)


def gaussian_2D(x, y, amplitude=1, center_x=0, center_y=0, sigma_x=1,
                sigma_y=1):
    '''
    A 2D
gaussian function. if you want to use this for fitting you need to flatten your data first. ''' gauss = lmfit.lineshapes.gaussian val = (gauss(x, amplitude, center_x, sigma_x) * gauss(y, amplitude, center_y, sigma_y)) return val def DoubleExpDecayFunc(t, tau1, tau2, amp1, amp2, offset, n): return (offset + amp1 * np.exp(-(t / tau1) ** n) + amp2 * np.exp(-(t / tau2) ** n)) def TripleExpDecayFunc(t, tau1, tau2, tau3, amp1, amp2, amp3, offset, n): return (offset + amp1 * np.exp(-(t / tau1) ** n) + amp2 * np.exp(-(t / tau2) ** n) + amp3 * np.exp(-(t / tau3) ** n)) def avoided_crossing_mediated_coupling(flux, f_bus, f_center1, f_center2, c1, c2, g, flux_state=0): """ Calculates the frequencies of an avoided crossing for the following model. [f_b, g, g ] [g, f_1, 0 ] [g, 0, f_2] f1 = c1*flux + f_center1 f2 = c2*flux + f_center2 f_b = constant g: the coupling strength, beware to relabel your variable if using this model to fit J1 or J2. flux_state: this is a switch used for fitting. It determines which transition to return """ if type(flux_state) == int: flux_state = [flux_state] * len(flux) frequencies = np.zeros([len(flux), 2]) for kk, dac in enumerate(flux): f_1 = dac * c1 + f_center1 f_2 = dac * c2 + f_center2 matrix = [[f_bus, g, g], [g, f_1, 0.], [g, 0., f_2]] frequencies[kk, :] = np.linalg.eigvalsh(matrix)[:2] result = np.where(flux_state, frequencies[:, 0], frequencies[:, 1]) return result def avoided_crossing_direct_coupling(flux, f_center1, f_center2, c1, c2, g, flux_state=0): """ Calculates the frequencies of an avoided crossing for the following model. [f_1, g ] [g, f_2] f1 = c1*flux + f_center1 f2 = c2*flux + f_center2 g: the coupling strength, beware to relabel your variable if using this model to fit J1 or J2. flux_state: this is a switch used for fitting. 
def ErfWindow(t, t_start, t_end, t_rise, amplitude, offset):
    '''
    Flat-top window built from two error functions.

    parameters:
        t, time in s
        t_start, start of window in s
        t_end, end of window in s
        amplitude a.u.
        offset a.u.
        t_rise in s (rise time)
    '''
    return offset + amplitude/2*(scipy.special.erf((t - t_start)/(t_rise/2.6))
                                 - scipy.special.erf((t - t_end)/(t_rise/2.6)))


def hanger_with_pf(f, phi, J, kappa_pf, omega_ro, omega_pf, gamma_ro, A):
    """
    |S12| of a readout resonator (omega_ro, linewidth gamma_ro) coupled
    with strength J to a Purcell filter (omega_pf, linewidth kappa_pf),
    in hanger geometry with overall amplitude A and asymmetry angle phi.
    """
    S12 = A * np.abs(
        np.cos(phi) - np.exp(1j*phi)*kappa_pf *
        (gamma_ro-2j*(f-omega_ro)) /
        (4*J*J+(kappa_pf-2j*(f-omega_pf)) *
         (gamma_ro-2j*(f-omega_ro))))
    return S12


def simultan_hanger_with_pf(f, phi, J, kappa_pf, omega_ro_0, omega_ro_1,
                            omega_pf, gamma_ro, A):
    """
    Two hanger_with_pf traces sharing all parameters except the readout
    frequency. f holds both frequency axes concatenated (first half for
    omega_ro_0, second half for omega_ro_1); returns the stacked |S12|.
    """
    f0 = f[:int(len(f)/2)]
    f1 = f[int(len(f)/2):]
    S12_0 = A * np.abs(
        np.cos(phi) - np.exp(1j*phi)*kappa_pf *
        (gamma_ro-2j*(f0-omega_ro_0)) /
        (4*J*J+(kappa_pf-2j*(f0-omega_pf)) *
         (gamma_ro-2j*(f0-omega_ro_0))))
    S12_1 = A * np.abs(
        np.cos(phi) - np.exp(1j*phi)*kappa_pf *
        (gamma_ro-2j*(f1-omega_ro_1)) /
        (4*J*J+(kappa_pf-2j*(f1-omega_pf)) *
         (gamma_ro-2j*(f1-omega_ro_1))))
    return np.hstack((S12_0, S12_1))


def fit_hanger_with_pf(model, data, simultan=False):
    """
    Fit a hanger-with-Purcell-filter trace (or a pair of them).

    model: a lmfit model of the pf_ro_S12 function
    data: of the shape [[freq,|S12|],[freq,|S12|],...]
        where freq should be in GHz
    return: a lmfit model.fit object (a list of two when simultan=True)
    """
    def hanger_with_pf_guess(model, data):
        # Initial guess from the positions/widths of the two dips.
        A_guess = 1
        midmax = (data[0, 1]+data[-1, 1])/2
        J_guess = 0.006
        omega_pf_guess = min(data, key=lambda t: t[1])[0]
        pf_filter = [False]*len(data)
        for x in range(0, len(data)):
            if data[x, 1] <= midmax:
                pf_filter[x] = True
        kappa_pf_guess = min(
            2*abs(list(itertools.compress(data[:, 0], pf_filter))[0]
                  - omega_pf_guess),
            2*abs(list(itertools.compress(data[:, 0], pf_filter))[-1]
                  - omega_pf_guess),
            0.05)
        # mask out the Purcell-filter dip to locate the readout dip
        omega_ro_filter = [True]*len(data)
        for x in range(0, len(data)):
            if abs(data[x, 0]-omega_pf_guess) <= kappa_pf_guess/2:
                omega_ro_filter[x] = False
        omega_ro_guess = min(list(itertools.compress(data, omega_ro_filter)),
                             key=lambda t: t[1])[0]
        for x in range(0, len(data)):
            if min(omega_pf_guess, omega_ro_guess) <= data[x, 0]:
                if data[x, 0] <= max(omega_pf_guess, omega_ro_guess):
                    if midmax <= data[x, 1]:
                        midmax = data[x, 0]
        model.set_param_hint('A', value=A_guess, min=0, max=1)
        model.set_param_hint('phi', value=4, min=0, max=2*np.pi)
        model.set_param_hint('J', value=J_guess, min=0, max=0.015)
        model.set_param_hint('kappa_pf', value=kappa_pf_guess, min=0,
                             max=0.05)
        model.set_param_hint('omega_pf', value=omega_pf_guess, min=0)
        model.set_param_hint('gamma_ro', value=0.0001, vary=False)
        model.set_param_hint('omega_ro', value=omega_ro_guess)
        params = model.make_params()
        return params

    if simultan:
        data_loc_0 = np.copy(data[0][:-2])
        data_loc_1 = np.copy(data[1][:-2])
        # individual fits provide the starting point for the joint fit
        fit0 = fit_hanger_with_pf(HangerWithPfModel, np.transpose(
            [data_loc_0[:, 0], data_loc_0[:, 1]]), simultan=False)
        fit1 = fit_hanger_with_pf(HangerWithPfModel, np.transpose(
            [data_loc_1[:, 0], data_loc_1[:, 1]]), simultan=False)
        if not len(data_loc_1) == len(data_loc_0):
            if len(data_loc_1) < len(data_loc_0):
                data_loc_0 = data_loc_0[:len(data_loc_1)]
            else:
                data_loc_1 = data_loc_1[:len(data_loc_0)]
        # work in GHz for numerical stability of the fit
        data_loc_0[:, 0] *= 1e-9
        data_loc_1[:, 0] *= 1e-9
        data_loc = np.vstack((data_loc_0, data_loc_1))
        phi_guess = (fit0.params['phi'].value+fit1.params['phi'].value)/2
        J_guess = (fit0.params['J'].value+fit1.params['J'].value)/2*1e-9
        kap_guess = (fit0.params['kappa_pf'].value +
                     fit1.params['kappa_pf'].value)/2*1e-9
        omega_pf_guess = (fit0.params['omega_pf'].value +
                          fit1.params['omega_pf'].value)/2*1e-9
        # bug fix: this hint was previously set twice with identical args
        model.set_param_hint('A', value=1, min=0, max=1)
        model.set_param_hint('phi', value=phi_guess, min=0, max=2*np.pi)
        model.set_param_hint('J', value=J_guess, min=0, max=0.015)
        model.set_param_hint('kappa_pf', value=kap_guess, min=0, max=0.05)
        model.set_param_hint('omega_pf', value=omega_pf_guess, min=0)
        model.set_param_hint('gamma_ro', value=0.0001, vary=False)
        model.set_param_hint('omega_ro_0',
                             value=fit0.params['omega_ro'].value*1e-9,
                             min=0)
        model.set_param_hint('omega_ro_1',
                             value=fit1.params['omega_ro'].value*1e-9,
                             min=0)
        guess = model.make_params()
        fit_out = model.fit(data_loc[:, 1]/max(data_loc[:, 1]), guess,
                            f=data_loc[:, 0])
        tol = 0.4
    else:
        data_loc = np.copy(data)
        data_loc[:, 0] *= 1e-9
        guess = hanger_with_pf_guess(model, np.transpose(
            [data_loc[:, 0], data_loc[:, 1]/max(data_loc[:, 1])]))
        fit_out = model.fit(data_loc[:, 1]/max(data_loc[:, 1]), guess,
                            f=data_loc[:, 0],)
        tol = 0.1
    if fit_out.chisqr > tol:
        # brute-force restart grid when the first fit did not converge
        fit_lst = []
        for shift_pf in np.linspace(-0.01, 0.01, 5):
            for shift_ro in np.linspace(-0.01, 0.01, 5):
                for phase in np.linspace(0, 2*np.pi, 6):
                    for kappa_shift in np.linspace(-0.015, 0.015, 4):
                        for J_shift in np.linspace(-0.002, 0.004, 4):
                            if simultan:
                                fit_lst.append(model.fit(
                                    data_loc[:, 1]/max(data_loc[:, 1]),
                                    guess, f=data_loc[:, 0], phi=phase,
                                    omega_pf=float(guess['omega_pf'])+shift_pf,
                                    omega_ro_0=float(guess['omega_ro_0'])+shift_ro,
                                    omega_ro_1=float(guess['omega_ro_1'])+shift_ro,
                                    kappa_pf=float(guess['kappa_pf'])+kappa_shift,
                                    J=float(guess['J'])+J_shift))
                            else:
                                fit_lst.append(model.fit(
                                    data_loc[:, 1]/max(data_loc[:, 1]),
                                    guess, f=data_loc[:, 0], phi=phase,
                                    omega_pf=float(guess['omega_pf'])+shift_pf,
                                    omega_ro=float(guess['omega_ro'])+shift_ro,
                                    kappa_pf=float(guess['kappa_pf'])+kappa_shift,
                                    J=float(guess['J'])+J_shift))
                            # NOTE(review): this break only exits the
                            # innermost (J_shift) loop, so the outer scans
                            # keep running even after a good fit — kept
                            # as-is to preserve behavior.
                            if fit_lst[-1].chisqr <= tol:
                                break
        chisqr_lst = [fit.chisqr for fit in fit_lst]
        fit_out = fit_lst[np.argmin(chisqr_lst)]
    # convert fitted parameters back from GHz to Hz
    fit_out.params['J'].max = 15e6
    fit_out.params['kappa_pf'].max = 50e6
    fit_out.params['omega_pf'].value *= 1e9
    fit_out.params['kappa_pf'].value *= 1e9
    fit_out.params['J'].value *= 1e9
    fit_out.params['gamma_ro'].value *= 1e9
    fit_out.params['A'].value *= max(data_loc[:, 1])
    if simultan:
        fit_out.params['omega_ro_0'].value *= 1e9
        fit_out.params['omega_ro_1'].value *= 1e9
    else:
        fit_out.params['omega_ro'].value *= 1e9
    if fit_out.chisqr > tol:
        log.warning('The fit did not converge properly: chi^2 = '
                    '' + str(fit_out.chisqr))
    # Unpack simultan fits
    if simultan:
        fit0.params['omega_pf'].value = fit_out.params['omega_pf'].value
        fit0.params['kappa_pf'].value = fit_out.params['kappa_pf'].value
        fit0.params['J'].max = fit_out.params['J'].max
        fit0.params['J'].value = fit_out.params['J'].value
        fit0.params['gamma_ro'].value = fit_out.params['gamma_ro'].value
        fit0.params['omega_ro'].value = fit_out.params['omega_ro_0'].value
        fit1.params['omega_pf'].value = fit_out.params['omega_pf'].value
        fit1.params['kappa_pf'].value = fit_out.params['kappa_pf'].value
        fit1.params['J'].max = fit_out.params['J'].max
        fit1.params['J'].value = fit_out.params['J'].value
        fit1.params['gamma_ro'].value = fit_out.params['gamma_ro'].value
        fit1.params['omega_ro'].value = fit_out.params['omega_ro_1'].value
        fit_out = [fit0, fit1]
    return fit_out


######################
# Residual functions #
######################

def residual_complex_fcn(pars, cmp_fcn, x, y):
    '''
    Residual of a complex function with complex results 'y' and real
    input values 'x'. For resonators 'x' is the frequency, 'y' the
    complex transmission.
    Input:
        pars = parameters dictionary (check the corresponding function
               'cmp_fcn' for the parameters to pass)
        cmp_fcn = complex function
        x = input real values to 'cmp_fcn'
        y = output complex values from 'cmp_fcn'

    Author = Stefano Poletto
    '''
    cmp_values = cmp_fcn(x, pars)
    res = cmp_values - y
    # lmfit needs a real residual: stack real and imaginary parts
    res = np.append(res.real, res.imag)
    return res
####################
#  Guess functions #
####################


def exp_dec_guess(model, data, t):
    '''
    Parameter guess assuming the data follows an exponential decay.
    '''
    offs_guess = data[np.argmax(t)]
    amp_guess = data[np.argmin(t)] - offs_guess
    # tau: the time whose data point is closest to amp/e above the offset
    target = amp_guess * (1 / np.e) + offs_guess
    tau_guess = t[np.argmin(abs(target - data))]
    model.set_param_hint('amplitude', value=amp_guess)
    model.set_param_hint('tau', value=tau_guess)
    model.set_param_hint('n', value=1, vary=False)
    model.set_param_hint('offset', value=offs_guess)
    return model.make_params()


def group_consecutives(vals, step=1):
    """Return list of consecutive lists of numbers from vals (number list)."""
    current = []
    result = [current]
    expected = None
    for v in vals:
        if expected is None or v == expected:
            current.append(v)
        else:
            current = [v]
            result.append(current)
        expected = v + step
    return result


def arc_guess(freq, dac, dd=0.1):
    '''
    Guess extremal frequencies, sweet-spot dac value and period of a
    flux arc. Expects the dac values to be sorted!
    :param freq: frequencies along the arc
    :param dac: corresponding (sorted) dac voltages
    :param dd: fraction of points used to estimate the extremes
    :return: (fmax, fmin, sweet-spot dac value, arc length)
    '''
    p = round(max(dd * len(dac), 1))
    f_small = np.average(np.sort(freq)[:p]) + np.std(np.sort(freq)[:p])
    f_big = np.average(np.sort(freq)[-p:]) - np.std(np.sort(freq)[-p:])

    fmax = np.max(freq)
    fmin = np.min(freq)

    # indices near the maxima (sweet spots) and minima (anti-sweet spots)
    dacs_ss = np.where(freq >= f_big)[0]
    dacs_as = np.where(freq <= f_small)[0]

    dacs_ss_groups = group_consecutives(vals=dacs_ss, step=1)
    dacs_as_groups = group_consecutives(vals=dacs_as, step=1)

    dacs_ss_single = []
    for grp in dacs_ss_groups:
        ind = grp[np.argmax(freq[grp])]
        dacs_ss_single.append(ind)

    # sweet spot: the maximum closest to zero dac voltage
    dac_ss_group_index = np.argmin(np.abs(dac[dacs_ss_single]))
    dac_ss_index = dacs_ss_single[dac_ss_group_index]
    min_left = 0
    min_right = len(dac) - 1
    dacs_as_single = []
    for grp in dacs_as_groups:
        if 0 in grp:
            ind = 0
        elif len(dac) - 1 in grp:
            ind = len(dac) - 1
        else:
            ind = int(round(np.average(grp)))
        if ind < dac_ss_index:
            min_left = max(ind, min_left)
        elif ind > dac_ss_index:
            min_right = min(ind, min_right)
        dacs_as_single.append(ind)

    arc_len = (dac[min_right] - dac[min_left])
    # when only half an arc is visible the full period is twice as long
    if min_left == 0 or min_right == len(dac) - 1:
        arc_len *= 2
    elif len(dacs_ss_groups) > 1:
        arc_len = np.average(dac[dacs_ss_single[1:]] -
                             dac[dacs_ss_single[:-1]])

    return fmax, fmin, dac[dac_ss_index], arc_len


def Resonator_dac_arch_guess(model, freq, dac_voltage,
                             f_max_qubit: float = None, E_c: float = None):
    '''
    Guess parameters for a resonator-vs-dac arch model from the arc
    shape; f_max_qubit and E_c are fixed when provided.
    '''
    fmax, fmin, dac_ss, period = arc_guess(freq=freq, dac=dac_voltage)
    coup_guess = 15e6

    # todo make better f_res guess
    f_res = np.mean(freq)  # - (coup_guess ** 2 / (f_max_qubit - fmax))
    f_max_qubit_vary = f_max_qubit is None
    f_max_qubit = f_max_qubit or f_res - 500e6

    model.set_param_hint('f_0_res', value=f_res, min=f_res / 2,
                         max=2 * f_res)
    model.set_param_hint('f_max_qubit', value=f_max_qubit, min=3e9,
                         max=8.5e9, vary=f_max_qubit_vary)
    model.set_param_hint('dac_sweet_spot', value=dac_ss,
                         min=(dac_ss - 0.005) / 2, max=2 * (dac_ss + 0.005))
    model.set_param_hint('V_per_phi0', value=period,
                         min=(period - 0.005) / 3, max=5 * (period + 0.005))
    model.set_param_hint('asymmetry', value=0, max=1, min=-1)
    model.set_param_hint('coupling', value=coup_guess, min=1e6, max=80e6)
    E_c = E_c or 260e6
    model.set_param_hint('E_c', value=E_c, min=50e6, max=400e6)
    return model.make_params()
def Qubit_dac_arch_guess(model, data, dac_voltage, fixed_params=None):
    '''
    Guess parameters for a qubit-frequency-vs-dac arch model.
    Entries present in fixed_params are used as-is and not varied.
    '''
    f_max, dac_ss = np.max(data), dac_voltage[np.argmax(data)]
    f_min, dac_lss = np.min(data), dac_voltage[np.argmin(data)]
    # upper and lower sweet spot are half a flux quantum apart
    V_per_phi0 = abs(2*(dac_ss-dac_lss))
    d = (f_min)**2/(f_max)**2

    if fixed_params is None:
        fixed_params = {"E_c": 0}
    model.set_param_hint('f_max', value=fixed_params.get('f_max', f_max),
                         min=0, vary=not 'f_max' in fixed_params)
    model.set_param_hint('V_per_phi0',
                         value=fixed_params.get('V_per_phi0', V_per_phi0),
                         min=0, vary=not 'V_per_phi0' in fixed_params)
    model.set_param_hint('asymmetry',
                         value=fixed_params.get('asymmetry', d),
                         min=0, max=1,
                         vary=not 'asymmetry' in fixed_params)
    model.set_param_hint('E_c', value=fixed_params.get('E_c', 0),
                         vary=not 'E_c' in fixed_params)
    if "phi_park" in fixed_params:
        model.set_param_hint('phi_park', value=fixed_params['phi_park'],
                             vary=False)
        # dac_sweet_spot should be eligible for fitting only if phi_park
        # is not fixed. We cannot specify both in the model as they refer
        # to the same physical quantity.
        # Note that current config does not allow to fit phi_park
    else:
        model.set_param_hint('dac_sweet_spot',
                             value=fixed_params.get('dac_sweet_spot',
                                                    dac_ss),
                             min=-3, max=3,
                             vary=not 'dac_sweet_spot' in fixed_params)
    return model.make_params()


def Qubit_dac_arch_guess_precise(model, data, dac_voltage,
                                 fixed_params=None):
    '''
    Like Qubit_dac_arch_guess, but for the model parametrized by Ej_max
    instead of f_max.
    '''
    f_max, dac_ss = np.max(data), dac_voltage[np.argmax(data)]
    f_min, dac_lss = np.min(data), dac_voltage[np.argmin(data)]
    V_per_phi0 = abs(2*(dac_ss-dac_lss))
    d = (f_min)**2/(f_max)**2

    if fixed_params is None:
        fixed_params = {"E_c": 0}
    # NOTE(review): value defaults to None when 'Ej_max' is absent and the
    # vary flag is keyed on 'f_max', not 'Ej_max' — looks like a
    # copy-paste leftover; kept as-is to preserve behavior. TODO confirm.
    model.set_param_hint('Ej_max', value=fixed_params.get('Ej_max'),
                         min=0, vary=not 'f_max' in fixed_params)
    model.set_param_hint('V_per_phi0',
                         value=fixed_params.get('V_per_phi0', V_per_phi0),
                         min=0, vary=not 'V_per_phi0' in fixed_params)
    model.set_param_hint('asymmetry',
                         value=fixed_params.get('asymmetry', d),
                         min=0, max=1,
                         vary=not 'asymmetry' in fixed_params)
    model.set_param_hint('E_c', value=fixed_params.get('E_c', 0),
                         vary=not 'E_c' in fixed_params)
    if "phi_park" in fixed_params:
        model.set_param_hint('phi_park', value=fixed_params['phi_park'],
                             vary=False)
        # dac_sweet_spot should be eligible for fitting only if phi_park
        # is not fixed. We cannot specify both in the model as they refer
        # to the same physical quantity.
        # Note that current config does not allow to fit phi_park
    else:
        model.set_param_hint('dac_sweet_spot',
                             value=fixed_params.get('dac_sweet_spot',
                                                    dac_ss),
                             min=-3, max=3,
                             vary=not 'dac_sweet_spot' in fixed_params)
    return model.make_params()
def Qubit_dac_arch_guess_res(model, data, dac_voltage, fixed_params=None):
    '''
    Guess parameters for the qubit-vs-dac arch model that includes the
    readout resonator (extra 'coupling' and 'fr' parameters).
    Entries present in fixed_params are used as-is and not varied.
    '''
    f_max, dac_ss = np.max(data), dac_voltage[np.argmax(data)]
    f_min, dac_lss = np.min(data), dac_voltage[np.argmin(data)]
    # upper and lower sweet spot are half a flux quantum apart
    V_per_phi0 = abs(2*(dac_ss-dac_lss))
    d = (f_min)**2/(f_max)**2

    if fixed_params is None:
        fixed_params = {"E_c": 0}
    # NOTE(review): vary flag keyed on 'f_max' rather than 'Ej_max';
    # kept as-is to preserve behavior. TODO confirm.
    model.set_param_hint('Ej_max', value=fixed_params.get('Ej_max'),
                         min=0, vary=not 'f_max' in fixed_params)
    model.set_param_hint('V_per_phi0',
                         value=fixed_params.get('V_per_phi0', V_per_phi0),
                         min=0, vary=not 'V_per_phi0' in fixed_params)
    model.set_param_hint('asymmetry',
                         value=fixed_params.get('asymmetry', d),
                         min=0, max=1,
                         vary=not 'asymmetry' in fixed_params)
    model.set_param_hint('E_c', value=fixed_params.get('E_c', 0),
                         vary=not 'E_c' in fixed_params)
    model.set_param_hint('coupling',
                         value=fixed_params.get('coupling', 100e6),
                         vary=not 'coupling' in fixed_params)
    model.set_param_hint('fr', value=fixed_params.get('fr', 7e9),
                         vary=not 'fr' in fixed_params)
    if "phi_park" in fixed_params:
        model.set_param_hint('phi_park', value=fixed_params['phi_park'],
                             vary=False)
        # dac_sweet_spot should be eligible for fitting only if phi_park
        # is not fixed. We cannot specify both in the model as they refer
        # to the same physical quantity.
        # Note that current config does not allow to fit phi_park
    else:
        model.set_param_hint('dac_sweet_spot',
                             value=fixed_params.get('dac_sweet_spot',
                                                    dac_ss),
                             min=-3, max=3,
                             vary=not 'dac_sweet_spot' in fixed_params)
    return model.make_params()


def idle_err_rate_guess(model, data, N):
    '''
    Assumes exponential decay in estimating the parameters
    '''
    amp_guess = 0.5
    offset = np.mean(data)
    N1 = np.mean(N)
    N2 = np.mean(N)
    params = model.make_params(A=amp_guess, N1=N1, N2=N2, offset=offset)
    return params


def fft_freq_phase_guess(data, t, freq_guess=None):
    '''
    Guess for a cosine fit using FFT, only works for evenly spaced points
    '''
    # Freq guess ! only valid with uniform sampling
    # Only the first half of the spectrum is used, because the second half
    # contains the negative frequency components and we want a positive
    # frequency.
    w = np.fft.fft(data)[:len(data) // 2]
    f = np.fft.fftfreq(len(data), t[1] - t[0])[:len(w)]
    if freq_guess is None:
        w[0] = 0  # Removes DC component from fourier transform
        # Use absolute value of complex valued spectrum
        abs_w = np.abs(w)
        freq_guess = abs(f[abs_w == max(abs_w)][0])

    # the condition data == max(data) can have several solutions
    # (for example when discretization is visible);
    # to prevent errors we pick the first solution
    ph_guess = 2 * np.pi - (2 * np.pi * t[data == max(data)] * freq_guess)[0]
    return freq_guess, ph_guess


def Cos_guess(model, data, t, **kwargs):
    """
    Cosine-fit parameter guess based on fft_freq_phase_guess.

    Tip: to use this assign this guess function as a method to a model
    use: model.guess = Cos_guess.__get__(model, model.__class__)
    """
    amp_guess = abs(max(data) - min(data)) / 2  # amp is positive by convention
    offs_guess = np.mean(data)
    freq_guess, ph_guess = fft_freq_phase_guess(data, t, **kwargs)

    model.set_param_hint('period', expr='1/frequency')
    params = model.make_params(amplitude=amp_guess, frequency=freq_guess,
                               phase=ph_guess, offset=offs_guess)
    params['amplitude'].min = 0  # Ensures positive amp
    params['frequency'].min = 0
    return params


def exp_damp_osc_guess(model, data, t, n_guess=1):
    """
    Makes a guess for an exponentially damped oscillation. Uses the
    fft_freq_phase guess to guess the oscillation parameters. The guess
    for the exponential is simpler as it sets the exponent (n) at 1 and
    the tau at 2/3 of the total range.
    """
    amp_guess = abs(max(data) - min(data)) / 2  # amp is positive by convention
    freq_guess, ph_guess = fft_freq_phase_guess(data, t)
    osc_offs_guess = 0
    tau_guess = 2 / 3 * max(t)
    exp_offs_guess = np.mean(data)
    params = model.make_params(amplitude=amp_guess, frequency=freq_guess,
                               phase=ph_guess,
                               oscillation_offset=osc_offs_guess,
                               exponential_offset=exp_offs_guess,
                               n=n_guess, tau=tau_guess)
    return params
def Cos_amp_phase_guess(model, data, f, t):
    '''
    Guess for a cosine fit with fixed frequency f.
    '''
    amp_guess = abs(max(data) - min(data)) / 2  # amp is positive by convention
    offs_guess = np.mean(data)

    # the condition data == max(data) can have several solutions
    # (for example when discretization is visible);
    # to prevent errors we pick the first solution
    ph_guess = (-2 * np.pi * t[data == max(data)] * f)[0]

    # model.set_param_hint('period', expr='1')
    params = model.make_params(amplitude=amp_guess, phase=ph_guess,
                               offset=offs_guess)
    params['amplitude'].min = 0  # Ensures positive amp
    return params


def gauss_2D_guess(model, data, x, y):
    '''
    takes the mean of every row/column and then uses the regular gauss
    guess function to get a guess for the model parameters.

    Assumptions on input data
        * input is a flattened version of a 2D grid.
        * total surface under the gaussians sums up to 1.

    Potential improvements:
        Make the input also accept a 2D grid of data to prevent reshaping.
        Find a way to extract amplitude guess from data itself, note that
        taking the sum of the data (which should correspond to all data
        under the curve) does not do the trick.

    Note: possibly not compatible if the model uses prefixes.
    '''
    dx = x[1:]-x[:-1]
    dy = y[1:]-y[:-1]
    # NOTE(review): 2D indexing assumes data arrives as a grid here even
    # though the docstring says flattened — TODO confirm against callers.
    sums = np.sum(((data[:-1, :-1]*dx).transpose()*dy))
    amp = np.sqrt(sums)
    data_grid = data.reshape(-1, len(np.unique(x)))
    x_proj_data = np.mean(data_grid, axis=0)
    y_proj_data = np.mean(data_grid, axis=1)
    x.sort()
    y.sort()
    xm = lmfit.models.GaussianModel()
    ym = lmfit.models.GaussianModel()
    x_guess = xm.guess(data=x_proj_data, x=np.unique(x))
    x_res = xm.fit(data=x_proj_data, x=np.unique(x), params=x_guess)
    y_guess = ym.guess(data=y_proj_data, x=np.unique(y))
    y_res = ym.fit(data=y_proj_data, x=np.unique(y), params=y_guess)
    x_guess = x_res.params
    y_guess = y_res.params
    model.set_param_hint('amplitude', value=amp, min=0.9*amp, max=1.1*amp,
                         vary=True)
    model.set_param_hint('sigma_x', value=x_guess['sigma'].value, min=0,
                         vary=True)
    model.set_param_hint('sigma_y', value=y_guess['sigma'].value, min=0,
                         vary=True)
    params = model.make_params(center_x=x_guess['center'].value,
                               center_y=y_guess['center'].value,)
    return params


def double_gauss_2D_guess(model, data, x, y):
    '''
    takes the mean of every row/column and then uses the guess function
    of the double gauss.

    Assumptions on input data
        * input is a flattened version of a 2D grid.

    Note: possibly not compatible if the model uses prefixes.
    Note 2: see also gauss_2D_guess() for some notes on how to improve
    this function.
    '''
    data_grid = data.reshape(-1, len(np.unique(x)))
    x_proj_data = np.mean(data_grid, axis=0)
    y_proj_data = np.mean(data_grid, axis=1)

    # The syntax here is slightly different than when calling a regular
    # guess function because I do not overwrite the class attribute
    # properly.
    x_guess = double_gauss_guess(model=None, data=x_proj_data,
                                 x=np.unique(x))
    y_guess = double_gauss_guess(model=None, data=y_proj_data,
                                 x=np.unique(y))

    if model is not None:
        pars = model.make_params(A_sigma_x=x_guess['A_sigma'],
                                 A_sigma_y=y_guess['A_sigma'],
                                 A_center_x=x_guess['A_center'],
                                 A_center_y=y_guess['A_center'],
                                 A_amplitude=1,
                                 B_sigma_x=x_guess['B_sigma'],
                                 B_sigma_y=y_guess['B_sigma'],
                                 B_center_y=y_guess['B_center'],
                                 B_center_x=x_guess['B_center'],
                                 B_amplitude=1)
        return pars
    else:
        return x_guess, y_guess
def double_gauss_guess(model, data, x=None, **kwargs):
    '''
    Finds a guess for the intial parametes of the double gauss model.
    Guess is based on taking the cumulative sum of the data and finding
    the points corresponding to 25% and 75%. It finds sigma by using the
    property that ~33% of the data is contained in the range mu-sigma to
    mu+sigma.

    Tip: to use this assign this guess function as a method to a model
    use: model.guess = double_gauss_guess.__get__(model, model.__class__)
    '''
    if x is None:
        x = np.arange(len(data))
    cdf = np.cumsum(data)
    norm_cdf = cdf / cdf[-1]
    par_dict = {'A_center': x[(np.abs(norm_cdf - 0.25)).argmin()],
                'B_center': x[(np.abs(norm_cdf - 0.75)).argmin()],
                'A_sigma': (x[(np.abs(norm_cdf - 0.25 - .33 / 2)).argmin()] -
                            x[(np.abs(norm_cdf - 0.25 + .33 / 2)).argmin()]),
                'B_sigma': (x[(np.abs(norm_cdf - 0.75 - .33 / 2)).argmin()] -
                            x[(np.abs(norm_cdf - 0.75 + .33 / 2)).argmin()])}

    amp = max(data) * (par_dict['A_sigma'] + par_dict['B_sigma']) / 2.
    if model is not None:
        # Specify explicitly because not all pars are set to those from
        # the par dict
        pars = model.make_params(A_center=par_dict['A_center'],
                                 B_center=par_dict['B_center'],
                                 A_sigma=par_dict['A_sigma'],
                                 B_sigma=par_dict['B_sigma'],
                                 A_amplitude=amp, B_amplitude=amp)
        return pars
    # The else clause is added explicitly to reuse this function for the
    # 2D double gauss model
    else:
        return par_dict


def ro_double_gauss_guess(model, data, x, fixed_p01=False,
                          fixed_p10=False):
    '''
    Guess for the double-gauss readout model. An initial guess is done on
    the binned data with single gaussians to constrain the fit params and
    avoid fitting noise if e.g., mmt. ind. rel. is very low.
    '''
    gmod0 = lmfit.models.GaussianModel()
    guess0 = gmod0.guess(data=data[0], x=x[0])
    gmod1 = lmfit.models.GaussianModel()
    guess1 = gmod1.guess(data=data[1], x=x[1])

    model.set_param_hint(
        'A_center', vary=True, value=guess0['center'].value,
        min=guess0['center'] - 2 * guess0['sigma'],
        max=guess0['center'] + 2 * guess0['sigma'])
    model.set_param_hint(
        'B_center', vary=True, value=guess1['center'].value,
        min=guess1['center'] - 2 * guess1['sigma'],
        max=guess1['center'] + 2 * guess1['sigma'])
    model.set_param_hint('A_sigma', value=guess0['sigma'].value, vary=True)
    model.set_param_hint('B_sigma', value=guess1['sigma'].value, vary=True)

    # Amplitudes
    intarea0 = sum_int(x=x[0], y=data[0])[-1]
    intarea1 = sum_int(x=x[1], y=data[1])[-1]
    model.set_param_hint('A_amplitude', value=intarea0, vary=False)
    model.set_param_hint('B_amplitude', value=intarea1, vary=False)
    model.set_param_hint(
        'SNR', expr='abs(A_center-B_center)*2/(A_sigma+B_sigma)',
        vary=False)

    # Spurious excitement
    f = np.sqrt(2*np.pi)
    amp0 = 0.99 * np.max(data[0]) * guess0['sigma'] * f
    amp1 = 0.99 * np.max(data[1]) * guess1['sigma'] * f
    spurious0 = max(1-(amp0/intarea0), 1e-3)
    spurious1 = max(1-(amp1/intarea1), 1e-3)
    p01 = fixed_p01 if fixed_p01 is not False else spurious0
    p10 = fixed_p10 if fixed_p10 is not False else spurious1
    model.set_param_hint('A_spurious', value=p01, min=0, max=1,
                         vary=fixed_p01 is False)
    model.set_param_hint('B_spurious', value=p10, min=0, max=1,
                         vary=fixed_p10 is False)

    return model.make_params()
def sum_int(x, y):
    """Cumulative left-Riemann integral of y over x (length len(x)-1)."""
    return np.cumsum(y[:-1]*(x[1:]-x[:-1]))


def DoubleGaussian(freq, sigma, mu, ampl, sigma0, mu0, ampl0, offset):
    '''
    Double Gaussian function
    '''
    return ampl/(sigma*np.sqrt(2*np.pi))*np.exp(-0.5*((freq - mu)/sigma)**2) + \
        ampl0/(sigma0*np.sqrt(2*np.pi))*np.exp(-0.5*((freq - mu0)/sigma0)**2) + \
        offset


def Gaussian(freq, sigma, mu, ampl, offset):
    '''
    Gaussian function
    '''
    return ampl/(sigma*np.sqrt(2*np.pi))*np.exp(-0.5*((freq - mu)/sigma)**2) \
        + offset


def Gaussian_guess(model, data, freq, **kwargs):
    """
    Parameter guess for the Gaussian model above.

    Tip: to use this assign this guess function as a method to a model
    use: model.guess = Gaussian_guess.__get__(model, model.__class__)
    """
    mu_guess = freq[np.argmax(data)]
    offs_guess = np.median(data)
    # weighted second moment of the data around the peak
    p = (data - offs_guess)**2
    p /= p.sum()
    sigma_guess = np.sqrt(((freq - mu_guess)**2 * p).sum())/10
    amp_guess = max(data - offs_guess)*sigma_guess*np.sqrt(2*np.pi)
    params = model.make_params(sigma=sigma_guess, mu=mu_guess,
                               ampl=amp_guess, offset=offs_guess)
    params['mu'].min = np.min(freq)
    params['mu'].max = np.max(freq)
    return params


def half_feed_line_S12_J_func(omega, J, kappaPF, gammaPF, gammaRR, omegaPF,
                              omegaRR, phi, A, B, alpha):
    """
    |S12| of a half feed line with a Purcell filter (PF) coupled with
    strength J to a readout resonator (RR); A/B are background
    amplitudes, phi and alpha phase parameters.
    """
    return abs(
        A + np.exp(-1j*phi)*2*B *
        ((-1+np.exp(1j*alpha)) *
         (4*J**2+(gammaPF-2*1j*(omegaPF-omega)) *
          (gammaRR-2j*omegaRR+2j*omega))) /
        (16*J**2+(4*gammaPF+(3+np.exp(1j*alpha))*kappaPF
                  - 8j*(omegaPF-omega)) *
         (gammaRR-2j*omegaRR+2j*omega)))


def half_feed_line_S12_J_guess(model, data):
    '''
    data should have the frequencies in Hz in the first column and the
    transmission in the second column
    '''
    background_guess = max(data, key=lambda item: item[1])[1]/2
    amp_guess = (max(data, key=lambda item: item[1])[0] -
                 min(data, key=lambda item: item[1])[0])
    omegaPF_guess = min(data, key=lambda item: item[1])[0]
    # Remove the PF dip to find the second smallest feature at the RR
    # frequency
    kappaPF_guess_data = np.extract(
        (data[:, 1] <= (min(data, key=lambda item: item[1])[1] +
                        background_guess/2)), data[:, 0])
    kappaPF_guess = min(abs(kappaPF_guess_data[0]-omegaPF_guess),
                        abs(kappaPF_guess_data[-1]-omegaPF_guess), 5e10)
    omegaRR_guess = min(
        np.transpose([np.extract(abs(data[:, 0]-omegaPF_guess) >=
                                 kappaPF_guess/2, data[:, 0]),
                      np.extract(abs(data[:, 0]-omegaPF_guess) >=
                                 kappaPF_guess/2, data[:, 1])]),
        key=lambda item: item[1])[0]
    J_guess = kappaPF_guess/4
    model.set_param_hint('J', value=J_guess, min=0, max=2e7)
    model.set_param_hint('kappaPF', value=kappaPF_guess, min=1e6, max=1e8)
    model.set_param_hint('gammaPF', value=0.001, min=0, max=1e6)
    model.set_param_hint('gammaRR', value=0.001, min=0, max=1e6)
    model.set_param_hint('omegaPF', value=omegaPF_guess,
                         min=omegaPF_guess-2e7, max=omegaPF_guess+2e7)
    model.set_param_hint('omegaRR', value=omegaRR_guess,
                         min=omegaRR_guess-2e7, max=omegaRR_guess+2e7)
    model.set_param_hint('phi',
                         value=(data[0, 1]-data[-1, 1])/background_guess)
    model.set_param_hint('A', value=background_guess,
                         min=(background_guess)-5,
                         max=(2000*background_guess)+5)
    model.set_param_hint('B', value=background_guess,
                         min=(background_guess)-5,
                         max=(2000*background_guess)+5)
    model.set_param_hint('alpha', value=3, min=0, max=10)
    return model.make_params()


def TwoErrorFunc_guess(model, delays, data):
    """Parameter guess for the TwoErrorFunc window model."""
    offset_guess = data[1]
    amp_guess = data[data.size//2] - data[1]
    delay_interval = (delays[-1]-delays[1])
    # edges assumed at roughly 10% and 90% of the delay range
    mu_A_guess = delays[1] + 0.1*delay_interval
    mu_B_guess = delays[1] + 0.9*delay_interval
    sigma_guess = 3e-9
    params = model.make_params(amp=amp_guess, mu_A=mu_A_guess,
                               mu_B=mu_B_guess, sigma=sigma_guess,
                               offset=offset_guess)
    return params


def mixer_imbalance_sideband(alpha, phi_skew, g=1.0, phi=0.0, offset=0.0):
    """
    Analytical model for the max. amplitude of the unwanted sideband of
    an IQ mixer.

    Args:
        alpha (float): Correction factor that was applied to the
            amplitude of the Q signal.
        phi_skew (float): Phase correction of the Q signal relative to
            the I signal.
        g (float, optional): Power ratio between the LO power splitter
            outputs, defined as power(LO_I)/power(LO_Q). Defaults to 1.0.
        phi (float, optional): Phase between the two ports of the LO
            power splitter in degree, defined as
            phase(LO_Q)-phase(LO_I). Defaults to 0 degree.
        offset (float, optional): Offset in dBV accounting for losses in
            the signal chain. Defaults to 0.0 dBV.

    Returns:
        float: maximum sideband amplitude for the specified parameters
        in dBV
    """
    return 20*np.log10(np.abs(
        1 - alpha/g * np.exp(-1j*np.deg2rad(phi + phi_skew)))) + offset
def mixer_imbalance_sideband_guess(model, **kwargs):
    """
    Prepare and return parameters of an :py:lmfit.model: for the model
    mixer_imbalance_sideband.

    Args:
        model (:py:lmfit.model:): The model that the parameter hints will
            be added to and that is used to generate the parameters using
            the :py:lmfit.model.make_params() method.

    Returns:
        :py:lmfit.parameters: Parameters
    """
    model.set_param_hint('g', value=1.0, min=0.5, max=1.5)
    model.set_param_hint('phi', value=0, min=-20, max=20)
    model.set_param_hint('offset', value=0.0, min=-4.0, max=+4.0)
    return model.make_params()


def mixer_lo_leakage(vi, vq, li=0.0, lq=0.0, theta_i=0, theta_q=0,
                     offset=0.0):
    """
    Model for maximum amplitude of LO leakage of an IQ mixer.

    Args:
        vi: DC bias voltage applied on the I input of the mixer.
        vq: DC bias voltage applied on the Q input of the mixer.
        li, lq: leakage amplitudes of the I and Q arms.
        theta_i, theta_q: leakage phases of the I and Q arms.
        offset: Offset in dBV accounting for losses in the signal chain.

    Returns:
        maximum amplitude of the LO leakage for given parameters, in dBV.
    """
    leakage = (vi + li * np.exp(1j*theta_i)
               - 1j * vq + lq * np.exp(1j*(theta_q-np.pi/2)))
    return 20*np.log10(np.abs(leakage)) + offset


def mixer_lo_leakage_guess(model, **kwargs):
    """
    Prepare and return parameters of an :py:lmfit.model: for the model
    mixer_lo_leakage.

    Args:
        model (:py:lmfit.model:): The model that the parameter hints will
            be added to and that is used to generate the parameters. This
            model should have the parameters 'li', 'lq', 'theta_i',
            'theta_q' and 'offset'.

    Returns:
        :py:lmfit.parameters: Parameters
    """
    pi_half = np.pi/2
    model.set_param_hint('li', value=0.0, min=0, max=1)
    model.set_param_hint('lq', value=0.0, min=0, max=1, vary=False)
    model.set_param_hint('theta_i', value=0.0, min=-pi_half, max=pi_half)
    model.set_param_hint('theta_q', value=0.0, min=-pi_half, max=pi_half,
                         vary=False)
    model.set_param_hint('offset', value=0.0, min=-4.0, max=+4.0)
    return model.make_params()


#################################
#     User defined Models       #
#################################
# NOTE: it is actually better to instantiate the model within your analysis
# file, this prevents the model params having a memory.
# A valid reason to define it here would be exp_dec_guess if you want to add a guess function CosModel = lmfit.Model(CosFunc) CosModel.guess = Cos_guess half_Feed_lineS12_J_Model = lmfit.Model(half_feed_line_S12_J_func) half_Feed_lineS12_J_Model.guess = half_feed_line_S12_J_guess ExpDecayModel = lmfit.Model(ExpDecayFunc) TripleExpDecayModel = lmfit.Model(TripleExpDecayFunc) ExpDecayModel.guess = exp_dec_guess # todo: fix ExpDampOscModel = lmfit.Model(ExpDampOscFunc) GaussExpDampOscModel = lmfit.Model(GaussExpDampOscFunc) ExpDampDblOscModel = lmfit.Model(ExpDampDblOscFunc) DoubleExpDampOscModel = lmfit.Model(DoubleExpDampOscFunc) HangerAmplitudeModel = lmfit.Model(HangerFuncAmplitude) SlopedHangerAmplitudeModel = lmfit.Model(SlopedHangerFuncAmplitude) PolyBgHangerAmplitudeModel = lmfit.Model(PolyBgHangerFuncAmplitude) HangerComplexModel = lmfit.Model(HangerFuncComplex) SlopedHangerComplexModel = lmfit.Model(SlopedHangerFuncComplex) QubitFreqDacModel = lmfit.Model(Qubit_dac_to_freq) QubitFreqFluxModel = lmfit.Model(QubitFreqFlux) TwinLorentzModel = lmfit.Model(TwinLorentzFunc) LorentzianModel = lmfit.Model(Lorentzian) RBModel = lmfit.Model(RandomizedBenchmarkingDecay) LinOModel = lmfit.Model(linear_with_offset) LinBGModel = lmfit.Model(linear_with_background) LinBGOModel = lmfit.Model(linear_with_background_and_offset) ErfWindowModel = lmfit.Model(ErfWindow) GaussianModel_v2 = lmfit.models.GaussianModel GaussianModel = lmfit.Model(Gaussian) ExponentialModel = lmfit.models.ExponentialModel HangerWithPfModel = lmfit.Model(hanger_with_pf) SimHangerWithPfModel = lmfit.Model(simultan_hanger_with_pf, independent_vars=['f']) # 2D models Gaus2D_model = lmfit.Model(gaussian_2D, independent_vars=['x', 'y']) Gaus2D_model.guess = gauss_2D_guess # Note: not proper way to add guess func DoubleGauss2D_model = (lmfit.Model(gaussian_2D, independent_vars=['x', 'y'], prefix='A_') + lmfit.Model(gaussian_2D, independent_vars=['x', 'y'], prefix='B_')) DoubleGauss2D_model.guess = 
double_gauss_2D_guess ################################### # Models based on lmfit functions # ################################### LorentzModel = lmfit.Model(lmfit.models.lorentzian) Lorentz_w_background_Model = lmfit.models.LorentzianModel() + \ lmfit.models.LinearModel() PolyBgHangerAmplitudeModel = (HangerAmplitudeModel * lmfit.models.PolynomialModel(degree=7)) DoubleGaussModel = (lmfit.models.GaussianModel(prefix='A_') + lmfit.models.GaussianModel(prefix='B_')) DoubleGaussModel.guess = double_gauss_guess # defines a guess function def plot_fitres2D_heatmap(fit_res, x, y, axs=None, cmap='viridis'): ''' Convenience function for plotting results of flattened 2D fits. It could be argued this does not belong in fitting models (it is not a model) but I put it here as it is closely related to all the stuff we do with lmfit. If anyone has a better location in mind, let me know (MAR). ''' # fixing the data rotation with [::-1] nr_cols = len(np.unique(x)) data_2D = fit_res.data.reshape(-1, nr_cols, order='C')[::-1] fit_2D = fit_res.best_fit.reshape(-1, nr_cols, order='C')[::-1] guess_2D = fit_res.init_fit.reshape(-1, nr_cols, order='C')[::-1] if axs is None: f, axs = plt.subplots(1, 3, figsize=(14, 6)) axs[0].imshow(data_2D, extent=[x[0], x[-1], y[0], y[-1]], cmap=cmap, vmin=np.min(data_2D), vmax=np.max(data_2D)) axs[1].imshow(fit_2D, extent=[x[0], x[-1], y[0], y[-1]], cmap=cmap, vmin=np.min(data_2D), vmax=np.max(data_2D)) axs[2].imshow(guess_2D, extent=[x[0], x[-1], y[0], y[-1]], cmap=cmap, vmin=np.min(data_2D), vmax=np.max(data_2D)) axs[0].set_title('data') axs[1].set_title('fit-result') axs[2].set_title('initial guess') return axs # Before defining a new model, take a look at the built in models in lmfit. 
# From http://lmfit.github.io/lmfit-py/builtin_models.html # Built-in Fitting Models in the models module # Peak-like models # GaussianModel # LorentzianModel # VoigtModel # PseudoVoigtModel # Pearson7Model # StudentsTModel # BreitWignerModel # LognormalModel # DampedOcsillatorModel # ExponentialGaussianModel # SkewedGaussianModel # DonaichModel # Linear and Polynomial Models # ConstantModel # LinearModel # QuadraticModel # ParabolicModel # PolynomialModel # Step-like models # StepModel # RectangleModel # Exponential and Power law models # ExponentialModel # PowerLawModel
# Django views for the herders app: registration, account management and
# summoner-profile pages.
from collections import OrderedDict
from copy import deepcopy

from django.http import Http404, HttpResponseForbidden, JsonResponse, HttpResponse
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.forms.models import modelformset_factory
from django.shortcuts import render, redirect, get_object_or_404
from django.template import loader, RequestContext

from bestiary.models import Monster, Fusion, Building
# NOTE(review): star imports — form classes (RegisterUserForm, etc.) and
# `User` presumably come from .forms; verify before narrowing.
from .forms import *
from .filters import *
from .models import Summoner, BuildingInstance, MonsterInstance, MonsterPiece, TeamGroup, Team, RuneInstance, RuneCraftInstance


def register(request):
    """Create a User + Summoner pair from the registration form and log the
    new account in, redirecting to its profile on success."""
    form = RegisterUserForm(request.POST or None)

    if request.method == 'POST':
        if form.is_valid():
            try:
                # Create the user
                new_user = User.objects.create_user(
                    username=form.cleaned_data['username'],
                    password=form.cleaned_data['password'],
                    email=form.cleaned_data['email'],
                )
                new_user.save()
                # Membership in the 'Summoners' group marks ordinary players.
                new_user.groups.add(Group.objects.get(name='Summoners'))
                new_summoner = Summoner.objects.create(
                    user=new_user,
                    summoner_name=form.cleaned_data['summoner_name'],
                    public=form.cleaned_data['is_public'],
                )
                new_summoner.save()

                # Automatically log them in
                user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
                if user is not None:
                    if user.is_active:
                        login(request, user)
                        return redirect('herders:profile_default', profile_name=user.username)
            except IntegrityError:
                # Duplicate username race: surface it as a form error.
                form.add_error('username', 'Username already taken')

    context = {'form': form}

    return render(request, 'herders/register.html', context)


@login_required
def change_username(request):
    """Let the logged-in user rename their account via CrispyChangeUsernameForm."""
    user = request.user
    form = CrispyChangeUsernameForm(request.POST or None)

    context = {
        'form': form,
    }

    if
request.method == 'POST' and form.is_valid(): try: user.username = form.cleaned_data['username'] user.save() return redirect('username_change_complete') except IntegrityError: form.add_error('username', 'Username already taken') return render(request, 'registration/change_username.html', context) def change_username_complete(request): return render(request, 'registration/change_username_complete.html') @login_required def profile_delete(request, profile_name): user = request.user try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = DeleteProfileForm(request.POST or None) form.helper.form_action = reverse('herders:profile_delete', kwargs={'profile_name': profile_name}) context = { 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): logout(request) user.delete() messages.warning(request, 'Your profile has been permanently deleted.') return redirect('news:latest_news') return render(request, 'herders/profile/profile_delete.html', context) else: return HttpResponseForbidden("You don't own this profile") @login_required def following(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile_following', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'view': 'following', 'return_path': return_path, } return render(request, 'herders/profile/following/list.html', context) @login_required def follow_add(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner 
= Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 new_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.add(new_follower) messages.info(request, 'Now following %s' % new_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() @login_required def follow_remove(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 removed_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.remove(removed_follower) messages.info(request, 'Unfollowed %s' % removed_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() def profile(request, profile_name=None): if profile_name is None: if request.user.is_authenticated(): profile_name = request.user.username else: raise Http404('No user profile specified and not logged in.') try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 # Determine if the person logged in is the one requesting the view is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_filter_form = FilterMonsterInstanceForm() monster_filter_form.helper.form_action = reverse('herders:monster_inventory', kwargs={'profile_name': profile_name}) context = { 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, 'monster_filter_form': monster_filter_form, 'view': 'profile', } if is_owner or summoner.public: return render(request, 
'herders/profile/monster_inventory/base.html', context) else: return render(request, 'herders/profile/not_public.html') def buildings(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) all_buildings = Building.objects.all().order_by('name') building_data = [] total_glory_cost = 0 spent_glory = 0 total_guild_cost = 0 spent_guild = 0 for b in all_buildings: bldg_data = _building_data(summoner, b) if b.area == Building.AREA_GENERAL: total_glory_cost += sum(b.upgrade_cost) spent_glory += bldg_data['spent_upgrade_cost'] elif b.area == Building.AREA_GUILD: total_guild_cost += sum(b.upgrade_cost) spent_guild += bldg_data['spent_upgrade_cost'] building_data.append(bldg_data) context = { 'is_owner': is_owner, 'summoner': summoner, 'profile_name': profile_name, 'buildings': building_data, 'total_glory_cost': total_glory_cost, 'spent_glory': spent_glory, 'glory_progress': float(spent_glory) / total_glory_cost * 100, 'total_guild_cost': total_guild_cost, 'spent_guild': spent_guild, 'guild_progress': float(spent_guild) / total_guild_cost * 100, } return render(request, 'herders/profile/buildings/base.html', context) @login_required def building_edit(request, profile_name, building_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) base_building = get_object_or_404(Building, pk=building_id) try: owned_instance = BuildingInstance.objects.get(owner=summoner, building=base_building) except BuildingInstance.DoesNotExist: owned_instance = BuildingInstance.objects.create(owner=summoner, level=0, building=base_building) form = EditBuildingForm(request.POST or None, instance=owned_instance) form.helper.form_action = 
reverse('herders:building_edit', kwargs={'profile_name': profile_name, 'building_id': building_id}) context = { 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): owned_instance = form.save() messages.success(request, 'Updated ' + owned_instance.building.name + ' to level ' + str(owned_instance.level)) template = loader.get_template('herders/profile/buildings/building_row_snippet.html') context = { 'is_owner': is_owner, 'bldg': _building_data(summoner, base_building) } response_data = { 'code': 'success', 'instance_id': building_id, 'html': template.render(RequestContext(request, context)) } else: template = loader.get_template('herders/profile/buildings/edit_form.html') response_data = { 'code': 'error', 'html': template.render(RequestContext(request, context)) } return JsonResponse(response_data) else: return HttpResponseForbidden() def _building_data(summoner, building): percent_stat = building.affected_stat in Building.PERCENT_STATS total_upgrade_cost = sum(building.upgrade_cost) if building.area == Building.AREA_GENERAL: currency = 'glory_points.png' else: currency = 'guild_points.png' try: instance = BuildingInstance.objects.get(owner=summoner, building=building) if instance.level > 0: stat_bonus = building.stat_bonus[instance.level - 1] else: stat_bonus = 0 remaining_upgrade_cost = instance.remaining_upgrade_cost() except BuildingInstance.DoesNotExist: instance = None stat_bonus = 0 remaining_upgrade_cost = total_upgrade_cost return { 'base': building, 'instance': instance, 'stat_bonus': stat_bonus, 'percent_stat': percent_stat, 'spent_upgrade_cost': total_upgrade_cost - remaining_upgrade_cost, 'total_upgrade_cost': total_upgrade_cost, 'upgrade_progress': float(total_upgrade_cost - remaining_upgrade_cost) / total_upgrade_cost * 100, 'currency': currency, } def monster_inventory(request, profile_name, view_mode=None, box_grouping=None): # If we passed in view mode or sort method, set the session variable and redirect back to ourself 
without the view mode or box grouping if view_mode: request.session['profile_view_mode'] = view_mode.lower() if box_grouping: request.session['profile_group_method'] = box_grouping.lower() if request.session.modified: return HttpResponse("Profile view mode cookie set") view_mode = request.session.get('profile_view_mode', 'list').lower() box_grouping = request.session.get('profile_group_method', 'grade').lower() try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 monster_queryset = MonsterInstance.committed.filter(owner=summoner) total_monsters = monster_queryset.count() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if view_mode == 'list': monster_queryset = monster_queryset.select_related('monster', 'monster__leader_skill', 'monster__awakens_from', 'monster__awakens_to').prefetch_related('monster__skills', 'monster__skills__skill_effect', 'runeinstance_set', 'team_set', 'team_leader') pieces = MonsterPiece.objects.filter(owner=summoner) form = FilterMonsterInstanceForm(request.POST or None) if form.is_valid(): monster_filter = MonsterInstanceFilter(form.cleaned_data, queryset=monster_queryset) else: monster_filter = MonsterInstanceFilter(queryset=monster_queryset) filtered_count = monster_filter.qs.count() context = { 'monsters': monster_filter, 'monster_pieces': pieces, 'total_count': total_monsters, 'filtered_count': filtered_count, 'profile_name': profile_name, 'is_owner': is_owner, } if is_owner or summoner.public: if view_mode == 'pieces': context['monster_pieces'] = MonsterPiece.committed.filter(owner=summoner) template = 'herders/profile/monster_inventory/summoning_pieces.html' elif view_mode == 'list': template = 'herders/profile/monster_inventory/list.html' else: # Group up the filtered monsters monster_stable = OrderedDict() if box_grouping == 'grade': monster_stable['6*'] = monster_filter.qs.filter(stars=6).order_by('-level', 
'monster__element', 'monster__name') monster_stable['5*'] = monster_filter.qs.filter(stars=5).order_by('-level', 'monster__element', 'monster__name') monster_stable['4*'] = monster_filter.qs.filter(stars=4).order_by('-level', 'monster__element', 'monster__name') monster_stable['3*'] = monster_filter.qs.filter(stars=3).order_by('-level', 'monster__element', 'monster__name') monster_stable['2*'] = monster_filter.qs.filter(stars=2).order_by('-level', 'monster__element', 'monster__name') monster_stable['1*'] = monster_filter.qs.filter(stars=1).order_by('-level', 'monster__element', 'monster__name') elif box_grouping == 'level': monster_stable['40'] = monster_filter.qs.filter(level=40).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['39-31'] = monster_filter.qs.filter(level__gt=30).filter(level__lt=40).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['30-21'] = monster_filter.qs.filter(level__gt=20).filter(level__lte=30).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['20-11'] = monster_filter.qs.filter(level__gt=10).filter(level__lte=20).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['10-1'] = monster_filter.qs.filter(level__lte=10).order_by('-level', '-stars', 'monster__element', 'monster__name') elif box_grouping == 'attribute': monster_stable['water'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name') monster_stable['fire'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name') monster_stable['wind'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name') monster_stable['light'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name') monster_stable['dark'] = 
monster_filter.qs.filter(monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name') elif box_grouping == 'priority': monster_stable['High'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_HIGH).order_by('-level', 'monster__element', 'monster__name') monster_stable['Medium'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_MED).order_by('-level', 'monster__element', 'monster__name') monster_stable['Low'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_LOW).order_by('-level', 'monster__element', 'monster__name') monster_stable['None'] = monster_filter.qs.select_related('monster').filter(owner=summoner).filter(Q(priority=None) | Q(priority=0)).order_by('-level', 'monster__element', 'monster__name') else: raise Http404('Invalid sort method') context['monster_stable'] = monster_stable context['box_grouping'] = box_grouping template = 'herders/profile/monster_inventory/box.html' return render(request, template, context) else: return render(request, 'herders/profile/not_public.html', context) @login_required def profile_edit(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) user_form = EditUserForm(request.POST or None, instance=request.user) summoner_form = EditSummonerForm(request.POST or None, instance=request.user.summoner) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'user_form': user_form, 'summoner_form': summoner_form, } if is_owner: if request.method == 'POST' and summoner_form.is_valid() and user_form.is_valid(): 
summoner_form.save() user_form.save() messages.info(request, 'Your profile has been updated.') return redirect(return_path) else: return render(request, 'herders/profile/profile_edit.html', context) else: return HttpResponseForbidden() @login_required def profile_storage(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: form = EditEssenceStorageForm(request.POST or None, instance=request.user.summoner) form.helper.form_action = request.path template = loader.get_template('herders/essence_storage.html') if request.method == 'POST' and form.is_valid(): form.save() messages.success(request, 'Updated essence storage.') response_data = { 'code': 'success' } else: response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'form': form})) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def quick_fodder_menu(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: template = loader.get_template('herders/profile/monster_inventory/quick_fodder_menu.html') response_data = { 'code': 'success', 'html': template.render(), } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_instance_add(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: if request.method == 'POST': form = AddMonsterInstanceForm(request.POST or None) else: form = AddMonsterInstanceForm(initial=request.GET.dict()) if 
request.method == 'POST' and form.is_valid(): # Create the monster instance new_monster = form.save(commit=False) new_monster.owner = request.user.summoner new_monster.save() messages.success(request, 'Added %s to your collection.' % new_monster) template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html') context = { 'profile_name': profile_name, 'instance': new_monster, 'is_owner': is_owner, } response_data = { 'code': 'success', 'instance_id': new_monster.pk.hex, 'html': template.render(RequestContext(request, context)), } else: form.helper.form_action = reverse('herders:monster_instance_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/monster_inventory/add_monster_form.html') # Return form filled in and errors shown response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'add_monster_form': form})) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_instance_quick_add(request, profile_name, monster_id, stars, level): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_to_add = get_object_or_404(Monster, pk=monster_id) if is_owner: new_monster = MonsterInstance.committed.create(owner=summoner, monster=monster_to_add, stars=int(stars), level=int(level), fodder=True, notes='', priority=MonsterInstance.PRIORITY_DONE) messages.success(request, 'Added %s to your collection.' 
% new_monster) return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_bulk_add(request, profile_name): return_path = reverse('herders:profile_default', kwargs={'profile_name': profile_name}) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) BulkAddFormset = modelformset_factory(MonsterInstance, form=BulkAddMonsterInstanceForm, formset=BulkAddMonsterInstanceFormset, extra=5, max_num=50) if request.method == 'POST': formset = BulkAddFormset(request.POST) else: formset = BulkAddFormset() context = { 'profile_name': request.user.username, 'return_path': return_path, 'is_owner': is_owner, 'bulk_add_formset_action': request.path + '?next=' + return_path, 'view': 'profile', } if is_owner: if request.method == 'POST': if formset.is_valid(): new_instances = formset.save(commit=False) for new_instance in new_instances: try: if new_instance.monster: new_instance.owner = summoner if new_instance.monster.archetype == Monster.TYPE_MATERIAL: new_instance.priority = MonsterInstance.PRIORITY_DONE new_instance.save() messages.success(request, 'Added %s to your collection.' 
% new_instance) except ObjectDoesNotExist: # Blank form, don't care pass return redirect(return_path) else: raise PermissionDenied("Trying to bulk add to profile you don't own") context['bulk_add_formset'] = formset return render(request, 'herders/profile/monster_inventory/bulk_add_form.html', context) def monster_instance_view(request, profile_name, instance_id): return_path = request.GET.get( 'next', request.path ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() context = { 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'instance': instance, 'is_owner': is_owner, 'view': 'profile', } if is_owner or summoner.public: return render(request, 'herders/profile/monster_view/base.html', context) else: return render(request, 'herders/profile/not_public.html') def monster_instance_view_runes(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() instance_runes = [ instance.runeinstance_set.filter(slot=1).first(), instance.runeinstance_set.filter(slot=2).first(), instance.runeinstance_set.filter(slot=3).first(), instance.runeinstance_set.filter(slot=4).first(), instance.runeinstance_set.filter(slot=5).first(), instance.runeinstance_set.filter(slot=6).first(), ] context = { 'runes': instance_runes, 'instance': 
instance, 'profile_name': profile_name, 'is_owner': is_owner, } return render(request, 'herders/profile/monster_view/runes.html', context) def monster_instance_view_stats(request, profile_name, instance_id): try: instance = MonsterInstance.committed.select_related('monster').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() context = { 'instance': instance, 'bldg_stats': instance.get_building_stats(), 'guild_stats': instance.get_building_stats(Building.AREA_GUILD), } return render(request, 'herders/profile/monster_view/stats.html', context) def monster_instance_view_skills(request, profile_name, instance_id): try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() # Reconcile skill level with actual skill from base monster skills = [] skill_levels = [ instance.skill_1_level, instance.skill_2_level, instance.skill_3_level, instance.skill_4_level, ] for idx in range(0, instance.monster.skills.count()): skills.append({ 'skill': instance.monster.skills.all()[idx], 'level': skill_levels[idx] }) context = { 'instance': instance, 'skills': skills, } return render(request, 'herders/profile/monster_view/skills.html', context) def monster_instance_view_info(request, profile_name, instance_id): try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() if instance.monster.is_awakened: ingredient_in = instance.monster.fusion_set.all() elif instance.monster.can_awaken and instance.monster.awakens_to: ingredient_in = instance.monster.awakens_to.fusion_set.all() else: ingredient_in = [] if instance.monster.is_awakened and instance.monster.awakens_from: product_of = instance.monster.awakens_from.product.first() elif instance.monster.can_awaken: product_of = instance.monster.product.first() else: 
product_of = [] context = { 'instance': instance, 'profile_name': profile_name, 'fusion_ingredient_in': ingredient_in, 'fusion_product_of': product_of, } return render(request, 'herders/profile/monster_view/notes_info.html', context) @login_required() def monster_instance_remove_runes(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: try: instance = MonsterInstance.committed.get(pk=instance_id) except ObjectDoesNotExist: raise Http404() else: for rune in instance.runeinstance_set.all(): rune.assigned_to = None rune.save() instance.save() messages.success(request, 'Removed all runes from ' + str(instance)) response_data = { 'code': 'success', } return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_instance_edit(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 instance = get_object_or_404(MonsterInstance, pk=instance_id) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: # Reconcile skill level with actual skill from base monster skills = [] skill_levels = [ instance.skill_1_level, instance.skill_2_level, instance.skill_3_level, instance.skill_4_level, ] for idx in range(0, instance.monster.skills.count()): skills.append({ 'skill': instance.monster.skills.all()[idx], 'level': skill_levels[idx] }) form = EditMonsterInstanceForm(request.POST or None, instance=instance) form.helper.form_action = request.path if len(skills) >= 1 and skills[0]['skill'].max_level > 1: form.helper['skill_1_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_1", data_skill_field=form['skill_1_level'].auto_id), ) form.helper['skill_1_level'].wrap(Field, min=1, 
max=skills[0]['skill'].max_level) form.fields['skill_1_level'].label = skills[0]['skill'].name + " Level" else: form.helper['skill_1_level'].wrap(Div, css_class="hidden") if len(skills) >= 2 and skills[1]['skill'].max_level > 1: form.helper['skill_2_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_2", data_skill_field=form['skill_2_level'].auto_id), min=1, max=skills[1]['skill'].max_level, ) form.helper['skill_2_level'].wrap(Field, min=1, max=skills[1]['skill'].max_level) form.fields['skill_2_level'].label = skills[1]['skill'].name + " Level" else: form.helper['skill_2_level'].wrap(Div, css_class="hidden") if len(skills) >= 3 and skills[2]['skill'].max_level > 1: form.helper['skill_3_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_3", data_skill_field=form['skill_3_level'].auto_id), min=1, max=skills[2]['skill'].max_level, ) form.helper['skill_3_level'].wrap(Field, min=1, max=skills[2]['skill'].max_level) form.fields['skill_3_level'].label = skills[2]['skill'].name + " Level" else: form.helper['skill_3_level'].wrap(Div, css_class="hidden") if len(skills) >= 4 and skills[3]['skill'].max_level > 1: form.helper['skill_4_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_4", data_skill_field=form['skill_4_level'].auto_id), min=1, max=skills[1]['skill'].max_level, ) form.helper['skill_4_level'].wrap(Field, min=1, max=skills[3]['skill'].max_level) form.fields['skill_4_level'].label = skills[3]['skill'].name + " Level" else: form.helper['skill_4_level'].wrap(Div, css_class="hidden") if not instance.monster.fusion_food: form.helper['ignore_for_fusion'].wrap(Div, css_class="hidden") if request.method == 'POST' and form.is_valid(): mon = form.save() messages.success(request, 'Successfully edited ' + str(mon)) view_mode = request.session.get('profile_view_mode', 'list').lower() if view_mode == 'list': template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html') else: 
template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html') context = { 'profile_name': profile_name, 'instance': mon, 'is_owner': is_owner, } response_data = { 'code': 'success', 'instance_id': mon.pk.hex, 'html': template.render(RequestContext(request, context)), } else: # Return form filled in and errors shown template = loader.get_template('herders/profile/monster_view/edit_form.html') response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'edit_monster_form': form})) } return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_instance_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == monster.owner: messages.warning(request, 'Deleted ' + str(monster)) monster.delete() return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_power_up(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) form = PowerUpMonsterInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:monster_instance_power_up', kwargs={'profile_name': profile_name, 'instance_id': instance_id}) context = { 'profile_name': request.user.username, 'monster': monster, 'is_owner': is_owner, 'form': form, 'view': 'profile', } validation_errors = {} response_data = { 'code': 'error' } if is_owner: if request.method == 'POST' and form.is_valid(): food_monsters = form.cleaned_data['monster'] # Check that monster is not being fed to itself if monster in 
food_monsters:
                validation_errors['base_food_same'] = "You can't feed a monster to itself. "

            is_evolution = request.POST.get('evolve', False)

            # Perform validation checks for evolve action
            if is_evolution:
                # Check constraints on evolving (or not, if form element was set)
                # Check monster level and stars
                if monster.stars >= 6:
                    validation_errors['base_monster_stars'] = "%s is already at 6 stars." % monster.monster.name

                if not form.cleaned_data['ignore_evolution']:
                    if monster.level != monster.max_level_from_stars():
                        validation_errors['base_monster_level'] = "%s is not at max level for the current star rating (Lvl %s)." % (monster.monster.name, monster.monster.max_level_from_stars())

                    # Check number of fodder monsters
                    if len(food_monsters) < monster.stars:
                        validation_errors['food_monster_quantity'] = "Evolution requres %s food monsters." % monster.stars

                    # Check fodder star ratings - must be same as monster
                    for food in food_monsters:
                        if food.stars != monster.stars:
                            if 'food_monster_stars' not in validation_errors:
                                validation_errors['food_monster_stars'] = "All food monsters must be %s stars or higher." % monster.stars

                # Perform the stars++ if no errors
                if not validation_errors:
                    # Level up stars
                    monster.stars += 1
                    monster.level = 1
                    monster.save()
                    messages.success(request, 'Successfully evolved %s to %s<span class="glyphicon glyphicon-star"></span>' % (monster.monster.name, monster.stars), extra_tags='safe')

            if not validation_errors:
                # Delete the submitted monsters
                for food in food_monsters:
                    if food.owner == request.user.summoner:
                        messages.warning(request, 'Deleted %s' % food)
                        food.delete()
                    else:
                        raise PermissionDenied("Trying to delete a monster you don't own")

                # Redirect back to return path if evolved, or go to edit screen if power up
                if is_evolution:
                    response_data['code'] = 'success'
                else:
                    response_data['code'] = 'edit'

                return JsonResponse(response_data)
    else:
        raise PermissionDenied("Trying to power up or evolve a monster you don't own")

    template = loader.get_template('herders/profile/monster_view/power_up_form.html')

    # Any errors in the form will fall through to here and be displayed
    context['validation_errors'] = validation_errors
    response_data['html'] = template.render(RequestContext(request, context))

    return JsonResponse(response_data)


@login_required()
def monster_instance_awaken(request, profile_name, instance_id):
    """Awaken a monster instance (AJAX, owner only).

    Optionally deducts the awakening essences from the summoner's storage,
    then swaps the instance's base monster for its awakened form. On GET or
    invalid POST, renders the awaken form with the summoner's essence counts.
    """
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    monster = get_object_or_404(MonsterInstance, pk=instance_id)

    template = loader.get_template('herders/profile/monster_view/awaken_form.html')

    form = AwakenMonsterInstanceForm(request.POST or None)
    form.helper.form_action = reverse('herders:monster_instance_awaken', kwargs={'profile_name': profile_name, 'instance_id': instance_id})

    if is_owner:
        if not monster.monster.is_awakened:
            if request.method == 'POST' and form.is_valid():
                # Subtract essences from inventory if requested
                # NOTE(review): counts are not clamped at zero — storage can go
                # negative here; confirm whether that is intended.
                if form.cleaned_data['subtract_materials']:
                    summoner = Summoner.objects.get(user=request.user)

                    summoner.storage_magic_high -= monster.monster.awaken_mats_magic_high
                    summoner.storage_magic_mid -= monster.monster.awaken_mats_magic_mid
                    summoner.storage_magic_low -= monster.monster.awaken_mats_magic_low
                    summoner.storage_fire_high -= monster.monster.awaken_mats_fire_high
                    summoner.storage_fire_mid -= monster.monster.awaken_mats_fire_mid
                    summoner.storage_fire_low -= monster.monster.awaken_mats_fire_low
                    summoner.storage_water_high -= monster.monster.awaken_mats_water_high
                    summoner.storage_water_mid -= monster.monster.awaken_mats_water_mid
                    summoner.storage_water_low -= monster.monster.awaken_mats_water_low
                    summoner.storage_wind_high -= monster.monster.awaken_mats_wind_high
                    summoner.storage_wind_mid -= monster.monster.awaken_mats_wind_mid
                    summoner.storage_wind_low -= monster.monster.awaken_mats_wind_low
                    summoner.storage_dark_high -= monster.monster.awaken_mats_dark_high
                    summoner.storage_dark_mid -= monster.monster.awaken_mats_dark_mid
                    summoner.storage_dark_low -= monster.monster.awaken_mats_dark_low
                    summoner.storage_light_high -= monster.monster.awaken_mats_light_high
                    summoner.storage_light_mid -= monster.monster.awaken_mats_light_mid
                    summoner.storage_light_low -= monster.monster.awaken_mats_light_low
                    summoner.save()

                # Perform the awakening by instance's monster source ID
                monster.monster = monster.monster.awakens_to
                monster.save()

                response_data = {
                    'code': 'success',
                    'removeElement': '#awakenMonsterButton',
                }
            else:
                # Build {element: {size: {qty, sufficient}}} for essences this
                # awakening actually requires (cost > 0).
                storage = summoner.get_storage()
                available_essences = OrderedDict()

                for element, essences in monster.monster.get_awakening_materials().iteritems():
                    available_essences[element] = OrderedDict()
                    for size, cost in essences.iteritems():
                        if cost > 0:
                            available_essences[element][size] = {
                                'qty': storage[element][size],
                                'sufficient': storage[element][size] >= cost,
                            }

                response_data = {
                    'code': 'error',
                    'html': template.render(RequestContext(request, {
                        'awaken_form': form,
                        'available_essences': available_essences,
                        'instance': monster,
                    }))
                }
        else:
            error_template = loader.get_template('herders/profile/monster_already_awakened.html')
            response_data = {
                'code': 'error',
                'html': error_template.render(RequestContext(request, {}))
            }

        return JsonResponse(response_data)
    else:
        raise PermissionDenied()


@login_required()
def monster_instance_duplicate(request, profile_name, instance_id):
    """Clone a monster instance by clearing its pk and re-saving (owner only)."""
    monster = get_object_or_404(MonsterInstance, pk=instance_id)

    # Check for proper owner before copying
    if request.user.summoner == monster.owner:
        # Setting pk to None makes save() insert a new row (Django copy idiom)
        newmonster = monster
        newmonster.pk = None
        newmonster.save()
        messages.success(request, 'Succesfully copied ' + str(newmonster))

        view_mode = request.session.get('profile_view_mode', 'list').lower()

        if view_mode == 'list':
            template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html')
        else:
            template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html')

        context = {
            'profile_name': profile_name,
            'is_owner': True,
            'instance': newmonster,
        }

        response_data = {
            'code': 'success',
            'instance_id': newmonster.pk.hex,
            'html': template.render(RequestContext(request, context))
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_piece_add(request, profile_name):
    """Add monster summoning pieces to the profile's collection (AJAX, owner only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        if request.method == 'POST':
            form = MonsterPieceForm(request.POST or None)
        else:
            form = MonsterPieceForm()

        form.helper.form_action = reverse('herders:monster_piece_add', kwargs={'profile_name': profile_name})
        template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html')

        if request.method == 'POST' and form.is_valid():
            # Create the monster instance
            new_pieces = form.save(commit=False)
            new_pieces.owner = request.user.summoner
            new_pieces.save()

            messages.success(request,
'Added %s to your collection.' % new_pieces)

            response_data = {
                'code': 'success'
            }
        else:
            # Return form filled in and errors shown
            response_data = {
                'code': 'error',
                'html': template.render(RequestContext(request, {'form': form}))
            }

        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_piece_edit(request, profile_name, instance_id):
    """Edit an existing monster-piece record (AJAX, owner only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    pieces = get_object_or_404(MonsterPiece, pk=instance_id)
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html')

    if is_owner:
        form = MonsterPieceForm(request.POST or None, instance=pieces)
        form.helper.form_action = request.path

        if request.method == 'POST' and form.is_valid():
            new_piece = form.save()
            template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html')

            context = {
                'piece': new_piece,
                'is_owner': is_owner,
            }

            response_data = {
                'code': 'success',
                'instance_id': new_piece.pk.hex,
                'html': template.render(RequestContext(request, context))
            }
        else:
            # Return form filled in and errors shown
            response_data = {
                'code': 'error',
                'html': template.render(RequestContext(request, {'form': form}))
            }

        return JsonResponse(response_data)
    else:
        raise PermissionDenied()


@login_required()
def monster_piece_summon(request, profile_name, instance_id):
    """Summon a monster from collected pieces, decrementing (and possibly
    deleting) the piece record (AJAX, owner only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    pieces = get_object_or_404(MonsterPiece, pk=instance_id)
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        if pieces.can_summon():
            new_monster = MonsterInstance.committed.create(owner=summoner, monster=pieces.monster, stars=pieces.monster.base_stars, level=1, fodder=False, notes='', priority=MonsterInstance.PRIORITY_DONE)
            messages.success(request, 'Added %s to your collection.' % new_monster)

            # Remove the pieces, delete if 0
            pieces.pieces -= pieces.PIECE_REQUIREMENTS[pieces.monster.base_stars]
            pieces.save()

            if pieces.pieces <= 0:
                pieces.delete()

            template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html')

            context = {
                'piece': pieces,
                'is_owner': is_owner,
            }

            response_data = {
                'code': 'success',
                'instance_id': pieces.pk.hex,
                'html': template.render(RequestContext(request, context))
            }

        # NOTE(review): if can_summon() is False, response_data is never bound
        # and this return raises NameError — confirm whether that path is
        # reachable from the UI.
        return JsonResponse(response_data)
    else:
        raise PermissionDenied()


@login_required()
def monster_piece_delete(request, profile_name, instance_id):
    """Delete a monster-piece record after verifying ownership, then redirect."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    pieces = get_object_or_404(MonsterPiece, pk=instance_id)

    # Check for proper owner before deleting
    if request.user.summoner == pieces.owner:
        messages.warning(request, 'Deleted ' + str(pieces))
        pieces.delete()
        return redirect(return_path)
    else:
        return HttpResponseForbidden()


def fusion_progress(request, profile_name):
    """Render the fusion overview page listing all available fusions."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    fusions = Fusion.objects.all()

    context = {
        'view': 'fusion',
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
        'fusions': fusions,
    }

    return render(request, 'herders/profile/fusion/base.html', context)


def fusion_progress_detail(request, profile_name, monster_slug):
    """Render detailed progress toward one fusion.

    For each ingredient, finds the summoner's best non-ignored instance and
    records acquired/evolved/leveled/awakened flags; falls back to monster
    pieces for the 'acquired' check. Also totals awakening essence costs,
    including costs of sub-fusions for ingredients not yet acquired.
    """
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    context = {
        'view': 'fusion',
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
    }

    if is_owner or summoner.public:
        try:
            fusion = Fusion.objects.get(product__bestiary_slug=monster_slug)
        except Fusion.DoesNotExist:
            raise Http404()
        else:
            # Required ingredient level scales with the fusion's star rating
            level = 10 + fusion.stars * 5
            ingredients = []

            # Check if fusion has been completed already
            fusion_complete = MonsterInstance.committed.filter(
                Q(owner=summoner), Q(monster=fusion.product) | Q(monster=fusion.product.awakens_to)
            ).exists()

            # Scan summoner's collection for instances each ingredient
            fusion_ready = True

            for ingredient in fusion.ingredients.all().select_related('awakens_from', 'awakens_to'):
                owned_ingredients = MonsterInstance.committed.filter(
                    Q(owner=summoner),
                    Q(monster=ingredient) | Q(monster=ingredient.awakens_from),
                ).order_by('-stars', '-level', '-monster__is_awakened')

                owned_ingredient_pieces = MonsterPiece.committed.filter(
                    Q(owner=summoner),
                    Q(monster=ingredient) | Q(monster=ingredient.awakens_from),
                ).first()

                # Determine if each individual requirement is met using highest evolved/leveled monster that is not ignored for fusion
                for owned_ingredient in owned_ingredients:
                    if not owned_ingredient.ignore_for_fusion:
                        acquired = True
                        evolved = owned_ingredient.stars >= fusion.stars
                        leveled = owned_ingredient.level >= level
                        awakened = owned_ingredient.monster.is_awakened
                        complete = acquired & evolved & leveled & awakened
                        break
                else:
                    # for/else: no usable instance found — check pieces instead
                    if owned_ingredient_pieces:
                        acquired = owned_ingredient_pieces.can_summon()
                    else:
                        acquired = False
                    evolved = False
                    leveled = False
                    awakened = False
                    complete = False

                if not complete:
                    fusion_ready = False

                # Check if this ingredient is fusable
                if not acquired:
                    try:
                        sub_fusion = Fusion.objects.get(product=ingredient.awakens_from)
                    except Fusion.DoesNotExist:
                        sub_fusion_awakening_cost = None
                    else:
                        awakened_sub_fusion_ingredients = MonsterInstance.committed.filter(
                            monster__pk__in=sub_fusion.ingredients.values_list('pk', flat=True),
                            ignore_for_fusion=False,
                            owner=summoner,
                        )
                        sub_fusion_awakening_cost = sub_fusion.total_awakening_cost(awakened_sub_fusion_ingredients)
                else:
                    sub_fusion_awakening_cost = None

                ingredient_progress = {
                    'instance': ingredient,
                    'owned': owned_ingredients,
                    'pieces': owned_ingredient_pieces,
                    'complete': complete,
                    'acquired': acquired,
                    'evolved': evolved,
                    'leveled': leveled,
                    'awakened': awakened,
                    'sub_fusion_cost': sub_fusion_awakening_cost,
                }

                ingredients.append(ingredient_progress)

            awakened_owned_ingredients = MonsterInstance.committed.filter(
                monster__pk__in=fusion.ingredients.values_list('pk', flat=True),
                ignore_for_fusion=False,
                owner=summoner,
            )

            total_cost = fusion.total_awakening_cost(awakened_owned_ingredients)
            essences_satisfied, total_missing = fusion.missing_awakening_cost(summoner)

            # Determine the total/missing essences including sub-fusions
            if fusion.sub_fusion_available():
                total_sub_fusion_cost = deepcopy(total_cost)

                for ingredient in ingredients:
                    if ingredient['sub_fusion_cost']:
                        for element, sizes in total_sub_fusion_cost.iteritems():
                            for size, qty in sizes.iteritems():
                                total_sub_fusion_cost[element][size] += ingredient['sub_fusion_cost'][element][size]

                # Now determine what's missing based on owner's storage
                storage = summoner.get_storage()

                sub_fusion_total_missing = {
                    element: {
                        size: total_sub_fusion_cost[element][size] - storage[element][size] if total_sub_fusion_cost[element][size] > storage[element][size] else 0
                        for size, qty in element_sizes.items()
                    }
                    for element, element_sizes in total_sub_fusion_cost.items()
                }

                sub_fusion_mats_satisfied = True
                for sizes in total_sub_fusion_cost.itervalues():
                    for qty in sizes.itervalues():
                        if qty > 0:
                            sub_fusion_mats_satisfied = False
            else:
                sub_fusion_total_missing = None
                sub_fusion_mats_satisfied = None

            progress = {
                'instance': fusion.product,
                'acquired': fusion_complete,
                'stars': fusion.stars,
                'level': level,
                'cost': fusion.cost,
                'ingredients': ingredients,
                'awakening_mats_cost': total_cost,
                'awakening_mats_sufficient': essences_satisfied,
                'awakening_mats_missing': total_missing,
                'sub_fusion_mats_missing': sub_fusion_total_missing,
                'sub_fusion_mats_sufficient': sub_fusion_mats_satisfied,
                'ready': fusion_ready,
            }

            context['fusion'] = progress

            return render(request, 'herders/profile/fusion/fusion_detail.html', context)
    else:
        return render(request, 'herders/profile/not_public.html', context)


def teams(request, profile_name):
    """Render the teams page shell (public profiles or owner only)."""
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    add_team_group_form = AddTeamGroupForm()

    context = {
        'view': 'teams',
        'profile_name': profile_name,
        'summoner': summoner,
        'return_path': return_path,
        'is_owner': is_owner,
        'add_team_group_form': add_team_group_form,
    }

    if is_owner or summoner.public:
        return render(request, 'herders/profile/teams/teams_base.html', context)
    else:
        return render(request, 'herders/profile/not_public.html', context)


def team_list(request, profile_name):
    """Render the list of the summoner's team groups."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    # Get team objects for the summoner
    team_groups = TeamGroup.objects.filter(owner=summoner)

    context = {
        'profile_name': profile_name,
        'is_owner': is_owner,
        'team_groups': team_groups,
    }

    return render(request, 'herders/profile/teams/team_list.html', context)


@login_required
def team_group_add(request, profile_name):
    """Create a new team group for the profile owner, then redirect back."""
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    form = AddTeamGroupForm(request.POST or None)

    if is_owner:
        if form.is_valid() and request.method == 'POST':
            # Create the monster instance
            new_group = form.save(commit=False)
new_group.owner = request.user.summoner new_group.save() return redirect(return_path) else: return PermissionDenied("Attempting to add group to profile you don't own.") @login_required def team_group_edit(request, profile_name, group_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) team_group = get_object_or_404(TeamGroup, pk=group_id) form = EditTeamGroupForm(request.POST or None, instance=team_group) if is_owner: if form.is_valid() and request.method == 'POST': form.save() return redirect(return_path) else: return PermissionDenied("Editing a group you don't own") context = { 'profile_name': profile_name, 'summoner': summoner, 'form': form, 'group_id': group_id, 'return_path': return_path, 'is_owner': is_owner, 'view': 'teams', } return render(request, 'herders/profile/teams/team_group_edit.html', context) @login_required def team_group_delete(request, profile_name, group_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) team_group = get_object_or_404(TeamGroup, pk=group_id) form = DeleteTeamGroupForm(request.POST or None) form.helper.form_action = request.path form.fields['reassign_group'].queryset = TeamGroup.objects.filter(owner=summoner).exclude(pk=group_id) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): list_of_teams = Team.objects.filter(group__pk=group_id) if request.POST.get('delete', 
False): list_of_teams.delete() else: new_group = form.cleaned_data['reassign_group'] if new_group: for team in list_of_teams: team.group = new_group team.save() else: context['validation_errors'] = 'Please specify a group to reassign to.' if team_group.team_set.count() > 0: return render(request, 'herders/profile/teams/team_group_delete.html', context) else: messages.warning(request, 'Deleted team group %s' % team_group.name) team_group.delete() return redirect(return_path) else: return PermissionDenied() def team_detail(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) team = get_object_or_404(Team, pk=team_id) team_effects = [] if team.leader and team.leader.monster.all_skill_effects(): for effect in team.leader.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) for team_member in team.roster.all(): if team_member.monster.all_skill_effects(): for effect in team_member.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'team': team, 'team_buffs': team_effects, } if is_owner or summoner.public: return render(request, 'herders/profile/teams/team_detail.html', context) else: return render(request, 'herders/profile/not_public.html', context) @login_required def team_edit(request, profile_name, team_id=None): return_path = reverse('herders:teams', kwargs={'profile_name': profile_name}) if team_id: team = Team.objects.get(pk=team_id) edit_form = EditTeamForm(request.POST or None, instance=team) else: edit_form = EditTeamForm(request.POST or None) try: summoner = 
Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    # Limit form choices to objects owned by the current user.
    edit_form.fields['group'].queryset = TeamGroup.objects.filter(owner=summoner)
    edit_form.fields['leader'].queryset = MonsterInstance.committed.filter(owner=summoner)
    edit_form.fields['roster'].queryset = MonsterInstance.committed.filter(owner=summoner)
    edit_form.helper.form_action = request.path + '?next=' + return_path

    context = {
        'profile_name': request.user.username,
        'return_path': return_path,
        'is_owner': is_owner,
        'view': 'teams',
    }

    if is_owner:
        if request.method == 'POST' and edit_form.is_valid():
            team = edit_form.save()
            messages.success(request, 'Saved changes to %s - %s.' % (team.group, team))
            return team_detail(request, profile_name, team.pk.hex)
    else:
        raise PermissionDenied()

    context['edit_team_form'] = edit_form
    return render(request, 'herders/profile/teams/team_edit.html', context)


@login_required
def team_delete(request, profile_name, team_id):
    """Delete a team after verifying ownership via its group, then redirect."""
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    team = get_object_or_404(Team, pk=team_id)

    # Check for proper owner before deleting
    if request.user.summoner == team.group.owner:
        team.delete()
        messages.warning(request, 'Deleted team %s - %s.' % (team.group, team))
        return redirect(return_path)
    else:
        return HttpResponseForbidden()


def runes(request, profile_name):
    """Render the rune inventory page shell with its filter form."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    filter_form = FilterRuneForm(auto_id="filter_id_%s")
    filter_form.helper.form_action = reverse('herders:rune_inventory', kwargs={'profile_name': profile_name})

    context = {
        'view': 'runes',
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
        'old_rune_count': RuneInstance.objects.filter(owner=summoner, substats__isnull=True).count(),
        'rune_filter_form': filter_form,
    }

    if is_owner or summoner.public:
        return render(request, 'herders/profile/runes/base.html', context)
    else:
        return render(request, 'herders/profile/not_public.html', context)


def rune_inventory(request, profile_name, view_mode=None, box_grouping=None):
    """Render the filtered rune inventory in box/grid/table/crafts view.

    view_mode / box_grouping arguments only set session preferences and
    return early; the actual render reads the preferences from the session.
    """
    # If we passed in view mode or sort method, set the session variable and redirect back to base profile URL
    if view_mode:
        request.session['rune_inventory_view_mode'] = view_mode.lower()

    if box_grouping:
        request.session['rune_inventory_box_method'] = box_grouping.lower()

    if request.session.modified:
        return HttpResponse("Rune view mode cookie set")

    view_mode = request.session.get('rune_inventory_view_mode', 'box').lower()
    box_grouping = request.session.get('rune_inventory_box_method', 'slot').lower()

    if view_mode == 'crafts':
        return rune_inventory_crafts(request, profile_name)

    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    rune_queryset = RuneInstance.committed.filter(owner=summoner).select_related('assigned_to', 'assigned_to__monster')
    total_count = rune_queryset.count()
    form = FilterRuneForm(request.POST or None)

    if form.is_valid():
        rune_filter = RuneInstanceFilter(form.cleaned_data, queryset=rune_queryset)
    else:
        rune_filter = RuneInstanceFilter(None, queryset=rune_queryset)

    filtered_count = rune_filter.qs.count()

    context = {
        'runes': rune_filter,
        'total_count': total_count,
        'filtered_count': filtered_count,
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
    }

    if is_owner or summoner.public:
        if view_mode == 'box':
            # Group the filtered runes into labeled bins per the chosen method
            rune_box = OrderedDict()

            if box_grouping == 'slot':
                rune_box['Slot 1'] = rune_filter.qs.filter(slot=1)
                rune_box['Slot 2'] = rune_filter.qs.filter(slot=2)
                rune_box['Slot 3'] = rune_filter.qs.filter(slot=3)
                rune_box['Slot 4'] = rune_filter.qs.filter(slot=4)
                rune_box['Slot 5'] = rune_filter.qs.filter(slot=5)
                rune_box['Slot 6'] = rune_filter.qs.filter(slot=6)
            elif box_grouping == 'grade':
                rune_box['6*'] = rune_filter.qs.filter(stars=6)
                rune_box['5*'] = rune_filter.qs.filter(stars=5)
                rune_box['4*'] = rune_filter.qs.filter(stars=4)
                rune_box['3*'] = rune_filter.qs.filter(stars=3)
                rune_box['2*'] = rune_filter.qs.filter(stars=2)
                rune_box['1*'] = rune_filter.qs.filter(stars=1)
            elif box_grouping == 'equipped':
                rune_box['Not Equipped'] = rune_filter.qs.filter(assigned_to__isnull=True)
                rune_box['Equipped'] = rune_filter.qs.filter(assigned_to__isnull=False)
            elif box_grouping == 'type':
                for (type, type_name) in RuneInstance.TYPE_CHOICES:
                    rune_box[type_name] = rune_filter.qs.filter(type=type)

            context['runes'] = rune_box
            context['box_grouping'] = box_grouping
            template = 'herders/profile/runes/inventory.html'
        elif view_mode == 'grid':
            template = 'herders/profile/runes/inventory_grid.html'
        else:
            template = 'herders/profile/runes/inventory_table.html'

        return render(request, template, context)
    else:
        return render(request, 'herders/profile/not_public.html', context)


def rune_inventory_crafts(request, profile_name):
    """Render grindstones/gems grouped by craft type, then by rune set."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    context = {
        'profile_name': profile_name,
        'is_owner': is_owner,
    }

    if is_owner or summoner.public:
        craft_box = OrderedDict()

        for (craft, craft_name) in RuneInstance.CRAFT_CHOICES:
            craft_box[craft_name] = OrderedDict()
            for rune, rune_name in RuneInstance.TYPE_CHOICES:
                craft_box[craft_name][rune_name] = RuneCraftInstance.committed.filter(owner=summoner, type=craft, rune=rune).order_by('stat', 'quality')

        context['crafts'] = craft_box
        return render(request, 'herders/profile/runes/inventory_crafts.html', context)
    else:
        return render(request, 'herders/profile/not_public.html')


def rune_counts(request, profile_name):
    """Return the owner's rune counts as JSON (owner only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        response_data = {
            'code': 'success',
            'counts': summoner.get_rune_counts()
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required
def rune_add(request, profile_name):
    """Add a rune via AJAX; on success returns a fresh blank form,
    optionally pre-filled from `slot`/`assigned_to` GET parameters on GET."""
    form = AddRuneInstanceForm(request.POST or None)
    form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name})
    template = loader.get_template('herders/profile/runes/add_form.html')

    if request.method == 'POST':
        if form.is_valid():
            # Create the monster instance
            new_rune = form.save(commit=False)
            new_rune.owner = request.user.summoner
            new_rune.save()

            messages.success(request, 'Added ' + str(new_rune))

            # Send back blank form
            form = AddRuneInstanceForm()
            form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name})

            response_data = {
                'code': 'success',
                'html': template.render(RequestContext(request, {'add_rune_form': form}))
            }
        else:
            response_data = {
                'code': 'error',
                'html': template.render(RequestContext(request, {'add_rune_form': form}))
            }
    else:
        # Check for any pre-filled GET parameters
        slot = request.GET.get('slot', None)
        assigned_to = request.GET.get('assigned_to', None)

        form = AddRuneInstanceForm(initial={
            'assigned_to': assigned_to,
            'slot': slot if slot is not None else 1,
        })
        form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name})

        # Return form filled in and errors shown
        response_data = {
            'html': template.render(RequestContext(request, {'add_rune_form': form}))
        }

    return JsonResponse(response_data)


@login_required
def rune_edit(request, profile_name, rune_id):
    """Edit an existing rune via AJAX (owner only)."""
    rune = get_object_or_404(RuneInstance, pk=rune_id)
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    form = AddRuneInstanceForm(request.POST or None, instance=rune, auto_id='edit_id_%s')
    form.helper.form_action = reverse('herders:rune_edit', kwargs={'profile_name': profile_name, 'rune_id': rune_id})
    template = loader.get_template('herders/profile/runes/add_form.html')

    if is_owner:
        if request.method == 'POST' and form.is_valid():
            rune = form.save()
            messages.success(request, 'Saved changes to ' + str(rune))
            form = AddRuneInstanceForm(auto_id='edit_id_%s')
            form.helper.form_action = reverse('herders:rune_edit', kwargs={'profile_name': profile_name, 'rune_id': rune_id})

            response_data = {
                'code': 'success',
                'html': template.render(RequestContext(request, {'add_rune_form': form}))
            }
        else:
            # Return form filled in and errors shown
            response_data = {
                'code': 'error',
                'html': template.render(RequestContext(request, {'add_rune_form': form}))
            }

        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required
def rune_assign(request, profile_name, instance_id, slot=None):
    """Show (GET) or filter (POST) the unassigned runes that could be
    assigned to a monster, optionally limited to one slot."""
    rune_queryset = RuneInstance.committed.filter(owner=request.user.summoner, assigned_to=None)
    filter_form = AssignRuneForm(request.POST or None, initial={'slot': slot})
    filter_form.helper.form_action = reverse('herders:rune_assign', kwargs={'profile_name': profile_name, 'instance_id': instance_id})

    if slot:
        rune_queryset = rune_queryset.filter(slot=slot)

    rune_filter = RuneInstanceFilter(request.POST, queryset=rune_queryset)

    if request.method == 'POST':
        template = loader.get_template('herders/profile/runes/assign_results.html')
        response_data = {
            'code': 'results',
            'html': template.render(RequestContext(request, {
                'filter': rune_filter,
                'profile_name': profile_name,
                'instance_id': instance_id,
            }))
        }
    else:
        template = loader.get_template('herders/profile/runes/assign_form.html')
        response_data = {
            'code': 'success',
            'html': template.render(RequestContext(request, {
                'filter': rune_filter,
                'form': filter_form,
                'profile_name': profile_name,
                'instance_id': instance_id,
            }))
        }

    return JsonResponse(response_data)


@login_required
def rune_assign_choice(request, profile_name, instance_id, rune_id):
    """Assign a chosen rune to a monster, displacing any rune in that slot."""
    monster = get_object_or_404(MonsterInstance, pk=instance_id)
    rune = get_object_or_404(RuneInstance, pk=rune_id)

    if rune.assigned_to is not None:
        # TODO: Warn about removing from other monster?
        pass

    # Check for existing rune.
    existing_runes = monster.runeinstance_set.filter(slot=rune.slot)
    for existing_rune in existing_runes:
        # NOTE(review): the displaced rune is never save()d here — presumably
        # RuneInstance.save() on the new assignment resolves the slot
        # conflict; confirm, otherwise the unassignment is lost.
        existing_rune.assigned_to = None

    rune.assigned_to = monster
    rune.save()
    # Re-save the monster so rune-derived stats are recalculated
    monster.save()

    response_data = {
        'code': 'success',
    }

    return JsonResponse(response_data)


@login_required
def rune_unassign(request, profile_name, rune_id):
    """Unassign a single rune from its monster (owner only)."""
    rune = get_object_or_404(RuneInstance, pk=rune_id)
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        mon = rune.assigned_to
        rune.assigned_to = None
        rune.save()

        # Re-save the affected monster so its stats are recalculated
        if mon:
            mon.save()

        response_data = {
            'code': 'success',
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def rune_unassign_all(request, profile_name):
    """Unassign every equipped rune owned by the summoner (owner only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    assigned_mons = []
    assigned_runes = RuneInstance.committed.filter(owner=summoner, assigned_to__isnull=False)
    number_assigned = assigned_runes.count()

    if is_owner:
        for rune in assigned_runes:
            if rune.assigned_to not in assigned_mons:
                assigned_mons.append(rune.assigned_to)
            rune.assigned_to = None
            rune.save()

        # Resave monster instances that had runes removed to recalc stats
        for mon in assigned_mons:
            mon.save()

        messages.success(request, 'Unassigned ' + str(number_assigned) + ' rune(s).')
        response_data = {
            'code': 'success',
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required
def rune_delete(request, profile_name, rune_id):
    rune = get_object_or_404(RuneInstance, pk=rune_id)
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
if is_owner: mon = rune.assigned_to messages.warning(request, 'Deleted ' + str(rune)) rune.delete() if mon: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_delete_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: # Delete the runes death_row = RuneInstance.committed.filter(owner=summoner) number_killed = death_row.count() assigned_mons = [] for rune in death_row: if rune.assigned_to and rune.assigned_to not in assigned_mons: assigned_mons.append(rune.assigned_to) death_row.delete() # Delete the crafts RuneCraftInstance.committed.filter(owner=summoner).delete() messages.warning(request, 'Deleted ' + str(number_killed) + ' runes.') for mon in assigned_mons: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_resave_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: for r in RuneInstance.objects.filter(owner=summoner, substats__isnull=True): r.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_craft_add(request, profile_name): form = AddRuneCraftInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/runes/add_craft_form.html') if request.method == 'POST': if form.is_valid(): # Create the monster instance new_craft = form.save(commit=False) new_craft.owner = 
request.user.summoner new_craft.save() messages.success(request, 'Added ' + new_craft.get_type_display() + ' ' + str(new_craft)) # Send back blank form form = AddRuneCraftInstanceForm() form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name}) response_data = { 'code': 'success', 'html': template.render(RequestContext(request, {'form': form})) } else: response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'form': form})) } else: # Return form filled in and errors shown response_data = { 'html': template.render(RequestContext(request, {'form': form})) } return JsonResponse(response_data) @login_required def rune_craft_edit(request, profile_name, craft_id): craft = get_object_or_404(RuneCraftInstance, pk=craft_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddRuneCraftInstanceForm(request.POST or None, instance=craft) form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id}) template = loader.get_template('herders/profile/runes/add_craft_form.html') if is_owner: if request.method == 'POST' and form.is_valid(): rune = form.save() messages.success(request, 'Saved changes to ' + str(rune)) form = AddRuneInstanceForm() form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id}) response_data = { 'code': 'success', 'html': template.render(RequestContext(request, {'form': form})) } else: # Return form filled in and errors shown response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'form': form})) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_craft_delete(request, profile_name, craft_id): craft = 
get_object_or_404(RuneCraftInstance, pk=craft_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: messages.warning(request, 'Deleted ' + craft.get_rune_display() + ' ' + str(craft)) craft.delete() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() Defined an auto_id for the monster filter form so the fields do not generate the same IDs as other forms. from collections import OrderedDict from copy import deepcopy from django.http import Http404, HttpResponseForbidden, JsonResponse, HttpResponse from django.core.urlresolvers import reverse from django.core.exceptions import PermissionDenied, ObjectDoesNotExist from django.contrib import messages from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import Group from django.contrib.auth.decorators import login_required from django.db import IntegrityError from django.forms.models import modelformset_factory from django.shortcuts import render, redirect, get_object_or_404 from django.template import loader, RequestContext from bestiary.models import Monster, Fusion, Building from .forms import * from .filters import * from .models import Summoner, BuildingInstance, MonsterInstance, MonsterPiece, TeamGroup, Team, RuneInstance, RuneCraftInstance def register(request): form = RegisterUserForm(request.POST or None) if request.method == 'POST': if form.is_valid(): try: # Create the user new_user = User.objects.create_user( username=form.cleaned_data['username'], password=form.cleaned_data['password'], email=form.cleaned_data['email'], ) new_user.save() new_user.groups.add(Group.objects.get(name='Summoners')) new_summoner = Summoner.objects.create( user=new_user, summoner_name=form.cleaned_data['summoner_name'], public=form.cleaned_data['is_public'], ) 
new_summoner.save() # Automatically log them in user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password']) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile_default', profile_name=user.username) except IntegrityError: form.add_error('username', 'Username already taken') context = {'form': form} return render(request, 'herders/register.html', context) @login_required def change_username(request): user = request.user form = CrispyChangeUsernameForm(request.POST or None) context = { 'form': form, } if request.method == 'POST' and form.is_valid(): try: user.username = form.cleaned_data['username'] user.save() return redirect('username_change_complete') except IntegrityError: form.add_error('username', 'Username already taken') return render(request, 'registration/change_username.html', context) def change_username_complete(request): return render(request, 'registration/change_username_complete.html') @login_required def profile_delete(request, profile_name): user = request.user try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = DeleteProfileForm(request.POST or None) form.helper.form_action = reverse('herders:profile_delete', kwargs={'profile_name': profile_name}) context = { 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): logout(request) user.delete() messages.warning(request, 'Your profile has been permanently deleted.') return redirect('news:latest_news') return render(request, 'herders/profile/profile_delete.html', context) else: return HttpResponseForbidden("You don't own this profile") @login_required def following(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile_following', kwargs={'profile_name': profile_name}) ) try: summoner = 
Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'view': 'following', 'return_path': return_path, } return render(request, 'herders/profile/following/list.html', context) @login_required def follow_add(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 new_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.add(new_follower) messages.info(request, 'Now following %s' % new_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() @login_required def follow_remove(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 removed_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.remove(removed_follower) messages.info(request, 'Unfollowed %s' % removed_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() def profile(request, profile_name=None): if profile_name is None: if request.user.is_authenticated(): profile_name = request.user.username else: raise Http404('No user profile specified and not logged in.') try: summoner = 
Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 # Determine if the person logged in is the one requesting the view is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_filter_form = FilterMonsterInstanceForm(auto_id='id_filter_%s') monster_filter_form.helper.form_action = reverse('herders:monster_inventory', kwargs={'profile_name': profile_name}) context = { 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, 'monster_filter_form': monster_filter_form, 'view': 'profile', } if is_owner or summoner.public: return render(request, 'herders/profile/monster_inventory/base.html', context) else: return render(request, 'herders/profile/not_public.html') def buildings(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) all_buildings = Building.objects.all().order_by('name') building_data = [] total_glory_cost = 0 spent_glory = 0 total_guild_cost = 0 spent_guild = 0 for b in all_buildings: bldg_data = _building_data(summoner, b) if b.area == Building.AREA_GENERAL: total_glory_cost += sum(b.upgrade_cost) spent_glory += bldg_data['spent_upgrade_cost'] elif b.area == Building.AREA_GUILD: total_guild_cost += sum(b.upgrade_cost) spent_guild += bldg_data['spent_upgrade_cost'] building_data.append(bldg_data) context = { 'is_owner': is_owner, 'summoner': summoner, 'profile_name': profile_name, 'buildings': building_data, 'total_glory_cost': total_glory_cost, 'spent_glory': spent_glory, 'glory_progress': float(spent_glory) / total_glory_cost * 100, 'total_guild_cost': total_guild_cost, 'spent_guild': spent_guild, 'guild_progress': float(spent_guild) / total_guild_cost * 100, } return render(request, 'herders/profile/buildings/base.html', context) @login_required def 
building_edit(request, profile_name, building_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) base_building = get_object_or_404(Building, pk=building_id) try: owned_instance = BuildingInstance.objects.get(owner=summoner, building=base_building) except BuildingInstance.DoesNotExist: owned_instance = BuildingInstance.objects.create(owner=summoner, level=0, building=base_building) form = EditBuildingForm(request.POST or None, instance=owned_instance) form.helper.form_action = reverse('herders:building_edit', kwargs={'profile_name': profile_name, 'building_id': building_id}) context = { 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): owned_instance = form.save() messages.success(request, 'Updated ' + owned_instance.building.name + ' to level ' + str(owned_instance.level)) template = loader.get_template('herders/profile/buildings/building_row_snippet.html') context = { 'is_owner': is_owner, 'bldg': _building_data(summoner, base_building) } response_data = { 'code': 'success', 'instance_id': building_id, 'html': template.render(RequestContext(request, context)) } else: template = loader.get_template('herders/profile/buildings/edit_form.html') response_data = { 'code': 'error', 'html': template.render(RequestContext(request, context)) } return JsonResponse(response_data) else: return HttpResponseForbidden() def _building_data(summoner, building): percent_stat = building.affected_stat in Building.PERCENT_STATS total_upgrade_cost = sum(building.upgrade_cost) if building.area == Building.AREA_GENERAL: currency = 'glory_points.png' else: currency = 'guild_points.png' try: instance = BuildingInstance.objects.get(owner=summoner, building=building) if instance.level > 0: stat_bonus = building.stat_bonus[instance.level - 1] else: stat_bonus = 0 remaining_upgrade_cost = 
instance.remaining_upgrade_cost() except BuildingInstance.DoesNotExist: instance = None stat_bonus = 0 remaining_upgrade_cost = total_upgrade_cost return { 'base': building, 'instance': instance, 'stat_bonus': stat_bonus, 'percent_stat': percent_stat, 'spent_upgrade_cost': total_upgrade_cost - remaining_upgrade_cost, 'total_upgrade_cost': total_upgrade_cost, 'upgrade_progress': float(total_upgrade_cost - remaining_upgrade_cost) / total_upgrade_cost * 100, 'currency': currency, } def monster_inventory(request, profile_name, view_mode=None, box_grouping=None): # If we passed in view mode or sort method, set the session variable and redirect back to ourself without the view mode or box grouping if view_mode: request.session['profile_view_mode'] = view_mode.lower() if box_grouping: request.session['profile_group_method'] = box_grouping.lower() if request.session.modified: return HttpResponse("Profile view mode cookie set") view_mode = request.session.get('profile_view_mode', 'list').lower() box_grouping = request.session.get('profile_group_method', 'grade').lower() try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 monster_queryset = MonsterInstance.committed.filter(owner=summoner) total_monsters = monster_queryset.count() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if view_mode == 'list': monster_queryset = monster_queryset.select_related('monster', 'monster__leader_skill', 'monster__awakens_from', 'monster__awakens_to').prefetch_related('monster__skills', 'monster__skills__skill_effect', 'runeinstance_set', 'team_set', 'team_leader') pieces = MonsterPiece.objects.filter(owner=summoner) form = FilterMonsterInstanceForm(request.POST or None, auto_id='id_filter_%s') if form.is_valid(): monster_filter = MonsterInstanceFilter(form.cleaned_data, queryset=monster_queryset) else: monster_filter = MonsterInstanceFilter(queryset=monster_queryset) filtered_count 
= monster_filter.qs.count() context = { 'monsters': monster_filter, 'monster_pieces': pieces, 'total_count': total_monsters, 'filtered_count': filtered_count, 'profile_name': profile_name, 'is_owner': is_owner, } if is_owner or summoner.public: if view_mode == 'pieces': context['monster_pieces'] = MonsterPiece.committed.filter(owner=summoner) template = 'herders/profile/monster_inventory/summoning_pieces.html' elif view_mode == 'list': template = 'herders/profile/monster_inventory/list.html' else: # Group up the filtered monsters monster_stable = OrderedDict() if box_grouping == 'grade': monster_stable['6*'] = monster_filter.qs.filter(stars=6).order_by('-level', 'monster__element', 'monster__name') monster_stable['5*'] = monster_filter.qs.filter(stars=5).order_by('-level', 'monster__element', 'monster__name') monster_stable['4*'] = monster_filter.qs.filter(stars=4).order_by('-level', 'monster__element', 'monster__name') monster_stable['3*'] = monster_filter.qs.filter(stars=3).order_by('-level', 'monster__element', 'monster__name') monster_stable['2*'] = monster_filter.qs.filter(stars=2).order_by('-level', 'monster__element', 'monster__name') monster_stable['1*'] = monster_filter.qs.filter(stars=1).order_by('-level', 'monster__element', 'monster__name') elif box_grouping == 'level': monster_stable['40'] = monster_filter.qs.filter(level=40).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['39-31'] = monster_filter.qs.filter(level__gt=30).filter(level__lt=40).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['30-21'] = monster_filter.qs.filter(level__gt=20).filter(level__lte=30).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['20-11'] = monster_filter.qs.filter(level__gt=10).filter(level__lte=20).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['10-1'] = monster_filter.qs.filter(level__lte=10).order_by('-level', '-stars', 'monster__element', 
'monster__name') elif box_grouping == 'attribute': monster_stable['water'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name') monster_stable['fire'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name') monster_stable['wind'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name') monster_stable['light'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name') monster_stable['dark'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name') elif box_grouping == 'priority': monster_stable['High'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_HIGH).order_by('-level', 'monster__element', 'monster__name') monster_stable['Medium'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_MED).order_by('-level', 'monster__element', 'monster__name') monster_stable['Low'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_LOW).order_by('-level', 'monster__element', 'monster__name') monster_stable['None'] = monster_filter.qs.select_related('monster').filter(owner=summoner).filter(Q(priority=None) | Q(priority=0)).order_by('-level', 'monster__element', 'monster__name') else: raise Http404('Invalid sort method') context['monster_stable'] = monster_stable context['box_grouping'] = box_grouping template = 'herders/profile/monster_inventory/box.html' return render(request, template, context) else: return render(request, 'herders/profile/not_public.html', context) @login_required def profile_edit(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = 
Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) user_form = EditUserForm(request.POST or None, instance=request.user) summoner_form = EditSummonerForm(request.POST or None, instance=request.user.summoner) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'user_form': user_form, 'summoner_form': summoner_form, } if is_owner: if request.method == 'POST' and summoner_form.is_valid() and user_form.is_valid(): summoner_form.save() user_form.save() messages.info(request, 'Your profile has been updated.') return redirect(return_path) else: return render(request, 'herders/profile/profile_edit.html', context) else: return HttpResponseForbidden() @login_required def profile_storage(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: form = EditEssenceStorageForm(request.POST or None, instance=request.user.summoner) form.helper.form_action = request.path template = loader.get_template('herders/essence_storage.html') if request.method == 'POST' and form.is_valid(): form.save() messages.success(request, 'Updated essence storage.') response_data = { 'code': 'success' } else: response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'form': form})) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def quick_fodder_menu(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: template = 
loader.get_template('herders/profile/monster_inventory/quick_fodder_menu.html') response_data = { 'code': 'success', 'html': template.render(), } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_instance_add(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: if request.method == 'POST': form = AddMonsterInstanceForm(request.POST or None) else: form = AddMonsterInstanceForm(initial=request.GET.dict()) if request.method == 'POST' and form.is_valid(): # Create the monster instance new_monster = form.save(commit=False) new_monster.owner = request.user.summoner new_monster.save() messages.success(request, 'Added %s to your collection.' % new_monster) template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html') context = { 'profile_name': profile_name, 'instance': new_monster, 'is_owner': is_owner, } response_data = { 'code': 'success', 'instance_id': new_monster.pk.hex, 'html': template.render(RequestContext(request, context)), } else: form.helper.form_action = reverse('herders:monster_instance_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/monster_inventory/add_monster_form.html') # Return form filled in and errors shown response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'add_monster_form': form})) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_instance_quick_add(request, profile_name, monster_id, stars, level): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise 
Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_to_add = get_object_or_404(Monster, pk=monster_id) if is_owner: new_monster = MonsterInstance.committed.create(owner=summoner, monster=monster_to_add, stars=int(stars), level=int(level), fodder=True, notes='', priority=MonsterInstance.PRIORITY_DONE) messages.success(request, 'Added %s to your collection.' % new_monster) return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_bulk_add(request, profile_name): return_path = reverse('herders:profile_default', kwargs={'profile_name': profile_name}) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) BulkAddFormset = modelformset_factory(MonsterInstance, form=BulkAddMonsterInstanceForm, formset=BulkAddMonsterInstanceFormset, extra=5, max_num=50) if request.method == 'POST': formset = BulkAddFormset(request.POST) else: formset = BulkAddFormset() context = { 'profile_name': request.user.username, 'return_path': return_path, 'is_owner': is_owner, 'bulk_add_formset_action': request.path + '?next=' + return_path, 'view': 'profile', } if is_owner: if request.method == 'POST': if formset.is_valid(): new_instances = formset.save(commit=False) for new_instance in new_instances: try: if new_instance.monster: new_instance.owner = summoner if new_instance.monster.archetype == Monster.TYPE_MATERIAL: new_instance.priority = MonsterInstance.PRIORITY_DONE new_instance.save() messages.success(request, 'Added %s to your collection.' 
% new_instance) except ObjectDoesNotExist: # Blank form, don't care pass return redirect(return_path) else: raise PermissionDenied("Trying to bulk add to profile you don't own") context['bulk_add_formset'] = formset return render(request, 'herders/profile/monster_inventory/bulk_add_form.html', context) def monster_instance_view(request, profile_name, instance_id): return_path = request.GET.get( 'next', request.path ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() context = { 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'instance': instance, 'is_owner': is_owner, 'view': 'profile', } if is_owner or summoner.public: return render(request, 'herders/profile/monster_view/base.html', context) else: return render(request, 'herders/profile/not_public.html') def monster_instance_view_runes(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() instance_runes = [ instance.runeinstance_set.filter(slot=1).first(), instance.runeinstance_set.filter(slot=2).first(), instance.runeinstance_set.filter(slot=3).first(), instance.runeinstance_set.filter(slot=4).first(), instance.runeinstance_set.filter(slot=5).first(), instance.runeinstance_set.filter(slot=6).first(), ] context = { 'runes': instance_runes, 'instance': 
instance, 'profile_name': profile_name, 'is_owner': is_owner, } return render(request, 'herders/profile/monster_view/runes.html', context) def monster_instance_view_stats(request, profile_name, instance_id): try: instance = MonsterInstance.committed.select_related('monster').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() context = { 'instance': instance, 'bldg_stats': instance.get_building_stats(), 'guild_stats': instance.get_building_stats(Building.AREA_GUILD), } return render(request, 'herders/profile/monster_view/stats.html', context) def monster_instance_view_skills(request, profile_name, instance_id): try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() # Reconcile skill level with actual skill from base monster skills = [] skill_levels = [ instance.skill_1_level, instance.skill_2_level, instance.skill_3_level, instance.skill_4_level, ] for idx in range(0, instance.monster.skills.count()): skills.append({ 'skill': instance.monster.skills.all()[idx], 'level': skill_levels[idx] }) context = { 'instance': instance, 'skills': skills, } return render(request, 'herders/profile/monster_view/skills.html', context) def monster_instance_view_info(request, profile_name, instance_id): try: instance = MonsterInstance.committed.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: raise Http404() if instance.monster.is_awakened: ingredient_in = instance.monster.fusion_set.all() elif instance.monster.can_awaken and instance.monster.awakens_to: ingredient_in = instance.monster.awakens_to.fusion_set.all() else: ingredient_in = [] if instance.monster.is_awakened and instance.monster.awakens_from: product_of = instance.monster.awakens_from.product.first() elif instance.monster.can_awaken: product_of = instance.monster.product.first() else: 
@login_required()
def monster_instance_remove_runes(request, profile_name, instance_id):
    """Unassign every rune from a monster instance (AJAX, owner only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        try:
            instance = MonsterInstance.committed.get(pk=instance_id)
        except ObjectDoesNotExist:
            raise Http404()
        else:
            for rune in instance.runeinstance_set.all():
                rune.assigned_to = None
                rune.save()

            # Re-save the instance so derived/cached stats reflect rune removal.
            instance.save()
            messages.success(request, 'Removed all runes from ' + str(instance))

            return JsonResponse({'code': 'success'})
    else:
        raise PermissionDenied()


def _setup_skill_level_field(form, skills, index):
    """Configure the skill_<n>_level field for the skill at *index* (0-based).

    Adds a "Max" button plus min/max bounds when the monster has a levelable
    skill in that slot; hides the field otherwise. Centralizing this removes
    four near-identical copy-pasted blocks (one of which carried a bug).
    """
    field_name = 'skill_%d_level' % (index + 1)

    if len(skills) > index and skills[index]['skill'].max_level > 1:
        form.helper[field_name].wrap(
            FieldWithButtons,
            StrictButton(
                "Max",
                name="Set_Max_Skill_%d" % (index + 1),
                data_skill_field=form[field_name].auto_id,
            ),
        )
        form.helper[field_name].wrap(Field, min=1, max=skills[index]['skill'].max_level)
        form.fields[field_name].label = skills[index]['skill'].name + " Level"
    else:
        form.helper[field_name].wrap(Div, css_class="hidden")


@login_required()
def monster_instance_edit(request, profile_name, instance_id):
    """Edit a monster instance via an AJAX form; returns rendered snippets.

    BUG FIX: the original skill-4 branch capped that field's "Max" button at
    skills[1]'s max level (copy-paste error). The shared helper now uses each
    skill's own max level. The redundant min/max kwargs the original also
    passed to the FieldWithButtons wrap for skills 2-4 (duplicating the
    subsequent Field wrap) were dropped along with the duplication.
    """
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    instance = get_object_or_404(MonsterInstance, pk=instance_id)
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        # Reconcile stored per-slot levels with the base monster's skills.
        skill_levels = [
            instance.skill_1_level,
            instance.skill_2_level,
            instance.skill_3_level,
            instance.skill_4_level,
        ]
        skills = []
        for idx in range(0, instance.monster.skills.count()):
            skills.append({
                'skill': instance.monster.skills.all()[idx],
                'level': skill_levels[idx],
            })

        form = EditMonsterInstanceForm(request.POST or None, instance=instance)
        form.helper.form_action = request.path

        for idx in range(4):
            _setup_skill_level_field(form, skills, idx)

        if not instance.monster.fusion_food:
            form.helper['ignore_for_fusion'].wrap(Div, css_class="hidden")

        if request.method == 'POST' and form.is_valid():
            mon = form.save()
            messages.success(request, 'Successfully edited ' + str(mon))

            view_mode = request.session.get('profile_view_mode', 'list').lower()
            if view_mode == 'list':
                template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html')
            else:
                template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html')

            context = {
                'profile_name': profile_name,
                'instance': mon,
                'is_owner': is_owner,
            }
            response_data = {
                'code': 'success',
                'instance_id': mon.pk.hex,
                'html': template.render(RequestContext(request, context)),
            }
        else:
            # Return the form with any validation errors rendered.
            template = loader.get_template('herders/profile/monster_view/edit_form.html')
            response_data = {
                'code': 'error',
                'html': template.render(RequestContext(request, {'edit_monster_form': form})),
            }

        return JsonResponse(response_data)
    else:
        raise PermissionDenied()
@login_required()
def monster_instance_delete(request, profile_name, instance_id):
    """Delete a monster instance after verifying ownership."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    monster = get_object_or_404(MonsterInstance, pk=instance_id)

    # Only the instance's owner may delete it.
    if request.user.summoner == monster.owner:
        messages.warning(request, 'Deleted ' + str(monster))
        monster.delete()
        return redirect(return_path)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_instance_power_up(request, profile_name, instance_id):
    """Power up or evolve a monster, consuming the selected food monsters."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    monster = get_object_or_404(MonsterInstance, pk=instance_id)

    form = PowerUpMonsterInstanceForm(request.POST or None)
    form.helper.form_action = reverse(
        'herders:monster_instance_power_up',
        kwargs={'profile_name': profile_name, 'instance_id': instance_id}
    )

    context = {
        'profile_name': request.user.username,
        'monster': monster,
        'is_owner': is_owner,
        'form': form,
        'view': 'profile',
    }

    validation_errors = {}
    response_data = {'code': 'error'}

    if is_owner:
        if request.method == 'POST' and form.is_valid():
            food_monsters = form.cleaned_data['monster']

            # A monster cannot be fed to itself.
            if monster in food_monsters:
                validation_errors['base_food_same'] = "You can't feed a monster to itself. "

            is_evolution = request.POST.get('evolve', False)

            if is_evolution:
                # Evolution constraints (level check skippable via form flag).
                if monster.stars >= 6:
                    validation_errors['base_monster_stars'] = "%s is already at 6 stars." % monster.monster.name

                if not form.cleaned_data['ignore_evolution']:
                    if monster.level != monster.max_level_from_stars():
                        validation_errors['base_monster_level'] = "%s is not at max level for the current star rating (Lvl %s)." % (monster.monster.name, monster.monster.max_level_from_stars())

                # Enough fodder, and all fodder at matching star rating.
                if len(food_monsters) < monster.stars:
                    validation_errors['food_monster_quantity'] = "Evolution requres %s food monsters." % monster.stars

                for food in food_monsters:
                    if food.stars != monster.stars:
                        if 'food_monster_stars' not in validation_errors:
                            validation_errors['food_monster_stars'] = "All food monsters must be %s stars or higher." % monster.stars

                if not validation_errors:
                    # Bump stars and reset level.
                    monster.stars += 1
                    monster.level = 1
                    monster.save()
                    messages.success(request, 'Successfully evolved %s to %s<span class="glyphicon glyphicon-star"></span>' % (monster.monster.name, monster.stars), extra_tags='safe')

            if not validation_errors:
                # Consume the food monsters.
                for food in food_monsters:
                    if food.owner == request.user.summoner:
                        messages.warning(request, 'Deleted %s' % food)
                        food.delete()
                    else:
                        raise PermissionDenied("Trying to delete a monster you don't own")

                # Evolutions return to the page; power-ups open the edit dialog.
                response_data['code'] = 'success' if is_evolution else 'edit'
                return JsonResponse(response_data)
    else:
        raise PermissionDenied("Trying to power up or evolve a monster you don't own")

    # Fall through on validation errors (or GET): re-render the form.
    template = loader.get_template('herders/profile/monster_view/power_up_form.html')
    context['validation_errors'] = validation_errors
    response_data['html'] = template.render(RequestContext(request, context))
    return JsonResponse(response_data)
@login_required()
def monster_instance_awaken(request, profile_name, instance_id):
    """Awaken a monster, optionally deducting essences from storage."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    monster = get_object_or_404(MonsterInstance, pk=instance_id)
    template = loader.get_template('herders/profile/monster_view/awaken_form.html')

    form = AwakenMonsterInstanceForm(request.POST or None)
    form.helper.form_action = reverse(
        'herders:monster_instance_awaken',
        kwargs={'profile_name': profile_name, 'instance_id': instance_id}
    )

    if is_owner:
        if not monster.monster.is_awakened:
            if request.method == 'POST' and form.is_valid():
                if form.cleaned_data['subtract_materials']:
                    # Deduct every essence (element x size) cost from storage.
                    # Same 18 field updates as before, expressed as a loop.
                    summoner = Summoner.objects.get(user=request.user)
                    for element in ('magic', 'fire', 'water', 'wind', 'dark', 'light'):
                        for size in ('high', 'mid', 'low'):
                            storage_attr = 'storage_%s_%s' % (element, size)
                            cost = getattr(monster.monster, 'awaken_mats_%s_%s' % (element, size))
                            setattr(summoner, storage_attr, getattr(summoner, storage_attr) - cost)
                    summoner.save()

                # Point the instance at the awakened form of its base monster.
                monster.monster = monster.monster.awakens_to
                monster.save()

                response_data = {
                    'code': 'success',
                    'removeElement': '#awakenMonsterButton',
                }
            else:
                # Show what the summoner has on hand vs. each required essence.
                storage = summoner.get_storage()
                available_essences = OrderedDict()

                for element, essences in monster.monster.get_awakening_materials().iteritems():
                    available_essences[element] = OrderedDict()
                    for size, cost in essences.iteritems():
                        if cost > 0:
                            available_essences[element][size] = {
                                'qty': storage[element][size],
                                'sufficient': storage[element][size] >= cost,
                            }

                response_data = {
                    'code': 'error',
                    'html': template.render(RequestContext(request, {
                        'awaken_form': form,
                        'available_essences': available_essences,
                        'instance': monster,
                    }))
                }
        else:
            error_template = loader.get_template('herders/profile/monster_already_awakened.html')
            response_data = {
                'code': 'error',
                'html': error_template.render(RequestContext(request, {}))
            }

        return JsonResponse(response_data)
    else:
        raise PermissionDenied()
@login_required()
def monster_instance_duplicate(request, profile_name, instance_id):
    """Create a copy of a monster instance owned by the requester."""
    monster = get_object_or_404(MonsterInstance, pk=instance_id)

    if request.user.summoner == monster.owner:
        # Clearing the pk before save() makes Django INSERT a new row.
        newmonster = monster
        newmonster.pk = None
        newmonster.save()
        messages.success(request, 'Succesfully copied ' + str(newmonster))

        view_mode = request.session.get('profile_view_mode', 'list').lower()
        if view_mode == 'list':
            template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html')
        else:
            template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html')

        snippet_context = {
            'profile_name': profile_name,
            'is_owner': True,
            'instance': newmonster,
        }

        return JsonResponse({
            'code': 'success',
            'instance_id': newmonster.pk.hex,
            'html': template.render(RequestContext(request, snippet_context)),
        })
    else:
        return HttpResponseForbidden()


@login_required()
def monster_piece_add(request, profile_name):
    """Add a monster-piece record to the requester's own profile (AJAX)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        if request.method == 'POST':
            form = MonsterPieceForm(request.POST or None)
        else:
            form = MonsterPieceForm()

        form.helper.form_action = reverse('herders:monster_piece_add', kwargs={'profile_name': profile_name})
        template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html')

        if request.method == 'POST' and form.is_valid():
            new_pieces = form.save(commit=False)
            new_pieces.owner = request.user.summoner
            new_pieces.save()
            messages.success(request, 'Added %s to your collection.' % new_pieces)
            response_data = {'code': 'success'}
        else:
            # Re-render the form with validation errors.
            response_data = {
                'code': 'error',
                'html': template.render(RequestContext(request, {'form': form})),
            }

        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()
@login_required()
def monster_piece_edit(request, profile_name, instance_id):
    """Edit an existing monster-piece record (AJAX, owner only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    pieces = get_object_or_404(MonsterPiece, pk=instance_id)
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html')

    if is_owner:
        form = MonsterPieceForm(request.POST or None, instance=pieces)
        form.helper.form_action = request.path

        if request.method == 'POST' and form.is_valid():
            new_piece = form.save()
            # Success swaps the form template for the inventory snippet.
            template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html')
            response_data = {
                'code': 'success',
                'instance_id': new_piece.pk.hex,
                'html': template.render(RequestContext(request, {
                    'piece': new_piece,
                    'is_owner': is_owner,
                })),
            }
        else:
            response_data = {
                'code': 'error',
                'html': template.render(RequestContext(request, {'form': form})),
            }

        return JsonResponse(response_data)
    else:
        raise PermissionDenied()


@login_required()
def monster_piece_summon(request, profile_name, instance_id):
    """Summon a monster from collected pieces, consuming the pieces."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    pieces = get_object_or_404(MonsterPiece, pk=instance_id)
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        if pieces.can_summon():
            new_monster = MonsterInstance.committed.create(
                owner=summoner,
                monster=pieces.monster,
                stars=pieces.monster.base_stars,
                level=1,
                fodder=False,
                notes='',
                priority=MonsterInstance.PRIORITY_DONE,
            )
            messages.success(request, 'Added %s to your collection.' % new_monster)

            # Deduct the summoning cost; drop the record once exhausted.
            pieces.pieces -= pieces.PIECE_REQUIREMENTS[pieces.monster.base_stars]
            pieces.save()
            if pieces.pieces <= 0:
                pieces.delete()

        template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html')
        response_data = {
            'code': 'success',
            'instance_id': pieces.pk.hex,
            'html': template.render(RequestContext(request, {
                'piece': pieces,
                'is_owner': is_owner,
            })),
        }
        return JsonResponse(response_data)
    else:
        raise PermissionDenied()
@login_required()
def monster_piece_delete(request, profile_name, instance_id):
    """Delete a monster-piece record owned by the requester."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    pieces = get_object_or_404(MonsterPiece, pk=instance_id)

    # Only the record's owner may delete it.
    if request.user.summoner == pieces.owner:
        messages.warning(request, 'Deleted ' + str(pieces))
        pieces.delete()
        return redirect(return_path)
    else:
        return HttpResponseForbidden()


def fusion_progress(request, profile_name):
    """Show the overall fusion progress page for a summoner's profile."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    context = {
        'view': 'fusion',
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
        'fusions': Fusion.objects.all(),
    }

    return render(request, 'herders/profile/fusion/base.html', context)
def fusion_progress_detail(request, profile_name, monster_slug):
    """Detailed fusion progress for one fusable monster.

    Builds per-ingredient acquired/evolved/leveled/awakened status plus the
    essence cost of awakening everything, folding in sub-fusion costs where
    an unowned ingredient can itself be fused.
    """
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    context = {
        'view': 'fusion',
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
    }

    if is_owner or summoner.public:
        try:
            fusion = Fusion.objects.get(product__bestiary_slug=monster_slug)
        except Fusion.DoesNotExist:
            raise Http404()
        else:
            level = 10 + fusion.stars * 5
            ingredients = []

            # Fusion is complete if the product (or its awakened form) is owned.
            fusion_complete = MonsterInstance.committed.filter(
                Q(owner=summoner),
                Q(monster=fusion.product) | Q(monster=fusion.product.awakens_to)
            ).exists()

            fusion_ready = True

            for ingredient in fusion.ingredients.all().select_related('awakens_from', 'awakens_to'):
                owned_ingredients = MonsterInstance.committed.filter(
                    Q(owner=summoner),
                    Q(monster=ingredient) | Q(monster=ingredient.awakens_from),
                ).order_by('-stars', '-level', '-monster__is_awakened')

                owned_ingredient_pieces = MonsterPiece.committed.filter(
                    Q(owner=summoner),
                    Q(monster=ingredient) | Q(monster=ingredient.awakens_from),
                ).first()

                # Judge readiness off the best owned copy not flagged as
                # ignored for fusion; fall back to summonable pieces.
                for owned_ingredient in owned_ingredients:
                    if not owned_ingredient.ignore_for_fusion:
                        acquired = True
                        evolved = owned_ingredient.stars >= fusion.stars
                        leveled = owned_ingredient.level >= level
                        awakened = owned_ingredient.monster.is_awakened
                        complete = acquired & evolved & leveled & awakened
                        break
                else:
                    if owned_ingredient_pieces:
                        acquired = owned_ingredient_pieces.can_summon()
                    else:
                        acquired = False
                    evolved = False
                    leveled = False
                    awakened = False
                    complete = False

                if not complete:
                    fusion_ready = False

                # Unowned ingredient: check whether it can itself be fused
                # and, if so, what awakening its ingredients would cost.
                if not acquired:
                    try:
                        sub_fusion = Fusion.objects.get(product=ingredient.awakens_from)
                    except Fusion.DoesNotExist:
                        sub_fusion_awakening_cost = None
                    else:
                        awakened_sub_fusion_ingredients = MonsterInstance.committed.filter(
                            monster__pk__in=sub_fusion.ingredients.values_list('pk', flat=True),
                            ignore_for_fusion=False,
                            owner=summoner,
                        )
                        sub_fusion_awakening_cost = sub_fusion.total_awakening_cost(awakened_sub_fusion_ingredients)
                else:
                    sub_fusion_awakening_cost = None

                ingredients.append({
                    'instance': ingredient,
                    'owned': owned_ingredients,
                    'pieces': owned_ingredient_pieces,
                    'complete': complete,
                    'acquired': acquired,
                    'evolved': evolved,
                    'leveled': leveled,
                    'awakened': awakened,
                    'sub_fusion_cost': sub_fusion_awakening_cost,
                })

            awakened_owned_ingredients = MonsterInstance.committed.filter(
                monster__pk__in=fusion.ingredients.values_list('pk', flat=True),
                ignore_for_fusion=False,
                owner=summoner,
            )
            total_cost = fusion.total_awakening_cost(awakened_owned_ingredients)
            essences_satisfied, total_missing = fusion.missing_awakening_cost(summoner)

            # Fold sub-fusion essence costs into the totals when available.
            if fusion.sub_fusion_available():
                total_sub_fusion_cost = deepcopy(total_cost)

                for ingredient_progress in ingredients:
                    if ingredient_progress['sub_fusion_cost']:
                        for element, sizes in total_sub_fusion_cost.iteritems():
                            for size, qty in sizes.iteritems():
                                total_sub_fusion_cost[element][size] += ingredient_progress['sub_fusion_cost'][element][size]

                # Compare the combined cost against the owner's storage.
                storage = summoner.get_storage()
                sub_fusion_total_missing = {
                    element: {
                        size: total_sub_fusion_cost[element][size] - storage[element][size]
                        if total_sub_fusion_cost[element][size] > storage[element][size] else 0
                        for size, qty in element_sizes.items()
                    }
                    for element, element_sizes in total_sub_fusion_cost.items()
                }

                sub_fusion_mats_satisfied = True
                for sizes in total_sub_fusion_cost.itervalues():
                    for qty in sizes.itervalues():
                        if qty > 0:
                            sub_fusion_mats_satisfied = False
            else:
                sub_fusion_total_missing = None
                sub_fusion_mats_satisfied = None

            context['fusion'] = {
                'instance': fusion.product,
                'acquired': fusion_complete,
                'stars': fusion.stars,
                'level': level,
                'cost': fusion.cost,
                'ingredients': ingredients,
                'awakening_mats_cost': total_cost,
                'awakening_mats_sufficient': essences_satisfied,
                'awakening_mats_missing': total_missing,
                'sub_fusion_mats_missing': sub_fusion_total_missing,
                'sub_fusion_mats_sufficient': sub_fusion_mats_satisfied,
                'ready': fusion_ready,
            }

        return render(request, 'herders/profile/fusion/fusion_detail.html', context)
    else:
        return render(request, 'herders/profile/not_public.html', context)
def teams(request, profile_name):
    """Teams landing page for a summoner's profile."""
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    add_team_group_form = AddTeamGroupForm()

    context = {
        'view': 'teams',
        'profile_name': profile_name,
        'summoner': summoner,
        'return_path': return_path,
        'is_owner': is_owner,
        'add_team_group_form': add_team_group_form,
    }

    if is_owner or summoner.public:
        return render(request, 'herders/profile/teams/teams_base.html', context)
    else:
        return render(request, 'herders/profile/not_public.html', context)


def team_list(request, profile_name):
    """Render the list of team groups belonging to a summoner."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    # Get team objects for the summoner
    team_groups = TeamGroup.objects.filter(owner=summoner)

    context = {
        'profile_name': profile_name,
        'is_owner': is_owner,
        'team_groups': team_groups,
    }

    return render(request, 'herders/profile/teams/team_list.html', context)


@login_required
def team_group_add(request, profile_name):
    """Create a new team group on the requester's own profile.

    BUG FIX: the original did `return PermissionDenied(...)`, handing Django
    an exception instance instead of an HttpResponse (a 500). Raising it
    produces the intended 403.
    """
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    form = AddTeamGroupForm(request.POST or None)

    if is_owner:
        if form.is_valid() and request.method == 'POST':
            new_group = form.save(commit=False)
            new_group.owner = request.user.summoner
            new_group.save()
            return redirect(return_path)
    else:
        raise PermissionDenied("Attempting to add group to profile you don't own.")
@login_required
def team_group_edit(request, profile_name, group_id):
    """Rename/edit a team group; renders the edit page on GET.

    BUG FIX: the original did `return PermissionDenied(...)` for non-owners,
    returning an exception instance instead of an HttpResponse (a 500).
    Raising it produces the intended 403.
    """
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    team_group = get_object_or_404(TeamGroup, pk=group_id)
    form = EditTeamGroupForm(request.POST or None, instance=team_group)

    if is_owner:
        if form.is_valid() and request.method == 'POST':
            form.save()
            return redirect(return_path)
    else:
        raise PermissionDenied("Editing a group you don't own")

    context = {
        'profile_name': profile_name,
        'summoner': summoner,
        'form': form,
        'group_id': group_id,
        'return_path': return_path,
        'is_owner': is_owner,
        'view': 'teams',
    }

    return render(request, 'herders/profile/teams/team_group_edit.html', context)


@login_required
def team_group_delete(request, profile_name, group_id):
    """Delete a team group, optionally reassigning its teams first.

    BUG FIX: the final non-owner branch also did `return PermissionDenied()`;
    it is now raised (403 instead of a 500).
    """
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    team_group = get_object_or_404(TeamGroup, pk=group_id)

    form = DeleteTeamGroupForm(request.POST or None)
    form.helper.form_action = request.path
    # Only offer the owner's other groups as reassignment targets.
    form.fields['reassign_group'].queryset = TeamGroup.objects.filter(owner=summoner).exclude(pk=group_id)

    context = {
        'view': 'teams',
        'profile_name': profile_name,
        'return_path': return_path,
        'is_owner': is_owner,
        'form': form,
    }

    if is_owner:
        if request.method == 'POST' and form.is_valid():
            list_of_teams = Team.objects.filter(group__pk=group_id)

            if request.POST.get('delete', False):
                list_of_teams.delete()
            else:
                new_group = form.cleaned_data['reassign_group']
                if new_group:
                    for team in list_of_teams:
                        team.group = new_group
                        team.save()
                else:
                    context['validation_errors'] = 'Please specify a group to reassign to.'

        # Group must be empty (deleted/reassigned teams) before removal.
        if team_group.team_set.count() > 0:
            return render(request, 'herders/profile/teams/team_group_delete.html', context)
        else:
            messages.warning(request, 'Deleted team group %s' % team_group.name)
            team_group.delete()
            return redirect(return_path)
    else:
        raise PermissionDenied()
def team_detail(request, profile_name, team_id):
    """Display one team together with the deduplicated skill effects it carries."""
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    team = get_object_or_404(Team, pk=team_id)

    # Gather unique skill effects from the leader first, then the roster,
    # preserving first-seen order.
    team_effects = []
    members = [team.leader] if team.leader else []
    members.extend(team.roster.all())

    for member in members:
        effects = member.monster.all_skill_effects()
        if effects:
            for effect in effects:
                if effect not in team_effects:
                    team_effects.append(effect)

    context = {
        'view': 'teams',
        'profile_name': profile_name,
        'return_path': return_path,
        'is_owner': is_owner,
        'team': team,
        'team_buffs': team_effects,
    }

    if is_owner or summoner.public:
        return render(request, 'herders/profile/teams/team_detail.html', context)
    else:
        return render(request, 'herders/profile/not_public.html', context)
@login_required
def team_edit(request, profile_name, team_id=None):
    """Create a team (team_id is None) or edit an existing one."""
    return_path = reverse('herders:teams', kwargs={'profile_name': profile_name})

    if team_id:
        team = Team.objects.get(pk=team_id)
        edit_form = EditTeamForm(request.POST or None, instance=team)
    else:
        edit_form = EditTeamForm(request.POST or None)

    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    # Limit selectable related objects to ones this summoner owns.
    edit_form.fields['group'].queryset = TeamGroup.objects.filter(owner=summoner)
    edit_form.fields['leader'].queryset = MonsterInstance.committed.filter(owner=summoner)
    edit_form.fields['roster'].queryset = MonsterInstance.committed.filter(owner=summoner)
    edit_form.helper.form_action = request.path + '?next=' + return_path

    context = {
        'profile_name': request.user.username,
        'return_path': return_path,
        'is_owner': is_owner,
        'view': 'teams',
    }

    if is_owner:
        if request.method == 'POST' and edit_form.is_valid():
            team = edit_form.save()
            messages.success(request, 'Saved changes to %s - %s.' % (team.group, team))
            return team_detail(request, profile_name, team.pk.hex)
    else:
        raise PermissionDenied()

    context['edit_team_form'] = edit_form
    return render(request, 'herders/profile/teams/team_edit.html', context)


@login_required
def team_delete(request, profile_name, team_id):
    """Delete a team after verifying the requester owns its group."""
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    team = get_object_or_404(Team, pk=team_id)

    if request.user.summoner == team.group.owner:
        team.delete()
        messages.warning(request, 'Deleted team %s - %s.' % (team.group, team))
        return redirect(return_path)
    else:
        return HttpResponseForbidden()
def runes(request, profile_name):
    """Rune inventory landing page with the filter form."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    filter_form = FilterRuneForm(auto_id="filter_id_%s")
    filter_form.helper.form_action = reverse('herders:rune_inventory', kwargs={'profile_name': profile_name})

    context = {
        'view': 'runes',
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
        'old_rune_count': RuneInstance.objects.filter(owner=summoner, substats__isnull=True).count(),
        'rune_filter_form': filter_form,
    }

    if is_owner or summoner.public:
        return render(request, 'herders/profile/runes/base.html', context)
    else:
        return render(request, 'herders/profile/not_public.html', context)


def rune_inventory(request, profile_name, view_mode=None, box_grouping=None):
    """Render the rune inventory as box/grid/table/crafts.

    When view_mode/box_grouping are passed, the preference is stored in the
    session and a confirmation response is returned instead of the inventory.
    """
    if view_mode:
        request.session['rune_inventory_view_mode'] = view_mode.lower()
    if box_grouping:
        request.session['rune_inventory_box_method'] = box_grouping.lower()

    if request.session.modified:
        return HttpResponse("Rune view mode cookie set")

    view_mode = request.session.get('rune_inventory_view_mode', 'box').lower()
    box_grouping = request.session.get('rune_inventory_box_method', 'slot').lower()

    if view_mode == 'crafts':
        return rune_inventory_crafts(request, profile_name)

    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        raise Http404

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    rune_queryset = RuneInstance.committed.filter(owner=summoner).select_related('assigned_to', 'assigned_to__monster')
    total_count = rune_queryset.count()

    form = FilterRuneForm(request.POST or None)
    if form.is_valid():
        rune_filter = RuneInstanceFilter(form.cleaned_data, queryset=rune_queryset)
    else:
        rune_filter = RuneInstanceFilter(None, queryset=rune_queryset)

    filtered_count = rune_filter.qs.count()

    context = {
        'runes': rune_filter,
        'total_count': total_count,
        'filtered_count': filtered_count,
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
    }

    if is_owner or summoner.public:
        if view_mode == 'box':
            # Partition the filtered runes into labeled groups.
            rune_box = OrderedDict()

            if box_grouping == 'slot':
                for slot in range(1, 7):
                    rune_box['Slot %d' % slot] = rune_filter.qs.filter(slot=slot)
            elif box_grouping == 'grade':
                for stars in range(6, 0, -1):
                    rune_box['%d*' % stars] = rune_filter.qs.filter(stars=stars)
            elif box_grouping == 'equipped':
                rune_box['Not Equipped'] = rune_filter.qs.filter(assigned_to__isnull=True)
                rune_box['Equipped'] = rune_filter.qs.filter(assigned_to__isnull=False)
            elif box_grouping == 'type':
                for (type, type_name) in RuneInstance.TYPE_CHOICES:
                    rune_box[type_name] = rune_filter.qs.filter(type=type)

            context['runes'] = rune_box
            context['box_grouping'] = box_grouping
            template = 'herders/profile/runes/inventory.html'
        elif view_mode == 'grid':
            template = 'herders/profile/runes/inventory_grid.html'
        else:
            template = 'herders/profile/runes/inventory_table.html'

        return render(request, template, context)
    else:
        return render(request, 'herders/profile/not_public.html', context)
summoner.user == request.user) context = { 'profile_name': profile_name, 'is_owner': is_owner, } if is_owner or summoner.public: craft_box = OrderedDict() for (craft, craft_name) in RuneInstance.CRAFT_CHOICES: craft_box[craft_name] = OrderedDict() for rune, rune_name in RuneInstance.TYPE_CHOICES: craft_box[craft_name][rune_name] = RuneCraftInstance.committed.filter(owner=summoner, type=craft, rune=rune).order_by('stat', 'quality') context['crafts'] = craft_box return render(request, 'herders/profile/runes/inventory_crafts.html', context) else: return render(request, 'herders/profile/not_public.html') def rune_counts(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: response_data = { 'code': 'success', 'counts': summoner.get_rune_counts() } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_add(request, profile_name): form = AddRuneInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/runes/add_form.html') if request.method == 'POST': if form.is_valid(): # Create the monster instance new_rune = form.save(commit=False) new_rune.owner = request.user.summoner new_rune.save() messages.success(request, 'Added ' + str(new_rune)) # Send back blank form form = AddRuneInstanceForm() form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) response_data = { 'code': 'success', 'html': template.render(RequestContext(request, {'add_rune_form': form})) } else: response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'add_rune_form': form})) } else: # Check for any pre-filled GET parameters slot = request.GET.get('slot', None) assigned_to = 
request.GET.get('assigned_to', None) form = AddRuneInstanceForm(initial={ 'assigned_to': assigned_to, 'slot': slot if slot is not None else 1, }) form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) # Return form filled in and errors shown response_data = { 'html': template.render(RequestContext(request, {'add_rune_form': form})) } return JsonResponse(response_data) @login_required def rune_edit(request, profile_name, rune_id): rune = get_object_or_404(RuneInstance, pk=rune_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddRuneInstanceForm(request.POST or None, instance=rune, auto_id='edit_id_%s') form.helper.form_action = reverse('herders:rune_edit', kwargs={'profile_name': profile_name, 'rune_id': rune_id}) template = loader.get_template('herders/profile/runes/add_form.html') if is_owner: if request.method == 'POST' and form.is_valid(): rune = form.save() messages.success(request, 'Saved changes to ' + str(rune)) form = AddRuneInstanceForm(auto_id='edit_id_%s') form.helper.form_action = reverse('herders:rune_edit', kwargs={'profile_name': profile_name, 'rune_id': rune_id}) response_data = { 'code': 'success', 'html': template.render(RequestContext(request, {'add_rune_form': form})) } else: # Return form filled in and errors shown response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'add_rune_form': form})) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_assign(request, profile_name, instance_id, slot=None): rune_queryset = RuneInstance.committed.filter(owner=request.user.summoner, assigned_to=None) filter_form = AssignRuneForm(request.POST or None, initial={'slot': slot}) filter_form.helper.form_action = reverse('herders:rune_assign', kwargs={'profile_name': 
profile_name, 'instance_id': instance_id}) if slot: rune_queryset = rune_queryset.filter(slot=slot) rune_filter = RuneInstanceFilter(request.POST, queryset=rune_queryset) if request.method == 'POST': template = loader.get_template('herders/profile/runes/assign_results.html') response_data = { 'code': 'results', 'html': template.render(RequestContext(request, { 'filter': rune_filter, 'profile_name': profile_name, 'instance_id': instance_id, })) } else: template = loader.get_template('herders/profile/runes/assign_form.html') response_data = { 'code': 'success', 'html': template.render(RequestContext(request, { 'filter': rune_filter, 'form': filter_form, 'profile_name': profile_name, 'instance_id': instance_id, })) } return JsonResponse(response_data) @login_required def rune_assign_choice(request, profile_name, instance_id, rune_id): monster = get_object_or_404(MonsterInstance, pk=instance_id) rune = get_object_or_404(RuneInstance, pk=rune_id) if rune.assigned_to is not None: # TODO: Warn about removing from other monster? pass # Check for existing rune. 
existing_runes = monster.runeinstance_set.filter(slot=rune.slot) for existing_rune in existing_runes: existing_rune.assigned_to = None rune.assigned_to = monster rune.save() monster.save() response_data = { 'code': 'success', } return JsonResponse(response_data) @login_required def rune_unassign(request, profile_name, rune_id): rune = get_object_or_404(RuneInstance, pk=rune_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: mon = rune.assigned_to rune.assigned_to = None rune.save() if mon: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def rune_unassign_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) assigned_mons = [] assigned_runes = RuneInstance.committed.filter(owner=summoner, assigned_to__isnull=False) number_assigned = assigned_runes.count() if is_owner: for rune in assigned_runes: if rune.assigned_to not in assigned_mons: assigned_mons.append(rune.assigned_to) rune.assigned_to = None rune.save() # Resave monster instances that had runes removed to recalc stats for mon in assigned_mons: mon.save() messages.success(request, 'Unassigned ' + str(number_assigned) + ' rune(s).') response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_delete(request, profile_name, rune_id): rune = get_object_or_404(RuneInstance, pk=rune_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) 
if is_owner: mon = rune.assigned_to messages.warning(request, 'Deleted ' + str(rune)) rune.delete() if mon: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_delete_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: # Delete the runes death_row = RuneInstance.committed.filter(owner=summoner) number_killed = death_row.count() assigned_mons = [] for rune in death_row: if rune.assigned_to and rune.assigned_to not in assigned_mons: assigned_mons.append(rune.assigned_to) death_row.delete() # Delete the crafts RuneCraftInstance.committed.filter(owner=summoner).delete() messages.warning(request, 'Deleted ' + str(number_killed) + ' runes.') for mon in assigned_mons: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_resave_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: for r in RuneInstance.objects.filter(owner=summoner, substats__isnull=True): r.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_craft_add(request, profile_name): form = AddRuneCraftInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/runes/add_craft_form.html') if request.method == 'POST': if form.is_valid(): # Create the monster instance new_craft = form.save(commit=False) new_craft.owner = 
request.user.summoner new_craft.save() messages.success(request, 'Added ' + new_craft.get_type_display() + ' ' + str(new_craft)) # Send back blank form form = AddRuneCraftInstanceForm() form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name}) response_data = { 'code': 'success', 'html': template.render(RequestContext(request, {'form': form})) } else: response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'form': form})) } else: # Return form filled in and errors shown response_data = { 'html': template.render(RequestContext(request, {'form': form})) } return JsonResponse(response_data) @login_required def rune_craft_edit(request, profile_name, craft_id): craft = get_object_or_404(RuneCraftInstance, pk=craft_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddRuneCraftInstanceForm(request.POST or None, instance=craft) form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id}) template = loader.get_template('herders/profile/runes/add_craft_form.html') if is_owner: if request.method == 'POST' and form.is_valid(): rune = form.save() messages.success(request, 'Saved changes to ' + str(rune)) form = AddRuneInstanceForm() form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id}) response_data = { 'code': 'success', 'html': template.render(RequestContext(request, {'form': form})) } else: # Return form filled in and errors shown response_data = { 'code': 'error', 'html': template.render(RequestContext(request, {'form': form})) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_craft_delete(request, profile_name, craft_id): craft = 
get_object_or_404(RuneCraftInstance, pk=craft_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: raise Http404 is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: messages.warning(request, 'Deleted ' + craft.get_rune_display() + ' ' + str(craft)) craft.delete() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden()
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RDismo(RPackage): """Functions for species distribution modeling, that is, predicting entire geographic distributions form occurrences at a number of sites and the environment at these sites.""" homepage = "https://cloud.r-project.org/package=dismo" url = "https://cloud.r-project.org/src/contrib/dismo_1.1-4.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/dismo" version('1.1-4', sha256='f2110f716cd9e4cca5fd2b22130c6954658aaf61361d2fe688ba22bbfdfa97c8') depends_on('r@3.2.0:', type=('build', 'run')) depends_on('r-raster@2.5-2:', type=('build', 'run')) depends_on('r-sp@1.2-0:', type=('build', 'run')) add version 1.3-3 to r-dismo (#20883) # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RDismo(RPackage): """Species Distribution Modeling Methods for species distribution modeling, that is, predicting the environmental similarity of any site to that of the locations of known occurrences of a species.""" homepage = "https://cloud.r-project.org/package=dismo" url = "https://cloud.r-project.org/src/contrib/dismo_1.1-4.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/dismo" version('1.3-3', sha256='fd65331ac18a4287ba0856b90508ddd0e2738c653eecc5f3eb2b14e1d06949ca') version('1.1-4', sha256='f2110f716cd9e4cca5fd2b22130c6954658aaf61361d2fe688ba22bbfdfa97c8') depends_on('r@3.2.0:', type=('build', 'run')) depends_on('r-raster@2.5-2:', type=('build', 'run')) depends_on('r-sp@1.2-0:', type=('build', 'run')) depends_on('java@8:', when='@1.3-3:')
# -*- coding: utf-8 -*- """ Created on Thu Sep 7 21:14:25 2017 @author: Suhas Somnath, Chris Smith """ from __future__ import division, print_function, unicode_literals, absolute_import import sys import numpy as np from collections import Iterable import datetime from dtype_utils import contains_integers __all__ = ['clean_string_att', 'get_aux_dset_slicing', 'make_indices_matrix', 'INDICES_DTYPE', 'VALUES_DTYPE', 'Dimension', 'build_ind_val_matrices', 'calc_chunks', 'create_spec_inds_from_vals'] if sys.version_info.major == 3: unicode = str INDICES_DTYPE = np.uint32 VALUES_DTYPE = np.float32 class Dimension(object): def __init__(self, name, units, values): """ Simple object that describes a dimension in a dataset by its name, units, and values Parameters ---------- name : str / unicode Name of the dimension. For example 'Bias' units : str / unicode Units for this dimension. For example: 'V' values : array-like, or int Values over which this dimension was varied. A linearly increasing set of values will be generated if an integer is provided instead of an array. 
""" if not isinstance(name, (str, unicode)): raise TypeError('name should be a string') name = name.strip() if len(name) < 1: raise ValueError('name should not be an empty string') if not isinstance(units, (str, unicode)): raise TypeError('units should be a string') if isinstance(values, int): if values < 1: raise ValueError('values should at least be specified as a positive integer') values = np.arange(values) if not isinstance(values, (np.ndarray, list, tuple)): raise TypeError('values should be array-like') self.name = name self.units = units self.values = values def __repr__(self): return '{} ({}) : {}'.format(self.name, self.units, self.values) def get_aux_dset_slicing(dim_names, last_ind=None, is_spectroscopic=False): """ Returns a dictionary of slice objects to help in creating region references in the position or spectroscopic indices and values datasets Parameters ------------ dim_names : iterable List of strings denoting the names of the position axes or spectroscopic dimensions arranged in the same order that matches the dimensions in the indices / values dataset last_ind : (Optional) unsigned int, default = None Last pixel in the positon or spectroscopic matrix. Useful in experiments where the parameters have changed (eg. BEPS new data format) during the experiment. is_spectroscopic : bool, optional. default = True set to True for position datasets and False for spectroscopic datasets Returns ------------ slice_dict : dictionary Dictionary of tuples containing slice objects corresponding to each position axis. 
""" if not isinstance(dim_names, Iterable): raise TypeError('dim_names should be and Iterable') if not len(dim_names) > 0: raise ValueError('dim_names should not be empty') if not np.all([isinstance(x, (str, unicode)) for x in dim_names]): raise TypeError('dim_names should contain strings') slice_dict = dict() for spat_ind, curr_dim_name in enumerate(dim_names): val = (slice(last_ind), slice(spat_ind, spat_ind + 1)) if is_spectroscopic: val = val[::-1] slice_dict[str(curr_dim_name)] = val return slice_dict def make_indices_matrix(num_steps, is_position=True): """ Makes an ancillary indices matrix given the number of steps in each dimension. In other words, this function builds a matrix whose rows correspond to unique combinations of the multiple dimensions provided. Parameters ------------ num_steps : List / numpy array Number of steps in each spatial or spectral dimension Note that the axes must be ordered from fastest varying to slowest varying is_position : bool, optional, default = True Whether the returned matrix is meant for position (True) indices (tall and skinny) or spectroscopic (False) indices (short and wide) Returns -------------- indices_matrix : 2D unsigned int numpy array arranged as [steps, spatial dimension] """ if not isinstance(num_steps, (tuple, list, np.ndarray)): raise TypeError('num_steps should be a list / tuple / numpy array') if not contains_integers(num_steps, min_val=1 + int(len(num_steps) > 0)): raise ValueError('num_steps should contain integers greater than equal to 1 (empty dimension) or 2') num_steps = np.array(num_steps) spat_dims = max(1, len(np.where(num_steps > 1)[0])) indices_matrix = np.zeros(shape=(np.prod(num_steps), spat_dims), dtype=INDICES_DTYPE) dim_ind = 0 for indx, curr_steps in enumerate(num_steps): if curr_steps > 1: part1 = np.prod(num_steps[:indx + 1]) if indx > 0: part2 = np.prod(num_steps[:indx]) else: part2 = 1 if indx + 1 == len(num_steps): part3 = 1 else: part3 = np.prod(num_steps[indx + 1:]) 
indices_matrix[:, dim_ind] = np.tile(np.floor(np.arange(part1) / part2), part3) dim_ind += 1 if not is_position: indices_matrix = indices_matrix.T return indices_matrix def clean_string_att(att_val): """ Replaces any unicode objects within lists with their string counterparts to ensure compatibility with python 3. If the attribute is indeed a list of unicodes, the changes will be made in-place Parameters ---------- att_val : object Attribute object Returns ------- att_val : object Attribute object """ try: if isinstance(att_val, Iterable): if type(att_val) in [unicode, str]: return att_val elif np.any([type(x) in [str, unicode, bytes, np.str_] for x in att_val]): return np.array(att_val, dtype='S') if type(att_val) == np.str_: return str(att_val) return att_val except TypeError: raise TypeError('Failed to clean: {}'.format(att_val)) def build_ind_val_matrices(unit_values, is_spectral=True): """ Builds indices and values matrices using given unit values for each dimension. Parameters ---------- unit_values : list / tuple Sequence of values vectors for each dimension is_spectral : bool (optional), default = True If true, returns matrices for spectroscopic datasets, else returns matrices for Position datasets Returns ------- ind_mat : 2D numpy array Indices matrix val_mat : 2D numpy array Values matrix """ if not isinstance(unit_values, (list, tuple)): raise TypeError('unit_values should be a list or tuple') if not np.all([np.array(x).ndim == 1 for x in unit_values]): raise ValueError('unit_values should only contain 1D array') lengths = [len(x) for x in unit_values] tile_size = [np.prod(lengths[x:]) for x in range(1, len(lengths))] + [1] rep_size = [1] + [np.prod(lengths[:x]) for x in range(1, len(lengths))] val_mat = np.zeros(shape=(len(lengths), np.prod(lengths))) ind_mat = np.zeros(shape=val_mat.shape, dtype=np.uint32) for ind, ts, rs, vec in zip(range(len(lengths)), tile_size, rep_size, unit_values): val_mat[ind] = np.tile(np.repeat(vec, rs), ts) ind_mat[ind] = 
np.tile(np.repeat(np.arange(len(vec)), rs), ts) if not is_spectral: val_mat = val_mat.T ind_mat = ind_mat.T return INDICES_DTYPE(ind_mat), VALUES_DTYPE(val_mat) def create_spec_inds_from_vals(ds_spec_val_mat): """ Create new Spectroscopic Indices table from the changes in the Spectroscopic Values Parameters ---------- ds_spec_val_mat : array-like, Holds the spectroscopic values to be indexed Returns ------- ds_spec_inds_mat : numpy array of uints the same shape as ds_spec_val_mat Indices corresponding to the values in ds_spec_val_mat """ if not isinstance(ds_spec_val_mat, np.ndarray): raise TypeError('ds_spec_val_mat must be a numpy array') if ds_spec_val_mat.ndim != 2: raise ValueError('ds_spec_val_mat must be a 2D array arranged as [dimension, values]') ds_spec_inds_mat = np.zeros_like(ds_spec_val_mat, dtype=np.int32) """ Find how quickly the spectroscopic values are changing in each row and the order of row from fastest changing to slowest. """ change_count = [len(np.where([row[i] != row[i - 1] for i in range(len(row))])[0]) for row in ds_spec_val_mat] change_sort = np.argsort(change_count)[::-1] """ Determine everywhere the spectroscopic values change and build index table based on those changed """ indices = np.zeros(ds_spec_val_mat.shape[0]) for jcol in range(1, ds_spec_val_mat.shape[1]): this_col = ds_spec_val_mat[change_sort, jcol] last_col = ds_spec_val_mat[change_sort, jcol - 1] """ Check if current column values are different than those in last column. 
""" changed = np.where(this_col != last_col)[0] """ If only one row changed, increment the index for that column If more than one row has changed, increment the index for the last row that changed and set all others to zero """ if len(changed) == 1: indices[changed] += 1 elif len(changed > 1): for change in changed[:-1]: indices[change] = 0 indices[changed[-1]] += 1 """ Store the indices for the current column in the dataset """ ds_spec_inds_mat[change_sort, jcol] = indices return ds_spec_inds_mat def calc_chunks(dimensions, dtype_byte_size, unit_chunks=None, max_chunk_mem=10240): """ Calculate the chunk size for the HDF5 dataset based on the dimensions and the maximum chunk size in memory Parameters ---------- dimensions : array_like of int Shape of the data to be chunked dtype_byte_size : unsigned int Size of an entry in the data in bytes unit_chunks : array_like of int, optional Unit size of the chunking in each dimension. Must be the same size as the shape of `ds_main`. Default None, `unit_chunks` is set to 1 in all dimensions max_chunk_mem : int, optional Maximum size of the chunk in memory in bytes. Default 10240b or 10kb per h5py recommendations Returns ------- chunking : tuple of int Calculated maximum size of a chunk in each dimension that is as close to the requested `max_chunk_mem` as posible while having steps based on the input `unit_chunks`. """ if not isinstance(dimensions, (list, tuple)): raise TypeError('dimensions should either be a tuple or list') if not isinstance(dtype_byte_size, int): raise TypeError('dtype_byte_size should be an integer') if unit_chunks is not None: if not isinstance(unit_chunks, (tuple, list)): raise TypeError('unit_chunks should either be a tuple or list') ''' Ensure that dimensions is an array ''' dimensions = np.asarray(dimensions, dtype=np.uint) ''' Set the unit_chunks to all ones if not given. Ensure it is an array if it is. 
''' if unit_chunks is None: unit_chunks = np.ones_like(dimensions) else: unit_chunks = np.asarray(unit_chunks, dtype=np.uint) if unit_chunks.shape != dimensions.shape: raise ValueError('Unit chunk size must have the same shape as the input dataset.') ''' Save the original size of unit_chunks to use for incrementing the chunk size during loop ''' base_chunks = unit_chunks.copy() ''' Loop until chunk_size is greater than the maximum chunk_mem or the chunk_size is equal to that of dimensions ''' while np.prod(unit_chunks) * dtype_byte_size <= max_chunk_mem: ''' Check if all chunk dimensions are greater or equal to the actual dimensions. Exit the loop if true. ''' if np.all(unit_chunks >= dimensions): break ''' Find the index of the next chunk to be increased and increment it by the base_chunk size ''' ichunk = np.argmax(dimensions / unit_chunks) unit_chunks[ichunk] += base_chunks[ichunk] ''' Ensure that the size of the chunks is between one and the dimension size. ''' unit_chunks = np.clip(unit_chunks, np.ones_like(unit_chunks), dimensions) chunking = tuple(unit_chunks) return chunking def write_dset_to_txt(input_file, output_file='output.csv'): """ Output an h5 file or a PycroDataset in csv format Parameters ---------- input_file : str path to the h5 input file that is to be translated into csv format output_file : str, optional path that the output file should be written to """ try: import h5py import pycroscopy as px import os except ImportError: print('something is not installed properly') if not isinstance(input_file, str): raise TypeError('input_file should be a path to an h5 file') h5File = h5py.File(input_file) pdRaw = px.PycroDataset(h5File['Measurement_000/Channel_000/Raw_Data']) specVals = pdRaw.h5_spec_vals posVals = pdRaw.h5_pos_vals dimUnits = pdRaw.spec_dim_descriptors pdPosDims = pdRaw.pos_dim_labels pdSpecDims = pdRaw.spec_dim_labels header = '' for idx, spec in enumerate(specVals): """ Obtain the units from the spectral dimension descriptors then 
create each line of the header with a spacer between the dimensions and the data """ unitStart = dimUnits[idx].find('(') + 1 unitEnd = dimUnits[idx].find(')') unit = dimUnits[idx][unitStart:unitEnd] header = header + ','.join(str(freq) + ' ' + unit for freq in spec) + '\n' header = header + ','.join('--------------------------------------------------------------' for idx in specVals[0]) """ Create the spectral and position labels for the dataset in string form then create the position value array in string form, right-strip the last comma from the string to deliver the correct number of values, append all of the labels and values together, save the data and header to a temporary csv output """ specLabel = '' for dim in pdSpecDims: specLabel = specLabel + ','.join('' for idx in pdPosDims) + str(dim) + ',\n' posLabel = ','.join(posL for posL in pdPosDims) + ',\n' posValOut = '' for val, posDim in enumerate(posVals): posValOut = posValOut + ','.join(str(posVal) for posVal in posVals[val]) + ',\n' posValOut = posValOut.rstrip('\n') output = specLabel + posLabel + posValOut np.savetxt('temp.csv', pdRaw, delimiter=',', header=header, comments='') left_dset = output.splitlines() with open('temp.csv', 'r+') as f, open(output_file, 'w') as b: for left_line, right_line in zip(left_dset, f): right_line = left_line + right_line b.write(right_line) os.remove('temp.csv') return output_file Add returns # -*- coding: utf-8 -*- """ Created on Thu Sep 7 21:14:25 2017 @author: Suhas Somnath, Chris Smith """ from __future__ import division, print_function, unicode_literals, absolute_import import sys import numpy as np from collections import Iterable import datetime from dtype_utils import contains_integers __all__ = ['clean_string_att', 'get_aux_dset_slicing', 'make_indices_matrix', 'INDICES_DTYPE', 'VALUES_DTYPE', 'Dimension', 'build_ind_val_matrices', 'calc_chunks', 'create_spec_inds_from_vals'] if sys.version_info.major == 3: unicode = str INDICES_DTYPE = np.uint32 VALUES_DTYPE = 
np.float32 class Dimension(object): def __init__(self, name, units, values): """ Simple object that describes a dimension in a dataset by its name, units, and values Parameters ---------- name : str / unicode Name of the dimension. For example 'Bias' units : str / unicode Units for this dimension. For example: 'V' values : array-like, or int Values over which this dimension was varied. A linearly increasing set of values will be generated if an integer is provided instead of an array. """ if not isinstance(name, (str, unicode)): raise TypeError('name should be a string') name = name.strip() if len(name) < 1: raise ValueError('name should not be an empty string') if not isinstance(units, (str, unicode)): raise TypeError('units should be a string') if isinstance(values, int): if values < 1: raise ValueError('values should at least be specified as a positive integer') values = np.arange(values) if not isinstance(values, (np.ndarray, list, tuple)): raise TypeError('values should be array-like') self.name = name self.units = units self.values = values def __repr__(self): return '{} ({}) : {}'.format(self.name, self.units, self.values) def get_aux_dset_slicing(dim_names, last_ind=None, is_spectroscopic=False): """ Returns a dictionary of slice objects to help in creating region references in the position or spectroscopic indices and values datasets Parameters ------------ dim_names : iterable List of strings denoting the names of the position axes or spectroscopic dimensions arranged in the same order that matches the dimensions in the indices / values dataset last_ind : (Optional) unsigned int, default = None Last pixel in the positon or spectroscopic matrix. Useful in experiments where the parameters have changed (eg. BEPS new data format) during the experiment. is_spectroscopic : bool, optional. 
default = True set to True for position datasets and False for spectroscopic datasets Returns ------------ slice_dict : dictionary Dictionary of tuples containing slice objects corresponding to each position axis. """ if not isinstance(dim_names, Iterable): raise TypeError('dim_names should be and Iterable') if not len(dim_names) > 0: raise ValueError('dim_names should not be empty') if not np.all([isinstance(x, (str, unicode)) for x in dim_names]): raise TypeError('dim_names should contain strings') slice_dict = dict() for spat_ind, curr_dim_name in enumerate(dim_names): val = (slice(last_ind), slice(spat_ind, spat_ind + 1)) if is_spectroscopic: val = val[::-1] slice_dict[str(curr_dim_name)] = val return slice_dict def make_indices_matrix(num_steps, is_position=True): """ Makes an ancillary indices matrix given the number of steps in each dimension. In other words, this function builds a matrix whose rows correspond to unique combinations of the multiple dimensions provided. Parameters ------------ num_steps : List / numpy array Number of steps in each spatial or spectral dimension Note that the axes must be ordered from fastest varying to slowest varying is_position : bool, optional, default = True Whether the returned matrix is meant for position (True) indices (tall and skinny) or spectroscopic (False) indices (short and wide) Returns -------------- indices_matrix : 2D unsigned int numpy array arranged as [steps, spatial dimension] """ if not isinstance(num_steps, (tuple, list, np.ndarray)): raise TypeError('num_steps should be a list / tuple / numpy array') if not contains_integers(num_steps, min_val=1 + int(len(num_steps) > 0)): raise ValueError('num_steps should contain integers greater than equal to 1 (empty dimension) or 2') num_steps = np.array(num_steps) spat_dims = max(1, len(np.where(num_steps > 1)[0])) indices_matrix = np.zeros(shape=(np.prod(num_steps), spat_dims), dtype=INDICES_DTYPE) dim_ind = 0 for indx, curr_steps in enumerate(num_steps): if 
curr_steps > 1: part1 = np.prod(num_steps[:indx + 1]) if indx > 0: part2 = np.prod(num_steps[:indx]) else: part2 = 1 if indx + 1 == len(num_steps): part3 = 1 else: part3 = np.prod(num_steps[indx + 1:]) indices_matrix[:, dim_ind] = np.tile(np.floor(np.arange(part1) / part2), part3) dim_ind += 1 if not is_position: indices_matrix = indices_matrix.T return indices_matrix def clean_string_att(att_val): """ Replaces any unicode objects within lists with their string counterparts to ensure compatibility with python 3. If the attribute is indeed a list of unicodes, the changes will be made in-place Parameters ---------- att_val : object Attribute object Returns ------- att_val : object Attribute object """ try: if isinstance(att_val, Iterable): if type(att_val) in [unicode, str]: return att_val elif np.any([type(x) in [str, unicode, bytes, np.str_] for x in att_val]): return np.array(att_val, dtype='S') if type(att_val) == np.str_: return str(att_val) return att_val except TypeError: raise TypeError('Failed to clean: {}'.format(att_val)) def build_ind_val_matrices(unit_values, is_spectral=True): """ Builds indices and values matrices using given unit values for each dimension. 
Parameters ---------- unit_values : list / tuple Sequence of values vectors for each dimension is_spectral : bool (optional), default = True If true, returns matrices for spectroscopic datasets, else returns matrices for Position datasets Returns ------- ind_mat : 2D numpy array Indices matrix val_mat : 2D numpy array Values matrix """ if not isinstance(unit_values, (list, tuple)): raise TypeError('unit_values should be a list or tuple') if not np.all([np.array(x).ndim == 1 for x in unit_values]): raise ValueError('unit_values should only contain 1D array') lengths = [len(x) for x in unit_values] tile_size = [np.prod(lengths[x:]) for x in range(1, len(lengths))] + [1] rep_size = [1] + [np.prod(lengths[:x]) for x in range(1, len(lengths))] val_mat = np.zeros(shape=(len(lengths), np.prod(lengths))) ind_mat = np.zeros(shape=val_mat.shape, dtype=np.uint32) for ind, ts, rs, vec in zip(range(len(lengths)), tile_size, rep_size, unit_values): val_mat[ind] = np.tile(np.repeat(vec, rs), ts) ind_mat[ind] = np.tile(np.repeat(np.arange(len(vec)), rs), ts) if not is_spectral: val_mat = val_mat.T ind_mat = ind_mat.T return INDICES_DTYPE(ind_mat), VALUES_DTYPE(val_mat) def create_spec_inds_from_vals(ds_spec_val_mat): """ Create new Spectroscopic Indices table from the changes in the Spectroscopic Values Parameters ---------- ds_spec_val_mat : array-like, Holds the spectroscopic values to be indexed Returns ------- ds_spec_inds_mat : numpy array of uints the same shape as ds_spec_val_mat Indices corresponding to the values in ds_spec_val_mat """ if not isinstance(ds_spec_val_mat, np.ndarray): raise TypeError('ds_spec_val_mat must be a numpy array') if ds_spec_val_mat.ndim != 2: raise ValueError('ds_spec_val_mat must be a 2D array arranged as [dimension, values]') ds_spec_inds_mat = np.zeros_like(ds_spec_val_mat, dtype=np.int32) """ Find how quickly the spectroscopic values are changing in each row and the order of row from fastest changing to slowest. 
""" change_count = [len(np.where([row[i] != row[i - 1] for i in range(len(row))])[0]) for row in ds_spec_val_mat] change_sort = np.argsort(change_count)[::-1] """ Determine everywhere the spectroscopic values change and build index table based on those changed """ indices = np.zeros(ds_spec_val_mat.shape[0]) for jcol in range(1, ds_spec_val_mat.shape[1]): this_col = ds_spec_val_mat[change_sort, jcol] last_col = ds_spec_val_mat[change_sort, jcol - 1] """ Check if current column values are different than those in last column. """ changed = np.where(this_col != last_col)[0] """ If only one row changed, increment the index for that column If more than one row has changed, increment the index for the last row that changed and set all others to zero """ if len(changed) == 1: indices[changed] += 1 elif len(changed > 1): for change in changed[:-1]: indices[change] = 0 indices[changed[-1]] += 1 """ Store the indices for the current column in the dataset """ ds_spec_inds_mat[change_sort, jcol] = indices return ds_spec_inds_mat def calc_chunks(dimensions, dtype_byte_size, unit_chunks=None, max_chunk_mem=10240): """ Calculate the chunk size for the HDF5 dataset based on the dimensions and the maximum chunk size in memory Parameters ---------- dimensions : array_like of int Shape of the data to be chunked dtype_byte_size : unsigned int Size of an entry in the data in bytes unit_chunks : array_like of int, optional Unit size of the chunking in each dimension. Must be the same size as the shape of `ds_main`. Default None, `unit_chunks` is set to 1 in all dimensions max_chunk_mem : int, optional Maximum size of the chunk in memory in bytes. Default 10240b or 10kb per h5py recommendations Returns ------- chunking : tuple of int Calculated maximum size of a chunk in each dimension that is as close to the requested `max_chunk_mem` as posible while having steps based on the input `unit_chunks`. 
""" if not isinstance(dimensions, (list, tuple)): raise TypeError('dimensions should either be a tuple or list') if not isinstance(dtype_byte_size, int): raise TypeError('dtype_byte_size should be an integer') if unit_chunks is not None: if not isinstance(unit_chunks, (tuple, list)): raise TypeError('unit_chunks should either be a tuple or list') ''' Ensure that dimensions is an array ''' dimensions = np.asarray(dimensions, dtype=np.uint) ''' Set the unit_chunks to all ones if not given. Ensure it is an array if it is. ''' if unit_chunks is None: unit_chunks = np.ones_like(dimensions) else: unit_chunks = np.asarray(unit_chunks, dtype=np.uint) if unit_chunks.shape != dimensions.shape: raise ValueError('Unit chunk size must have the same shape as the input dataset.') ''' Save the original size of unit_chunks to use for incrementing the chunk size during loop ''' base_chunks = unit_chunks.copy() ''' Loop until chunk_size is greater than the maximum chunk_mem or the chunk_size is equal to that of dimensions ''' while np.prod(unit_chunks) * dtype_byte_size <= max_chunk_mem: ''' Check if all chunk dimensions are greater or equal to the actual dimensions. Exit the loop if true. ''' if np.all(unit_chunks >= dimensions): break ''' Find the index of the next chunk to be increased and increment it by the base_chunk size ''' ichunk = np.argmax(dimensions / unit_chunks) unit_chunks[ichunk] += base_chunks[ichunk] ''' Ensure that the size of the chunks is between one and the dimension size. 
''' unit_chunks = np.clip(unit_chunks, np.ones_like(unit_chunks), dimensions) chunking = tuple(unit_chunks) return chunking def write_dset_to_txt(input_file, output_file='output.csv'): """ Output an h5 file or a PycroDataset in csv format Parameters ---------- input_file : str path to the h5 input file that is to be translated into csv format output_file : str, optional path that the output file should be written to Returns ------- output_file: str """ try: import h5py import pycroscopy as px import os except ImportError: print('something is not installed properly') if not isinstance(input_file, str): raise TypeError('input_file should be a path to an h5 file') h5File = h5py.File(input_file) pdRaw = px.PycroDataset(h5File['Measurement_000/Channel_000/Raw_Data']) specVals = pdRaw.h5_spec_vals posVals = pdRaw.h5_pos_vals dimUnits = pdRaw.spec_dim_descriptors pdPosDims = pdRaw.pos_dim_labels pdSpecDims = pdRaw.spec_dim_labels header = '' for idx, spec in enumerate(specVals): """ Obtain the units from the spectral dimension descriptors then create each line of the header with a spacer between the dimensions and the data """ unitStart = dimUnits[idx].find('(') + 1 unitEnd = dimUnits[idx].find(')') unit = dimUnits[idx][unitStart:unitEnd] header = header + ','.join(str(freq) + ' ' + unit for freq in spec) + '\n' header = header + ','.join('--------------------------------------------------------------' for idx in specVals[0]) """ Create the spectral and position labels for the dataset in string form then create the position value array in string form, right-strip the last comma from the string to deliver the correct number of values, append all of the labels and values together, save the data and header to a temporary csv output """ specLabel = '' for dim in pdSpecDims: specLabel = specLabel + ','.join('' for idx in pdPosDims) + str(dim) + ',\n' posLabel = ','.join(posL for posL in pdPosDims) + ',\n' posValOut = '' for val, posDim in enumerate(posVals): posValOut = 
posValOut + ','.join(str(posVal) for posVal in posVals[val]) + ',\n' posValOut = posValOut.rstrip('\n') output = specLabel + posLabel + posValOut np.savetxt('temp.csv', pdRaw, delimiter=',', header=header, comments='') left_dset = output.splitlines() with open('temp.csv', 'r+') as f, open(output_file, 'w') as b: for left_line, right_line in zip(left_dset, f): right_line = left_line + right_line b.write(right_line) os.remove('temp.csv') return output_file
# future from __future__ import annotations # stdlib from typing import Any from typing import Dict from typing import Iterable from typing import Optional from typing import TYPE_CHECKING from typing import Tuple from typing import Union # third party import numpy as np from scipy.ndimage.interpolation import rotate # relative from ..common.serde.serializable import serializable from .broadcastable import is_broadcastable from .config import DEFAULT_FLOAT_NUMPY_TYPE from .config import DEFAULT_INT_NUMPY_TYPE from .passthrough import is_acceptable_simple_type # type: ignore from .smpc.utils import get_shape if TYPE_CHECKING: # relative from .autodp.phi_tensor import PhiTensor @serializable(recursive_serde=True) class lazyrepeatarray: """ A class representing Differential Privacy metadata (minimum and maximum values) in a way that saves RAM/CPU. We store large arrays of a single repeating value as a single tuple (shape) and a single value (int/float/etc) e.g. np.array([8,8,8,8,8,8]) = lazyrepeatarray(data=8, shape=(6,)) Think like the opposite of np.broadcast, repeated values along an axis are collapsed but the .shape attribute of the higher dimensional projection is retained for operations. ... Attributes: data: int/float the actual value that is repeating. shape: tuple the shape that the fully expanded array would be. Methods: to_numpy(): expands the lazyrepeatarray into the full sized numpy array it was representing. """ __attr_allowlist__ = ["data", "shape"] def __init__(self, data: np.ndarray, shape: Tuple[int, ...]) -> None: """ data: the raw data values without repeats shape: the shape of 'data' if repeats were included """ # NOTE: all additional arguments are assumed to be broadcast if dims are shorter # than that of data. Example: if data.shape == (2,3,4) and # min_vals.shape == (2,3), then it's assumed that the full min_vals.shape is # actually (2,3,4) where the last dim is simply copied. 
# Example2: if data.shape == (2,3,4) and min_vals.shape == (2,1,4), then the # middle dimension is supposed to be copied to be min_vals.shape == (2,3,4) # if necessary. This is just to keep the memory footprint (and computation) # as small as possible. if isinstance(data, (bool, int, float)): data = np.array(data) if isinstance(data, int): data = data.astype(DEFAULT_INT_NUMPY_TYPE) # type: ignore if isinstance(data, float): data = data.astype(DEFAULT_FLOAT_NUMPY_TYPE) # type: ignore # verify broadcasting works on shapes if -1 not in shape: np.broadcast_shapes(data.shape, shape) self.data = data self.shape = shape if isinstance(shape, Iterable): for val in shape: if val < 0: raise ValueError(f"Invalid shape: {shape}") def __getitem__(self, item: Union[str, int, slice]) -> lazyrepeatarray: if self.data.shape == self.shape: output = self.data[item] return lazyrepeatarray(data=output, shape=output.shape) elif self.data.size == 1: test_arr = np.ones(self.shape)[ item ] # TODO: Is there a better way to determine output shape? return lazyrepeatarray(data=self.data, shape=test_arr.shape) else: raise NotImplementedError def __add__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. """ if is_acceptable_simple_type(other): return self.__class__(data=self.data + other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) if self.data.shape == other.data.shape: return self.__class__(data=self.data + other.data, shape=self.shape) else: return self.__class__(data=self.data + other.data, shape=self.shape) def __sub__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. 
""" if is_acceptable_simple_type(other): res = self.data - other return self.__class__(data=res, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) if self.data.shape == other.data.shape: return self.__class__(data=self.data - other.data, shape=self.shape) else: return self.__class__(data=self.data - other.data, shape=self.shape) def __mul__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. """ if is_acceptable_simple_type(other): return self.__class__(data=self.data * other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( "Cannot broadcast arrays with shapes for LazyRepeatArray Multiplication:" + f" {self.shape} & {other.shape}" ) else: return self.__class__(data=self.data * other.data, shape=self.shape) def __matmul__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. 
""" if is_acceptable_simple_type(other): new_shape = get_shape("__matmul__", self.shape, other.shape) if self.data.size == 1: return self.__class__( data=np.matmul(np.ones(self.shape), other * self.data), shape=new_shape, ) return self.__class__(data=self.data.__matmul__(other), shape=new_shape) if self.shape[-1] != other.shape[-2]: raise Exception( f"cannot matrix multiply tensors with different shapes: {self.shape} and {other.shape}" ) result = self.to_numpy() @ other.to_numpy() return self.__class__(data=result, shape=result.shape) def __lshift__(self, other: Any) -> lazyrepeatarray: if is_acceptable_simple_type(other): return self.__class__(data=self.data << other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) return self.__class__(data=self.data << other.data, shape=self.shape) def __rshift__(self, other: Any) -> lazyrepeatarray: if is_acceptable_simple_type(other): return self.__class__(data=self.data >> other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) return self.__class__(data=self.data >> other.data, shape=self.shape) def zeros_like(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: res = np.array(np.zeros_like(self.to_numpy(), *args, **kwargs)) return lazyrepeatarray(data=res, shape=res.shape) def __rtruediv__(self, other: Any) -> lazyrepeatarray: res = (1 / self.data) * other return lazyrepeatarray(data=res, shape=self.shape) def __rmatmul__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. 
""" if is_acceptable_simple_type(other): new_shape = get_shape("__matmul__", other.shape, self.shape) if other.size == 1: return self.__class__( data=np.matmul(np.ones(other.shape), other * self.data), shape=new_shape, ) return self.__class__( data=self.to_numpy().__rmatmul__(other), shape=new_shape ) if other.shape[-1] != self.shape[0]: raise Exception( "cannot matrix multiply tensors with different shapes: {self.shape} and {other.shape}" ) result = self.to_numpy().__rmatmul__(other.to_numpy()) return self.__class__(data=result, shape=result.shape) def __pow__(self, exponent: int) -> lazyrepeatarray: if exponent == 2: return self * self raise Exception("not sure how to do this yet") def pad(self, pad_width: int, mode: str = "reflect") -> lazyrepeatarray: if mode == "reflect": new_shape = tuple([i + pad_width * 2 for i in self.shape]) if self.data.shape == self.shape: return lazyrepeatarray( data=np.pad(self.data, pad_width=pad_width, mode="reflect"), shape=new_shape, ) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=new_shape) else: raise NotImplementedError else: raise NotImplementedError def horizontal_flip(self) -> lazyrepeatarray: if self.data.shape == self.shape: return lazyrepeatarray(data=np.fliplr(self.data), shape=self.shape) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=self.shape) else: raise NotImplementedError def vertical_flip(self) -> lazyrepeatarray: if self.data.shape == self.shape: return lazyrepeatarray(data=np.flipud(self.data), shape=self.shape) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=self.shape) else: raise NotImplementedError def rotate(self, angle: int) -> lazyrepeatarray: if self.data.shape == self.shape: return lazyrepeatarray(data=rotate(self.data, angle), shape=self.shape) elif self.data.size == 1: # TODO: This is almost certainly incorrect return lazyrepeatarray(data=self.data, shape=self.shape) else: raise NotImplementedError def reshape(self, target_shape: 
Tuple) -> lazyrepeatarray: # TODO: Can we reshape without creating new objects if self.data.shape == self.shape: return lazyrepeatarray( data=self.data.reshape(target_shape), shape=target_shape ) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=target_shape) else: if not np.broadcast_shapes(self.data.shape, target_shape): raise NotImplementedError( f"data= {self.data.shape}, shape: {self.shape}" ) else: return lazyrepeatarray(data=self.data, shape=target_shape) def copy(self, order: Optional[str] = "K") -> lazyrepeatarray: return self.__class__(data=self.data.copy(order=order), shape=self.shape) @property def size(self) -> int: return np.prod(self.shape) def sum(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: res = np.array(self.to_numpy().sum(*args, **kwargs)) return lazyrepeatarray(data=res, shape=res.shape) def ones_like(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: res = np.array(np.ones_like(self.to_numpy(), *args, **kwargs)) return lazyrepeatarray(data=res, shape=res.shape) def __eq__(self, other: Any) -> lazyrepeatarray: # type: ignore if isinstance(other, lazyrepeatarray): if self.shape == other.shape: return lazyrepeatarray(data=self.data == other.data, shape=self.shape) else: result = (self.to_numpy() == other.to_numpy()).all() return lazyrepeatarray(data=np.array([result]), shape=result.shape) if isinstance(other, np.ndarray): try: _ = np.broadcast_shapes(self.shape, other.shape) result = (self.to_numpy() == other).all() return lazyrepeatarray(data=np.array([result]), shape=other.shape) except Exception as e: print( "Failed to compare lazyrepeatarray with " + f"{self.shape} == {other.shape} to numpy by broadcasting. 
{e}" ) raise e return self == other def __le__(self, other: Any) -> lazyrepeatarray: # type: ignore if isinstance(other, lazyrepeatarray): if self.shape == other.shape: return lazyrepeatarray(data=self.data <= other.data, shape=self.shape) else: result = (self.to_numpy() <= other.to_numpy()).all() return lazyrepeatarray(data=np.array([result]), shape=result.shape) if isinstance(other, np.ndarray): try: _ = np.broadcast_shapes(self.shape, other.shape) result = (self.to_numpy() <= other).all() return lazyrepeatarray(data=np.array([result]), shape=other.shape) except Exception as e: print( "Failed to compare lazyrepeatarray with " + f"{self.shape} == {other.shape} to numpy by broadcasting. {e}" ) raise e return self <= other def concatenate( self, other: lazyrepeatarray, *args: Any, **kwargs: Any ) -> lazyrepeatarray: if not isinstance(other, lazyrepeatarray): raise NotImplementedError dummy_res = np.concatenate( (np.empty(self.shape), np.empty(other.shape)), *args, **kwargs ) return lazyrepeatarray(data=self.data, shape=dummy_res.shape) @property def dtype(self) -> np.dtype: return self.data.dtype def astype(self, np_type: np.dtype) -> lazyrepeatarray: return self.__class__(self.data.astype(np_type), self.shape) def to_numpy(self) -> np.ndarray: return np.broadcast_to(self.data, self.shape) def __repr__(self) -> str: return f"<lazyrepeatarray data: {self.data} -> shape: {self.shape}>" def __bool__(self) -> bool: return self.data.__bool__() def all(self) -> bool: return self.data.all() def any(self) -> bool: return self.data.any() def transpose(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: dummy_res = self.to_numpy().transpose(*args, **kwargs) return lazyrepeatarray( data=self.data.transpose(*args, **kwargs), shape=dummy_res.shape ) # As the min and max values calculation is the same regardless of the tensor type, # We centralize this method as baseline for calculation for min/max values def compute_min_max( x_min_vals: lazyrepeatarray, x_max_vals: 
lazyrepeatarray, other: Union[PhiTensor, int, float, np.ndarray], op_str: str, *args: Any, **kwargs: Dict[Any, Any], ) -> Tuple[lazyrepeatarray, lazyrepeatarray]: min_vals: lazyrepeatarray max_vals: lazyrepeatarray if op_str in ["__add__", "__matmul__", "__rmatmul__", "__lshift__", "__rshift__"]: if is_acceptable_simple_type(other): min_vals = getattr(x_min_vals, op_str)(other) max_vals = getattr(x_max_vals, op_str)(other) elif hasattr(other, "min_vals") and hasattr(other, "max_vals"): min_vals = getattr(x_min_vals, op_str)(other.min_vals) # type: ignore max_vals = getattr(x_max_vals, op_str)(other.max_vals) # type: ignore else: raise ValueError( f"Not supported type for lazy repeat array computation: {type(other)}" ) elif op_str in ["__sub__", "__mul__"]: if is_acceptable_simple_type(other): min_vals = getattr(x_min_vals, op_str)(other) max_vals = getattr(x_max_vals, op_str)(other) elif hasattr(other, "min_vals") and hasattr(other, "max_vals"): min_min = getattr(x_min_vals.data, op_str)(other.min_vals.data) # type: ignore min_max = getattr(x_min_vals.data, op_str)(other.max_vals.data) # type: ignore max_min = getattr(x_max_vals.data, op_str)(other.min_vals.data) # type: ignore max_max = getattr(x_max_vals.data, op_str)(other.max_vals.data) # type: ignore _min_vals = np.minimum.reduce([min_min, min_max, max_min, max_max]) _max_vals = np.maximum.reduce([min_min, min_max, max_min, max_max]) min_vals = x_min_vals.copy() min_vals.data = _min_vals max_vals = x_max_vals.copy() max_vals.data = _max_vals else: raise ValueError( f"Not supported type for lazy repeat array computation: {type(other)}" ) elif op_str in [ "__gt__", "__lt__", "__le__", "__ge__", "__eq__", "__ne__", "__xor__", ]: min_vals = x_min_vals * 0 max_vals = (x_max_vals * 0) + 1 elif op_str == "sum": min_vals = lazyrepeatarray(data=np.array(x_min_vals.sum(axis=None)), shape=()) max_vals = lazyrepeatarray(data=np.array(x_max_vals.sum(axis=None)), shape=()) elif op_str in ["__pos__", "sort"]: min_vals = 
x_min_vals max_vals = x_max_vals elif op_str == "trace": # NOTE: This is potentially expensive min_val_data = x_min_vals.to_numpy().trace(*args, **kwargs) min_vals = lazyrepeatarray(data=min_val_data, shape=min_val_data.shape) max_val_data = x_max_vals.to_numpy().trace(*args, **kwargs) max_vals = lazyrepeatarray(data=max_val_data, shape=max_val_data.shape) elif op_str == "repeat": dummy_res = np.empty(x_min_vals.shape).repeat(*args, **kwargs) min_vals = lazyrepeatarray(data=x_min_vals.data.min(), shape=dummy_res.shape) max_vals = lazyrepeatarray(data=x_max_vals.data.max(), shape=dummy_res.shape) elif op_str == "min": dummy_res = np.empty(x_min_vals.shape).min(*args, **kwargs) min_vals = lazyrepeatarray(data=x_min_vals.data, shape=dummy_res.shape) max_vals = lazyrepeatarray(data=x_max_vals.data, shape=dummy_res.shape) elif op_str == "max": dummy_res = np.empty(x_min_vals.shape).max(*args, **kwargs) min_vals = lazyrepeatarray(data=x_min_vals.data, shape=dummy_res.shape) max_vals = lazyrepeatarray(data=x_max_vals.data, shape=dummy_res.shape) elif op_str == "ones_like": min_vals = x_min_vals.ones_like(*args, **kwargs) max_vals = x_max_vals.ones_like(*args, **kwargs) elif op_str == "copy": min_vals = x_min_vals.copy(*args, **kwargs) # type: ignore max_vals = x_max_vals.copy(*args, **kwargs) # type: ignore elif op_str == "__round__": min_vals = lazyrepeatarray( data=x_min_vals.data.round(*args, **kwargs), shape=x_min_vals.shape ) max_vals = lazyrepeatarray( data=x_max_vals.data.round(*args, **kwargs), shape=x_max_vals.shape elif op_str == "__pow__": if x_min_vals.data <= 0 <= x_max_vals.data: # If data is in range [-5, 5], it's possible the minimum is 0 and not (-5)^2 min_data = min(0, (x_min_vals.data.__pow__(*args, **kwargs)).min()) else: min_data = x_min_vals.data.__pow__(*args, **kwargs) min_vals = lazyrepeatarray(data=min_data, shape=x_min_vals.shape) max_vals = lazyrepeatarray( data=x_max_vals.data.__pow__(*args, **kwargs), shape=x_max_vals.shape ) elif op_str == 
"argsort": min_vals = x_min_vals * 0 max_vals = x_max_vals * 0 + np.prod(x_max_vals.shape) else: raise ValueError(f"Invaid Operation for LazyRepeatArray: {op_str}") return (min_vals, max_vals) fixed error in _round_ op # future from __future__ import annotations # stdlib from typing import Any from typing import Dict from typing import Iterable from typing import Optional from typing import TYPE_CHECKING from typing import Tuple from typing import Union # third party import numpy as np from scipy.ndimage.interpolation import rotate # relative from ..common.serde.serializable import serializable from .broadcastable import is_broadcastable from .config import DEFAULT_FLOAT_NUMPY_TYPE from .config import DEFAULT_INT_NUMPY_TYPE from .passthrough import is_acceptable_simple_type # type: ignore from .smpc.utils import get_shape if TYPE_CHECKING: # relative from .autodp.phi_tensor import PhiTensor @serializable(recursive_serde=True) class lazyrepeatarray: """ A class representing Differential Privacy metadata (minimum and maximum values) in a way that saves RAM/CPU. We store large arrays of a single repeating value as a single tuple (shape) and a single value (int/float/etc) e.g. np.array([8,8,8,8,8,8]) = lazyrepeatarray(data=8, shape=(6,)) Think like the opposite of np.broadcast, repeated values along an axis are collapsed but the .shape attribute of the higher dimensional projection is retained for operations. ... Attributes: data: int/float the actual value that is repeating. shape: tuple the shape that the fully expanded array would be. Methods: to_numpy(): expands the lazyrepeatarray into the full sized numpy array it was representing. """ __attr_allowlist__ = ["data", "shape"] def __init__(self, data: np.ndarray, shape: Tuple[int, ...]) -> None: """ data: the raw data values without repeats shape: the shape of 'data' if repeats were included """ # NOTE: all additional arguments are assumed to be broadcast if dims are shorter # than that of data. 
Example: if data.shape == (2,3,4) and # min_vals.shape == (2,3), then it's assumed that the full min_vals.shape is # actually (2,3,4) where the last dim is simply copied. # Example2: if data.shape == (2,3,4) and min_vals.shape == (2,1,4), then the # middle dimension is supposed to be copied to be min_vals.shape == (2,3,4) # if necessary. This is just to keep the memory footprint (and computation) # as small as possible. if isinstance(data, (bool, int, float)): data = np.array(data) if isinstance(data, int): data = data.astype(DEFAULT_INT_NUMPY_TYPE) # type: ignore if isinstance(data, float): data = data.astype(DEFAULT_FLOAT_NUMPY_TYPE) # type: ignore # verify broadcasting works on shapes if -1 not in shape: np.broadcast_shapes(data.shape, shape) self.data = data self.shape = shape if isinstance(shape, Iterable): for val in shape: if val < 0: raise ValueError(f"Invalid shape: {shape}") def __getitem__(self, item: Union[str, int, slice]) -> lazyrepeatarray: if self.data.shape == self.shape: output = self.data[item] return lazyrepeatarray(data=output, shape=output.shape) elif self.data.size == 1: test_arr = np.ones(self.shape)[ item ] # TODO: Is there a better way to determine output shape? return lazyrepeatarray(data=self.data, shape=test_arr.shape) else: raise NotImplementedError def __add__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. """ if is_acceptable_simple_type(other): return self.__class__(data=self.data + other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) if self.data.shape == other.data.shape: return self.__class__(data=self.data + other.data, shape=self.shape) else: return self.__class__(data=self.data + other.data, shape=self.shape) def __sub__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. 
It's going to get more complicated. """ if is_acceptable_simple_type(other): res = self.data - other return self.__class__(data=res, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) if self.data.shape == other.data.shape: return self.__class__(data=self.data - other.data, shape=self.shape) else: return self.__class__(data=self.data - other.data, shape=self.shape) def __mul__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. """ if is_acceptable_simple_type(other): return self.__class__(data=self.data * other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( "Cannot broadcast arrays with shapes for LazyRepeatArray Multiplication:" + f" {self.shape} & {other.shape}" ) else: return self.__class__(data=self.data * other.data, shape=self.shape) def __matmul__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. 
""" if is_acceptable_simple_type(other): new_shape = get_shape("__matmul__", self.shape, other.shape) if self.data.size == 1: return self.__class__( data=np.matmul(np.ones(self.shape), other * self.data), shape=new_shape, ) return self.__class__(data=self.data.__matmul__(other), shape=new_shape) if self.shape[-1] != other.shape[-2]: raise Exception( f"cannot matrix multiply tensors with different shapes: {self.shape} and {other.shape}" ) result = self.to_numpy() @ other.to_numpy() return self.__class__(data=result, shape=result.shape) def __lshift__(self, other: Any) -> lazyrepeatarray: if is_acceptable_simple_type(other): return self.__class__(data=self.data << other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) return self.__class__(data=self.data << other.data, shape=self.shape) def __rshift__(self, other: Any) -> lazyrepeatarray: if is_acceptable_simple_type(other): return self.__class__(data=self.data >> other, shape=self.shape) if not is_broadcastable(self.shape, other.shape): raise Exception( f"Cannot broadcast arrays with shapes: {self.shape} & {other.shape}" ) return self.__class__(data=self.data >> other.data, shape=self.shape) def zeros_like(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: res = np.array(np.zeros_like(self.to_numpy(), *args, **kwargs)) return lazyrepeatarray(data=res, shape=res.shape) def __rtruediv__(self, other: Any) -> lazyrepeatarray: res = (1 / self.data) * other return lazyrepeatarray(data=res, shape=self.shape) def __rmatmul__(self, other: Any) -> lazyrepeatarray: """ THIS MIGHT LOOK LIKE COPY-PASTED CODE! Don't touch it. It's going to get more complicated. 
""" if is_acceptable_simple_type(other): new_shape = get_shape("__matmul__", other.shape, self.shape) if other.size == 1: return self.__class__( data=np.matmul(np.ones(other.shape), other * self.data), shape=new_shape, ) return self.__class__( data=self.to_numpy().__rmatmul__(other), shape=new_shape ) if other.shape[-1] != self.shape[0]: raise Exception( "cannot matrix multiply tensors with different shapes: {self.shape} and {other.shape}" ) result = self.to_numpy().__rmatmul__(other.to_numpy()) return self.__class__(data=result, shape=result.shape) def __pow__(self, exponent: int) -> lazyrepeatarray: if exponent == 2: return self * self raise Exception("not sure how to do this yet") def pad(self, pad_width: int, mode: str = "reflect") -> lazyrepeatarray: if mode == "reflect": new_shape = tuple([i + pad_width * 2 for i in self.shape]) if self.data.shape == self.shape: return lazyrepeatarray( data=np.pad(self.data, pad_width=pad_width, mode="reflect"), shape=new_shape, ) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=new_shape) else: raise NotImplementedError else: raise NotImplementedError def horizontal_flip(self) -> lazyrepeatarray: if self.data.shape == self.shape: return lazyrepeatarray(data=np.fliplr(self.data), shape=self.shape) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=self.shape) else: raise NotImplementedError def vertical_flip(self) -> lazyrepeatarray: if self.data.shape == self.shape: return lazyrepeatarray(data=np.flipud(self.data), shape=self.shape) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=self.shape) else: raise NotImplementedError def rotate(self, angle: int) -> lazyrepeatarray: if self.data.shape == self.shape: return lazyrepeatarray(data=rotate(self.data, angle), shape=self.shape) elif self.data.size == 1: # TODO: This is almost certainly incorrect return lazyrepeatarray(data=self.data, shape=self.shape) else: raise NotImplementedError def reshape(self, target_shape: 
Tuple) -> lazyrepeatarray: # TODO: Can we reshape without creating new objects if self.data.shape == self.shape: return lazyrepeatarray( data=self.data.reshape(target_shape), shape=target_shape ) elif self.data.size == 1: return lazyrepeatarray(data=self.data, shape=target_shape) else: if not np.broadcast_shapes(self.data.shape, target_shape): raise NotImplementedError( f"data= {self.data.shape}, shape: {self.shape}" ) else: return lazyrepeatarray(data=self.data, shape=target_shape) def copy(self, order: Optional[str] = "K") -> lazyrepeatarray: return self.__class__(data=self.data.copy(order=order), shape=self.shape) @property def size(self) -> int: return np.prod(self.shape) def sum(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: res = np.array(self.to_numpy().sum(*args, **kwargs)) return lazyrepeatarray(data=res, shape=res.shape) def ones_like(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: res = np.array(np.ones_like(self.to_numpy(), *args, **kwargs)) return lazyrepeatarray(data=res, shape=res.shape) def __eq__(self, other: Any) -> lazyrepeatarray: # type: ignore if isinstance(other, lazyrepeatarray): if self.shape == other.shape: return lazyrepeatarray(data=self.data == other.data, shape=self.shape) else: result = (self.to_numpy() == other.to_numpy()).all() return lazyrepeatarray(data=np.array([result]), shape=result.shape) if isinstance(other, np.ndarray): try: _ = np.broadcast_shapes(self.shape, other.shape) result = (self.to_numpy() == other).all() return lazyrepeatarray(data=np.array([result]), shape=other.shape) except Exception as e: print( "Failed to compare lazyrepeatarray with " + f"{self.shape} == {other.shape} to numpy by broadcasting. 
{e}" ) raise e return self == other def __le__(self, other: Any) -> lazyrepeatarray: # type: ignore if isinstance(other, lazyrepeatarray): if self.shape == other.shape: return lazyrepeatarray(data=self.data <= other.data, shape=self.shape) else: result = (self.to_numpy() <= other.to_numpy()).all() return lazyrepeatarray(data=np.array([result]), shape=result.shape) if isinstance(other, np.ndarray): try: _ = np.broadcast_shapes(self.shape, other.shape) result = (self.to_numpy() <= other).all() return lazyrepeatarray(data=np.array([result]), shape=other.shape) except Exception as e: print( "Failed to compare lazyrepeatarray with " + f"{self.shape} == {other.shape} to numpy by broadcasting. {e}" ) raise e return self <= other def concatenate( self, other: lazyrepeatarray, *args: Any, **kwargs: Any ) -> lazyrepeatarray: if not isinstance(other, lazyrepeatarray): raise NotImplementedError dummy_res = np.concatenate( (np.empty(self.shape), np.empty(other.shape)), *args, **kwargs ) return lazyrepeatarray(data=self.data, shape=dummy_res.shape) @property def dtype(self) -> np.dtype: return self.data.dtype def astype(self, np_type: np.dtype) -> lazyrepeatarray: return self.__class__(self.data.astype(np_type), self.shape) def to_numpy(self) -> np.ndarray: return np.broadcast_to(self.data, self.shape) def __repr__(self) -> str: return f"<lazyrepeatarray data: {self.data} -> shape: {self.shape}>" def __bool__(self) -> bool: return self.data.__bool__() def all(self) -> bool: return self.data.all() def any(self) -> bool: return self.data.any() def transpose(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: dummy_res = self.to_numpy().transpose(*args, **kwargs) return lazyrepeatarray( data=self.data.transpose(*args, **kwargs), shape=dummy_res.shape ) # As the min and max values calculation is the same regardless of the tensor type, # We centralize this method as baseline for calculation for min/max values def compute_min_max( x_min_vals: lazyrepeatarray, x_max_vals: 
lazyrepeatarray, other: Union[PhiTensor, int, float, np.ndarray], op_str: str, *args: Any, **kwargs: Dict[Any, Any], ) -> Tuple[lazyrepeatarray, lazyrepeatarray]: min_vals: lazyrepeatarray max_vals: lazyrepeatarray if op_str in ["__add__", "__matmul__", "__rmatmul__", "__lshift__", "__rshift__"]: if is_acceptable_simple_type(other): min_vals = getattr(x_min_vals, op_str)(other) max_vals = getattr(x_max_vals, op_str)(other) elif hasattr(other, "min_vals") and hasattr(other, "max_vals"): min_vals = getattr(x_min_vals, op_str)(other.min_vals) # type: ignore max_vals = getattr(x_max_vals, op_str)(other.max_vals) # type: ignore else: raise ValueError( f"Not supported type for lazy repeat array computation: {type(other)}" ) elif op_str in ["__sub__", "__mul__"]: if is_acceptable_simple_type(other): min_vals = getattr(x_min_vals, op_str)(other) max_vals = getattr(x_max_vals, op_str)(other) elif hasattr(other, "min_vals") and hasattr(other, "max_vals"): min_min = getattr(x_min_vals.data, op_str)(other.min_vals.data) # type: ignore min_max = getattr(x_min_vals.data, op_str)(other.max_vals.data) # type: ignore max_min = getattr(x_max_vals.data, op_str)(other.min_vals.data) # type: ignore max_max = getattr(x_max_vals.data, op_str)(other.max_vals.data) # type: ignore _min_vals = np.minimum.reduce([min_min, min_max, max_min, max_max]) _max_vals = np.maximum.reduce([min_min, min_max, max_min, max_max]) min_vals = x_min_vals.copy() min_vals.data = _min_vals max_vals = x_max_vals.copy() max_vals.data = _max_vals else: raise ValueError( f"Not supported type for lazy repeat array computation: {type(other)}" ) elif op_str in [ "__gt__", "__lt__", "__le__", "__ge__", "__eq__", "__ne__", "__xor__", ]: min_vals = x_min_vals * 0 max_vals = (x_max_vals * 0) + 1 elif op_str == "sum": min_vals = lazyrepeatarray(data=np.array(x_min_vals.sum(axis=None)), shape=()) max_vals = lazyrepeatarray(data=np.array(x_max_vals.sum(axis=None)), shape=()) elif op_str in ["__pos__", "sort"]: min_vals = 
x_min_vals max_vals = x_max_vals elif op_str == "trace": # NOTE: This is potentially expensive min_val_data = x_min_vals.to_numpy().trace(*args, **kwargs) min_vals = lazyrepeatarray(data=min_val_data, shape=min_val_data.shape) max_val_data = x_max_vals.to_numpy().trace(*args, **kwargs) max_vals = lazyrepeatarray(data=max_val_data, shape=max_val_data.shape) elif op_str == "repeat": dummy_res = np.empty(x_min_vals.shape).repeat(*args, **kwargs) min_vals = lazyrepeatarray(data=x_min_vals.data.min(), shape=dummy_res.shape) max_vals = lazyrepeatarray(data=x_max_vals.data.max(), shape=dummy_res.shape) elif op_str == "min": dummy_res = np.empty(x_min_vals.shape).min(*args, **kwargs) min_vals = lazyrepeatarray(data=x_min_vals.data, shape=dummy_res.shape) max_vals = lazyrepeatarray(data=x_max_vals.data, shape=dummy_res.shape) elif op_str == "max": dummy_res = np.empty(x_min_vals.shape).max(*args, **kwargs) min_vals = lazyrepeatarray(data=x_min_vals.data, shape=dummy_res.shape) max_vals = lazyrepeatarray(data=x_max_vals.data, shape=dummy_res.shape) elif op_str == "ones_like": min_vals = x_min_vals.ones_like(*args, **kwargs) max_vals = x_max_vals.ones_like(*args, **kwargs) elif op_str == "copy": min_vals = x_min_vals.copy(*args, **kwargs) # type: ignore max_vals = x_max_vals.copy(*args, **kwargs) # type: ignore elif op_str == "__round__": min_vals = lazyrepeatarray( data=x_min_vals.data.round(*args, **kwargs), shape=x_min_vals.shape ) max_vals = lazyrepeatarray( data=x_max_vals.data.round(*args, **kwargs), shape=x_max_vals.shape ) elif op_str == "__pow__": if x_min_vals.data <= 0 <= x_max_vals.data: # If data is in range [-5, 5], it's possible the minimum is 0 and not (-5)^2 min_data = min(0, (x_min_vals.data.__pow__(*args, **kwargs)).min()) else: min_data = x_min_vals.data.__pow__(*args, **kwargs) min_vals = lazyrepeatarray(data=min_data, shape=x_min_vals.shape) max_vals = lazyrepeatarray( data=x_max_vals.data.__pow__(*args, **kwargs), shape=x_max_vals.shape ) elif op_str == 
"argsort": min_vals = x_min_vals * 0 max_vals = x_max_vals * 0 + np.prod(x_max_vals.shape) else: raise ValueError(f"Invaid Operation for LazyRepeatArray: {op_str}") return (min_vals, max_vals)
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

import os
import logging
import ConfigParser

from lib.cuckoo.common.exceptions import CuckooMachineError
from lib.cuckoo.common.constants import CUCKOO_ROOT

log = logging.getLogger(__name__)

class Dictionary(dict):
    """Cuckoo custom dict."""

    def __getattr__(self, key):
        # Missing keys resolve to None instead of raising AttributeError.
        return self.get(key, None)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

class MachineManager(object):
    """Base abstract class for analysis machine manager."""

    def __init__(self):
        self.module_name = ""
        self.options = None
        self.machines = []

    def set_options(self, options):
        """Set machine manager options.
        @param options: machine manager options dict.
        """
        self.options = options

    def initialize(self, module_name):
        """Read and load machines configuration, try to check the configuration.
        @param module_name: module name.
        """
        # Load.
        self._initialize(module_name)

        # Run initialization checks.
        self._initialize_check()

    def _initialize(self, module_name):
        """Read configuration.
        @param module_name: module name.
        """
        self.module_name = module_name
        mmanager_opts = self.options.get(module_name)

        for machine_id in mmanager_opts["machines"].strip().split(","):
            try:
                machine_opts = self.options.get(machine_id)
                machine = Dictionary()
                machine.id = machine_id
                machine.label = machine_opts["label"]
                machine.platform = machine_opts["platform"]
                machine.ip = machine_opts["ip"]
                machine.locked = False
                self.machines.append(machine)
            except AttributeError:
                # machine_opts is None (section missing), so subscripting raises
                # AttributeError; skip the misconfigured machine and keep going.
                log.warning("Configuration details about machine %s are missing. Continue" % machine_id)
                continue

    def _initialize_check(self):
        """Runs all checks when a machine manager is initialized.
        @note: in machine manager modules you may override or superclass this method.
        @raise CuckooMachineError: if a misconfiguration or an unknown vm state is found.
        """
        # Checks if machines configured are really available.
        try:
            configured_vm = self._list()
            for machine in self.machines:
                if machine.label not in configured_vm:
                    raise CuckooMachineError("Configured machine %s was not detected or it's not in proper state" % machine.label)
        except NotImplementedError:
            # Subclasses that don't implement _list() skip this check.
            pass

    def availables(self):
        """How many machines are free.
        @return: free machines count.
        """
        count = 0
        for machine in self.machines:
            if not machine.locked:
                count += 1

        return count

    def acquire(self, machine_id=None, platform=None):
        """Acquire a machine to start analysis.
        @param machine_id: machine ID.
        @param platform: machine platform.
        @return: machine or None.
        """
        if machine_id:
            for machine in self.machines:
                if machine.id == machine_id and not machine.locked:
                    machine.locked = True
                    return machine
        elif platform:
            for machine in self.machines:
                if machine.platform == platform and not machine.locked:
                    machine.locked = True
                    return machine
        else:
            for machine in self.machines:
                if not machine.locked:
                    machine.locked = True
                    return machine

        return None

    def release(self, label=None):
        """Release a machine.
        @param label: machine name.
        """
        if label:
            for machine in self.machines:
                if machine.label == label:
                    machine.locked = False

    def running(self):
        """Returns running virtual machines.
        @return: running virtual machines list.
        """
        return [m for m in self.machines if m.locked]

    def start(self, label=None):
        """Start a machine.
        @param label: machine name.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

    def stop(self, label=None):
        """Stop a machine.
        @param label: machine name.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

    def _list(self):
        """Lists virtual machines configured.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

class Processing(object):
    """Base abstract class for processing module."""

    def __init__(self):
        self.analysis_path = ""
        self.logs_path = ""

    def set_path(self, analysis_path):
        """Set paths.
        @param analysis_path: analysis folder path.
        """
        self.analysis_path = analysis_path
        self.log_path = os.path.join(self.analysis_path, "analysis.log")
        self.conf_path = os.path.join(self.analysis_path, "analysis.conf")
        self.file_path = os.path.realpath(os.path.join(self.analysis_path, "binary"))
        self.dropped_path = os.path.join(self.analysis_path, "files")
        self.logs_path = os.path.join(self.analysis_path, "logs")
        self.shots_path = os.path.join(self.analysis_path, "shots")
        self.pcap_path = os.path.join(self.analysis_path, "dump.pcap")

    def run(self):
        """Start processing.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

class Signature(object):
    """Base abstract class for signature."""
    # NOTE(review): these are class-level (shared) attributes by design;
    # subclasses override them with their own metadata.
    name = ""
    description = ""
    severity = 1
    categories = []
    authors = []
    references = []
    alert = False
    enabled = True

    def __init__(self):
        self.data = []

    def run(self, results=None):
        """Start signature processing.
        @param results: analysis results.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

class Report(object):
    """Base abstract class for reporting module."""

    def __init__(self):
        self.analysis_path = ""
        self.reports_path = ""
        self.options = None

    def set_path(self, analysis_path):
        """Set analysis folder path.
        @param analysis_path: analysis folder path.
        """
        self.analysis_path = analysis_path
        self.conf_path = os.path.join(self.analysis_path, "analysis.conf")
        self.reports_path = os.path.join(self.analysis_path, "reports")

        if not os.path.exists(self.reports_path):
            os.mkdir(self.reports_path)

    def set_options(self, options):
        """Set report options.
        @param options: report options dict.
        """
        self.options = options

    def run(self):
        """Start report processing.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

Docstring minor fix

# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import logging
import ConfigParser

from lib.cuckoo.common.exceptions import CuckooMachineError
from lib.cuckoo.common.constants import CUCKOO_ROOT

log = logging.getLogger(__name__)

class Dictionary(dict):
    """Cuckoo custom dict."""

    def __getattr__(self, key):
        # Missing keys resolve to None instead of raising AttributeError.
        return self.get(key, None)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

class MachineManager(object):
    """Base abstract class for analysis machine manager."""

    def __init__(self):
        self.module_name = ""
        self.options = None
        self.machines = []

    def set_options(self, options):
        """Set machine manager options.
        @param options: machine manager options dict.
        """
        self.options = options

    def initialize(self, module_name):
        """Read and load machines configuration, try to check the configuration.
        @param module_name: module name.
        """
        # Load.
        self._initialize(module_name)

        # Run initialization checks.
        self._initialize_check()

    def _initialize(self, module_name):
        """Read configuration.
        @param module_name: module name.
        """
        self.module_name = module_name
        mmanager_opts = self.options.get(module_name)

        for machine_id in mmanager_opts["machines"].strip().split(","):
            try:
                machine_opts = self.options.get(machine_id)
                machine = Dictionary()
                machine.id = machine_id
                machine.label = machine_opts["label"]
                machine.platform = machine_opts["platform"]
                machine.ip = machine_opts["ip"]
                machine.locked = False
                self.machines.append(machine)
            except AttributeError:
                # machine_opts is None (section missing), so subscripting raises
                # AttributeError; skip the misconfigured machine and keep going.
                log.warning("Configuration details about machine %s are missing. Continue" % machine_id)
                continue

    def _initialize_check(self):
        """Runs checks against virtualization software when a machine manager is initialized.
        @note: in machine manager modules you may override or superclass this method.
        @raise CuckooMachineError: if a misconfiguration or an unknown vm state is found.
        """
        try:
            configured_vm = self._list()
            for machine in self.machines:
                if machine.label not in configured_vm:
                    raise CuckooMachineError("Configured machine %s was not detected or it's not in proper state" % machine.label)
        except NotImplementedError:
            # Subclasses that don't implement _list() skip this check.
            pass

    def availables(self):
        """How many machines are free.
        @return: free machines count.
        """
        count = 0
        for machine in self.machines:
            if not machine.locked:
                count += 1

        return count

    def acquire(self, machine_id=None, platform=None):
        """Acquire a machine to start analysis.
        @param machine_id: machine ID.
        @param platform: machine platform.
        @return: machine or None.
        """
        if machine_id:
            for machine in self.machines:
                if machine.id == machine_id and not machine.locked:
                    machine.locked = True
                    return machine
        elif platform:
            for machine in self.machines:
                if machine.platform == platform and not machine.locked:
                    machine.locked = True
                    return machine
        else:
            for machine in self.machines:
                if not machine.locked:
                    machine.locked = True
                    return machine

        return None

    def release(self, label=None):
        """Release a machine.
        @param label: machine name.
        """
        if label:
            for machine in self.machines:
                if machine.label == label:
                    machine.locked = False

    def running(self):
        """Returns running virtual machines.
        @return: running virtual machines list.
        """
        return [m for m in self.machines if m.locked]

    def start(self, label=None):
        """Start a machine.
        @param label: machine name.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

    def stop(self, label=None):
        """Stop a machine.
        @param label: machine name.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

    def _list(self):
        """Lists virtual machines configured.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

class Processing(object):
    """Base abstract class for processing module."""

    def __init__(self):
        self.analysis_path = ""
        self.logs_path = ""

    def set_path(self, analysis_path):
        """Set paths.
        @param analysis_path: analysis folder path.
        """
        self.analysis_path = analysis_path
        self.log_path = os.path.join(self.analysis_path, "analysis.log")
        self.conf_path = os.path.join(self.analysis_path, "analysis.conf")
        self.file_path = os.path.realpath(os.path.join(self.analysis_path, "binary"))
        self.dropped_path = os.path.join(self.analysis_path, "files")
        self.logs_path = os.path.join(self.analysis_path, "logs")
        self.shots_path = os.path.join(self.analysis_path, "shots")
        self.pcap_path = os.path.join(self.analysis_path, "dump.pcap")

    def run(self):
        """Start processing.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

class Signature(object):
    """Base abstract class for signature."""
    # NOTE(review): these are class-level (shared) attributes by design;
    # subclasses override them with their own metadata.
    name = ""
    description = ""
    severity = 1
    categories = []
    authors = []
    references = []
    alert = False
    enabled = True

    def __init__(self):
        self.data = []

    def run(self, results=None):
        """Start signature processing.
        @param results: analysis results.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError

class Report(object):
    """Base abstract class for reporting module."""

    def __init__(self):
        self.analysis_path = ""
        self.reports_path = ""
        self.options = None

    def set_path(self, analysis_path):
        """Set analysis folder path.
        @param analysis_path: analysis folder path.
        """
        self.analysis_path = analysis_path
        self.conf_path = os.path.join(self.analysis_path, "analysis.conf")
        self.reports_path = os.path.join(self.analysis_path, "reports")

        if not os.path.exists(self.reports_path):
            os.mkdir(self.reports_path)

    def set_options(self, options):
        """Set report options.
        @param options: report options dict.
        """
        self.options = options

    def run(self):
        """Start report processing.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError
from collections import OrderedDict from copy import deepcopy from django.core.mail import mail_admins from django.core.urlresolvers import reverse from django.core.exceptions import PermissionDenied, ObjectDoesNotExist from django.contrib import messages from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import Group from django.contrib.auth.decorators import login_required from django.db import IntegrityError from django.forms.models import modelformset_factory from django.http import HttpResponseForbidden, JsonResponse, HttpResponse, HttpResponseBadRequest from django.db.models import FieldDoesNotExist from django.shortcuts import render, redirect, get_object_or_404 from django.template import loader, RequestContext, Context from django.template.context_processors import csrf from .forms import * from .filters import * from .models import Summoner, Monster, Fusion, Building, BuildingInstance, MonsterInstance, MonsterPiece, TeamGroup, Team, RuneInstance, RuneCraftInstance, Storage def register(request): form = RegisterUserForm(request.POST or None) if request.method == 'POST': if form.is_valid(): if User.objects.filter(username=form.cleaned_data['username']).exists(): form.add_error('username', 'Username already taken') else: new_user = None new_summoner = None try: # Create the user new_user = User.objects.create_user( username=form.cleaned_data['username'], password=form.cleaned_data['password'], email=form.cleaned_data['email'], ) new_user.save() new_user.groups.add(Group.objects.get(name='Summoners')) new_summoner = Summoner.objects.create( user=new_user, summoner_name=form.cleaned_data['summoner_name'], public=form.cleaned_data['is_public'], ) new_summoner.save() # Automatically log them in user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password']) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile_default', profile_name=user.username) 
except IntegrityError as e: if new_user is not None: new_user.delete() if new_summoner is not None: new_summoner.delete() form.add_error(None, 'There was an issue completing your registration. Please try again.') mail_admins( subject='Error during user registration', message='{}'.format(e), fail_silently=True, ) context = {'form': form} return render(request, 'herders/register.html', context) @login_required def change_username(request): user = request.user form = CrispyChangeUsernameForm(request.POST or None) context = { 'form': form, } if request.method == 'POST' and form.is_valid(): try: user.username = form.cleaned_data['username'] user.save() return redirect('username_change_complete') except IntegrityError: form.add_error('username', 'Username already taken') return render(request, 'registration/change_username.html', context) def change_username_complete(request): return render(request, 'registration/change_username_complete.html') @login_required def profile_delete(request, profile_name): user = request.user try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = DeleteProfileForm(request.POST or None) form.helper.form_action = reverse('herders:profile_delete', kwargs={'profile_name': profile_name}) context = { 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): logout(request) user.delete() messages.warning(request, 'Your profile has been permanently deleted.') return redirect('news:latest_news') return render(request, 'herders/profile/profile_delete.html', context) else: return HttpResponseForbidden("You don't own this profile") @login_required def following(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile_following', kwargs={'profile_name': profile_name}) ) try: summoner = 
Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'view': 'following', 'return_path': return_path, } return render(request, 'herders/profile/following/list.html', context) @login_required def follow_add(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() new_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.add(new_follower) messages.info(request, 'Now following %s' % new_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() @login_required def follow_remove(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() removed_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.remove(removed_follower) messages.info(request, 'Unfollowed %s' % removed_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() def profile(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 
'herders/profile/not_found.html') # Determine if the person logged in is the one requesting the view is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_filter_form = FilterMonsterInstanceForm(auto_id='id_filter_%s') monster_filter_form.helper.form_action = reverse('herders:monster_inventory', kwargs={'profile_name': profile_name}) context = { 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, 'monster_filter_form': monster_filter_form, 'view': 'profile', } if is_owner or summoner.public: return render(request, 'herders/profile/monster_inventory/base.html', context) else: return render(request, 'herders/profile/not_public.html') def buildings(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'summoner': summoner, 'is_owner': is_owner, 'profile_name': profile_name, } return render(request, 'herders/profile/buildings/base.html', context) def buildings_inventory(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) all_buildings = Building.objects.all().order_by('name') building_data = [] total_glory_cost = 0 spent_glory = 0 total_guild_cost = 0 spent_guild = 0 for b in all_buildings: bldg_data = _building_data(summoner, b) if b.area == Building.AREA_GENERAL: total_glory_cost += sum(b.upgrade_cost) spent_glory += bldg_data['spent_upgrade_cost'] elif b.area == Building.AREA_GUILD: total_guild_cost += sum(b.upgrade_cost) spent_guild += bldg_data['spent_upgrade_cost'] building_data.append(bldg_data) context = { 'is_owner': is_owner, 'summoner': summoner, 
'profile_name': profile_name, 'buildings': building_data, 'total_glory_cost': total_glory_cost, 'spent_glory': spent_glory, 'glory_progress': float(spent_glory) / total_glory_cost * 100, 'total_guild_cost': total_guild_cost, 'spent_guild': spent_guild, 'guild_progress': float(spent_guild) / total_guild_cost * 100, } return render(request, 'herders/profile/buildings/inventory.html', context) @login_required def building_edit(request, profile_name, building_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) base_building = get_object_or_404(Building, pk=building_id) try: owned_instance = BuildingInstance.objects.get(owner=summoner, building=base_building) except BuildingInstance.DoesNotExist: owned_instance = BuildingInstance.objects.create(owner=summoner, level=0, building=base_building) form = EditBuildingForm(request.POST or None, instance=owned_instance) form.helper.form_action = reverse('herders:building_edit', kwargs={'profile_name': profile_name, 'building_id': building_id}) context = { 'form': form, } context.update(csrf(request)) if is_owner: if request.method == 'POST' and form.is_valid(): owned_instance = form.save() messages.success(request,'Updated ' + owned_instance.building.name + ' to level ' + str(owned_instance.level)) response_data = { 'code': 'success', } else: template = loader.get_template('herders/profile/buildings/edit_form.html') response_data = { 'code': 'error', 'html': template.render(context), } return JsonResponse(response_data) else: return HttpResponseForbidden() def _building_data(summoner, building): percent_stat = building.affected_stat in Building.PERCENT_STATS total_upgrade_cost = sum(building.upgrade_cost) if building.area == Building.AREA_GENERAL: currency = 'glory_points.png' else: currency = 'guild_points.png' try: instance = 
# NOTE(review): chunk begins mid-function — tail of _building_data(). Reformatted for readability.
        BuildingInstance.objects.get(owner=summoner, building=building)
        if instance.level > 0:
            stat_bonus = building.stat_bonus[instance.level - 1]
        else:
            stat_bonus = 0
        remaining_upgrade_cost = instance.remaining_upgrade_cost()
    except BuildingInstance.DoesNotExist:
        # No instance yet: treat as level 0, full cost remaining.
        instance = None
        stat_bonus = 0
        remaining_upgrade_cost = total_upgrade_cost
    except BuildingInstance.MultipleObjectsReturned:
        # Should only be 1 ever - use the first and delete the others.
        instance = BuildingInstance.objects.filter(owner=summoner, building=building).first()
        BuildingInstance.objects.filter(owner=summoner, building=building).exclude(pk=instance.pk).delete()
        # Recurse once now that duplicates are removed.
        return _building_data(summoner, building)

    return {
        'base': building,
        'instance': instance,
        'stat_bonus': stat_bonus,
        'percent_stat': percent_stat,
        'spent_upgrade_cost': total_upgrade_cost - remaining_upgrade_cost,
        'total_upgrade_cost': total_upgrade_cost,
        'upgrade_progress': float(total_upgrade_cost - remaining_upgrade_cost) / total_upgrade_cost * 100,
        'currency': currency,
    }


def monster_inventory(request, profile_name, view_mode=None, box_grouping=None):
    """Display a summoner's monster collection as a box, list, or summoning-piece view.

    view_mode / box_grouping, when passed in the URL, are persisted to the
    session and the request returns early so the client can re-request.
    """
    # If we passed in view mode or sort method, set the session variable and redirect back to ourself without the view mode or box grouping
    if view_mode:
        request.session['profile_view_mode'] = view_mode.lower()
    if box_grouping:
        request.session['profile_group_method'] = box_grouping.lower()

    if request.session.modified:
        return HttpResponse("Profile view mode cookie set")

    view_mode = request.session.get('profile_view_mode', 'box').lower()
    box_grouping = request.session.get('profile_group_method', 'grade').lower()

    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return render(request, 'herders/profile/not_found.html')

    monster_queryset = MonsterInstance.objects.filter(owner=summoner).select_related('monster', 'monster__awakens_from')
    total_monsters = monster_queryset.count()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if view_mode == 'list':
        # List view renders many related fields per row; prefetch to avoid N+1 queries.
        monster_queryset = monster_queryset.select_related(
            'monster__leader_skill', 'monster__awakens_to'
        ).prefetch_related(
            'monster__skills', 'runeinstance_set', 'team_set', 'team_leader', 'tags'
        )

    form = FilterMonsterInstanceForm(request.POST or None, auto_id='id_filter_%s')
    if form.is_valid():
        monster_filter = MonsterInstanceFilter(form.cleaned_data, queryset=monster_queryset)
    else:
        monster_filter = MonsterInstanceFilter(queryset=monster_queryset)

    filtered_count = monster_filter.qs.count()

    context = {
        'monsters': monster_filter.qs,
        'total_count': total_monsters,
        'filtered_count': filtered_count,
        'profile_name': profile_name,
        'is_owner': is_owner,
    }

    if is_owner or summoner.public:
        if view_mode == 'pieces':
            context['monster_pieces'] = MonsterPiece.objects.filter(owner=summoner).select_related('monster')
            template = 'herders/profile/monster_inventory/summoning_pieces.html'
        elif view_mode == 'list':
            template = 'herders/profile/monster_inventory/list.html'
        else:
            # Group up the filtered monsters
            monster_stable = OrderedDict()

            if box_grouping == 'grade' or box_grouping == 'stars':
                monster_stable['6*'] = monster_filter.qs.filter(stars=6).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['5*'] = monster_filter.qs.filter(stars=5).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['4*'] = monster_filter.qs.filter(stars=4).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['3*'] = monster_filter.qs.filter(stars=3).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['2*'] = monster_filter.qs.filter(stars=2).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['1*'] = monster_filter.qs.filter(stars=1).order_by('-level', 'monster__element', 'monster__name')
            elif box_grouping == 'natural_stars':
                # "Natural N*" means base_stars == N; an awakened monster's base_stars is one
                # higher than its natural grade, hence each bucket pairs (N+1, awakened) with (N, unawakened).
                nat5 = (Q(monster__base_stars=6) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=5) & Q(monster__is_awakened=False))
                nat4 = (Q(monster__base_stars=5) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=4) & Q(monster__is_awakened=False))
                nat3 = (Q(monster__base_stars=4) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=3) & Q(monster__is_awakened=False))
                nat2 = (Q(monster__base_stars=3) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=2) & Q(monster__is_awakened=False))
                nat1 = (Q(monster__base_stars=2) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=1) & Q(monster__is_awakened=False))

                monster_stable['Natural 5*'] = monster_filter.qs.filter(nat5).order_by('-stars', '-level', 'monster__name')
                monster_stable['Natural 4*'] = monster_filter.qs.filter(nat4).order_by('-stars', '-level', 'monster__name')
                monster_stable['Natural 3*'] = monster_filter.qs.filter(nat3).order_by('-stars', '-level', 'monster__name')
                monster_stable['Natural 2*'] = monster_filter.qs.filter(nat2).order_by('-stars', '-level', 'monster__name')
                monster_stable['Natural 1*'] = monster_filter.qs.filter(nat1).order_by('-stars', '-level', 'monster__name')
            elif box_grouping == 'level':
                monster_stable['40'] = monster_filter.qs.filter(level=40).order_by('-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['39-31'] = monster_filter.qs.filter(level__gt=30).filter(level__lt=40).order_by('-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['30-21'] = monster_filter.qs.filter(level__gt=20).filter(level__lte=30).order_by(
                    '-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['20-11'] = monster_filter.qs.filter(level__gt=10).filter(level__lte=20).order_by(
                    '-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['10-1'] = monster_filter.qs.filter(level__lte=10).order_by('-level', '-stars', 'monster__element', 'monster__name')
            elif box_grouping == 'element' or box_grouping == 'attribute':
                monster_stable['water'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name')
                monster_stable['fire'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name')
                monster_stable['wind'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name')
                monster_stable['light'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name')
                monster_stable['dark'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name')
            elif box_grouping == 'archetype':
                monster_stable['attack'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_ATTACK).order_by('-stars', '-level', 'monster__name')
                monster_stable['hp'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_HP).order_by('-stars', '-level', 'monster__name')
                monster_stable['support'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_SUPPORT).order_by('-stars', '-level', 'monster__name')
                monster_stable['defense'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_DEFENSE).order_by('-stars', '-level', 'monster__name')
                monster_stable['material'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_MATERIAL).order_by('-stars', '-level', 'monster__name')
                monster_stable['other'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_NONE).order_by('-stars', '-level', 'monster__name')
            elif box_grouping == 'priority':
                monster_stable['High'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_HIGH).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['Medium'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_MED).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['Low'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_LOW).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['None'] = monster_filter.qs.select_related('monster').filter(owner=summoner).filter(Q(priority=None) | Q(priority=0)).order_by('-level', 'monster__element', 'monster__name')
            elif box_grouping == 'family':
                # Group by base (unawakened) monster name; built in Python since the
                # family key depends on awakens_from.
                for mon in monster_filter.qs:
                    if mon.monster.is_awakened and mon.monster.awakens_from is not None:
                        family_name = mon.monster.awakens_from.name
                    else:
                        family_name = mon.monster.name

                    if family_name not in monster_stable:
                        monster_stable[family_name] = []

                    monster_stable[family_name].append(mon)

                # Sort ordered dict alphabetically by family name
                monster_stable = OrderedDict(sorted(monster_stable.items(), key=lambda family:family[0]))
            else:
                return HttpResponseBadRequest('Invalid sort method')

            context['monster_stable'] = monster_stable
            context['box_grouping'] = box_grouping.replace('_', ' ')
            template = 'herders/profile/monster_inventory/box.html'

        return render(request, template, context)
    else:
        return render(request, 'herders/profile/not_public.html', context)


@login_required
def profile_edit(request, profile_name):
    """Edit the user and summoner profile forms; owner only."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    user_form = EditUserForm(request.POST or None, instance=request.user)
    summoner_form = EditSummonerForm(request.POST or None, instance=request.user.summoner)

    context = {
        'is_owner': is_owner,
        'profile_name': profile_name,
        'summoner': summoner,
        'return_path': return_path,
        'user_form': user_form,
        'summoner_form': summoner_form,
    }

    if is_owner:
        if request.method == 'POST' and summoner_form.is_valid() and user_form.is_valid():
            summoner_form.save()
            user_form.save()

            messages.info(request, 'Your profile has been updated.')
            return redirect(return_path)
        else:
            return render(request, 'herders/profile/profile_edit.html',
context)
    else:
        return HttpResponseForbidden()


@login_required
def storage(request, profile_name):
    """Show the summoner's essence and craft material inventory; owner only."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        craft_mats = []
        essence_mats = []

        # Field help_text doubles as the display name for each material.
        for field_name in Storage.ESSENCE_FIELDS:
            essence_mats.append({
                'name': summoner.storage._meta.get_field(field_name).help_text,
                'field_name': field_name,
                'element': field_name.split('_')[0],
                'qty': getattr(summoner.storage, field_name)
            })

        for field_name in Storage.CRAFT_FIELDS:
            craft_mats.append({
                'name': summoner.storage._meta.get_field(field_name).help_text,
                'field_name': field_name,
                'qty': getattr(summoner.storage, field_name)
            })

        context = {
            'is_owner': is_owner,
            'profile_name': profile_name,
            'summoner': summoner,
            'essence_mats': essence_mats,
            'craft_mats': craft_mats,
        }

        return render(request, 'herders/profile/storage/base.html', context=context)
    else:
        return HttpResponseForbidden()


@login_required
def storage_update(request, profile_name):
    """AJAX endpoint: update one storage field quantity.

    Essence fields are posted as "<field>.<size>" where size is low/mid/high
    and index into the field's size array.
    """
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner and request.POST:
        field_name = request.POST.get('name')
        try:
            new_value = int(request.POST.get('value'))
        except ValueError:
            return HttpResponseBadRequest('Invalid Entry')

        essence_size = None
        if 'essence' in field_name:
            # Split the actual field name off from the size
            try:
                field_name, essence_size = field_name.split('.')
                size_map = {
                    'low': Storage.ESSENCE_LOW,
                    'mid': Storage.ESSENCE_MID,
                    'high': Storage.ESSENCE_HIGH,
                }
                essence_size = size_map[essence_size]
            except (ValueError, KeyError):
                return HttpResponseBadRequest()

        try:
            Storage._meta.get_field(field_name)
        except FieldDoesNotExist:
            return HttpResponseBadRequest()
        else:
            if essence_size is not None:
                # Get a copy of the size array and set the correct index to new value
                essence_list = getattr(summoner.storage, field_name)
                essence_list[essence_size] = new_value
                new_value = essence_list

            setattr(summoner.storage, field_name, new_value)
            summoner.storage.save()
            return HttpResponse()
    else:
        return HttpResponseForbidden()


@login_required
def quick_fodder_menu(request, profile_name):
    """AJAX endpoint: render the quick-fodder menu snippet; owner only."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        template = loader.get_template('herders/profile/monster_inventory/quick_fodder_menu.html')
        response_data = {
            'code': 'success',
            'html': template.render(),
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_instance_add(request, profile_name):
    """AJAX endpoint: add a new MonsterInstance from the add-monster form."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        if request.method == 'POST':
            form = AddMonsterInstanceForm(request.POST or None)
        else:
            # GET parameters pre-fill the form (e.g. from a bestiary link).
            form = AddMonsterInstanceForm(initial=request.GET.dict())

        if request.method == 'POST' and form.is_valid():
            # Create the monster instance
            new_monster = form.save(commit=False)
            new_monster.owner = request.user.summoner
            new_monster.save()

            messages.success(request, 'Added %s to your collection.'
% new_monster)

            template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html')
            context = {
                'profile_name': profile_name,
                'instance': new_monster,
                'is_owner': is_owner,
            }

            response_data = {
                'code': 'success',
                'instance_id': new_monster.pk.hex,
                'html': template.render(context),
            }
        else:
            form.helper.form_action = reverse('herders:monster_instance_add', kwargs={'profile_name': profile_name})
            template = loader.get_template('herders/profile/monster_inventory/add_monster_form.html')

            # Return form filled in and errors shown
            context = {'add_monster_form': form}
            context.update(csrf(request))

            response_data = {
                'code': 'error',
                'html': template.render(context),
            }

        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_instance_quick_add(request, profile_name, monster_id, stars, level):
    """Add a monster at a given star/level directly (marked as fodder), then redirect."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    monster_to_add = get_object_or_404(Monster, pk=monster_id)

    if is_owner:
        new_monster = MonsterInstance.objects.create(owner=summoner, monster=monster_to_add, stars=int(stars), level=int(level), fodder=True, notes='', priority=MonsterInstance.PRIORITY_DONE)
        messages.success(request, 'Added %s to your collection.' % new_monster)
        return redirect(return_path)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_instance_bulk_add(request, profile_name):
    """Add up to 50 monsters at once via a model formset."""
    return_path = reverse('herders:profile_default', kwargs={'profile_name': profile_name})

    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    BulkAddFormset = modelformset_factory(MonsterInstance, form=BulkAddMonsterInstanceForm, formset=BulkAddMonsterInstanceFormset, extra=5, max_num=50)

    if request.method == 'POST':
        formset = BulkAddFormset(request.POST)
    else:
        formset = BulkAddFormset()

    context = {
        'profile_name': request.user.username,
        'return_path': return_path,
        'is_owner': is_owner,
        'bulk_add_formset_action': request.path + '?next=' + return_path,
        'view': 'profile',
    }

    if is_owner:
        if request.method == 'POST':
            if formset.is_valid():
                new_instances = formset.save(commit=False)

                for new_instance in new_instances:
                    try:
                        if new_instance.monster:
                            new_instance.owner = summoner

                            # Material-type monsters are auto-marked as done.
                            if new_instance.monster.archetype == Monster.TYPE_MATERIAL:
                                new_instance.priority = MonsterInstance.PRIORITY_DONE

                            new_instance.save()
                            messages.success(request, 'Added %s to your collection.'
% new_instance)
                    except ObjectDoesNotExist:
                        # Blank form, don't care
                        pass

                return redirect(return_path)
    else:
        raise PermissionDenied("Trying to bulk add to profile you don't own")

    context['bulk_add_formset'] = formset
    return render(request, 'herders/profile/monster_inventory/bulk_add_form.html', context)


def monster_instance_view(request, profile_name, instance_id):
    """Full-page view of a single monster instance (public if profile is public)."""
    return_path = request.GET.get(
        'next',
        request.path
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return render(request, 'herders/profile/not_found.html')

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    context = {
        'profile_name': profile_name,
        'summoner': summoner,
        'return_path': return_path,
        'is_owner': is_owner,
        'view': 'profile',
    }

    try:
        context['instance'] = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id)
    except ObjectDoesNotExist:
        return render(request, 'herders/profile/monster_view/not_found.html', context)

    if is_owner or summoner.public:
        return render(request, 'herders/profile/monster_view/base.html', context)
    else:
        return render(request, 'herders/profile/not_public.html')


def monster_instance_view_runes(request, profile_name, instance_id):
    """AJAX snippet: runes equipped on a monster instance, one entry per slot 1-6."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    try:
        instance = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id)
    except ObjectDoesNotExist:
        return HttpResponseBadRequest()

    # One element per slot; None where the slot is empty.
    instance_runes = [
        instance.runeinstance_set.filter(slot=1).first(),
        instance.runeinstance_set.filter(slot=2).first(),
        instance.runeinstance_set.filter(slot=3).first(),
        instance.runeinstance_set.filter(slot=4).first(),
        instance.runeinstance_set.filter(slot=5).first(),
        instance.runeinstance_set.filter(slot=6).first(),
    ]

    context = {
        'runes': instance_runes,
        'instance': instance,
        'profile_name': profile_name,
        'is_owner': is_owner,
    }

    return render(request, 'herders/profile/monster_view/runes.html', context)


def monster_instance_view_stats(request, profile_name, instance_id):
    """AJAX snippet: computed stats for a monster instance (max level, building, guild bonuses)."""
    try:
        instance = MonsterInstance.objects.select_related('monster').get(pk=instance_id)
    except ObjectDoesNotExist:
        return HttpResponseBadRequest()

    context = {
        'instance': instance,
        'max_stats': instance.get_max_level_stats(),
        'bldg_stats': instance.get_building_stats(),
        'guild_stats': instance.get_building_stats(Building.AREA_GUILD),
    }

    return render(request, 'herders/profile/monster_view/stats.html', context)


def monster_instance_view_skills(request, profile_name, instance_id):
    """AJAX snippet: skills of a monster instance paired with the instance's skill levels."""
    try:
        instance = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id)
    except ObjectDoesNotExist:
        return HttpResponseBadRequest()

    # Reconcile skill level with actual skill from base monster
    skills = []
    skill_levels = [
        instance.skill_1_level,
        instance.skill_2_level,
        instance.skill_3_level,
        instance.skill_4_level,
    ]
    for idx in range(0, instance.monster.skills.count()):
        skills.append({
            'skill': instance.monster.skills.all()[idx],
            'level': skill_levels[idx]
        })

    context = {
        'instance': instance,
        'skills': skills,
    }

    return render(request, 'herders/profile/monster_view/skills.html', context)


def monster_instance_view_info(request, profile_name, instance_id):
    """AJAX snippet: fusion involvement, fusion product info, and possible skill-ups."""
    try:
        instance = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id)
    except ObjectDoesNotExist:
        return HttpResponseBadRequest()

    # Fusions always reference the awakened form, so look up via awakens_to when unawakened.
    if instance.monster.is_awakened:
        ingredient_in = instance.monster.fusion_set.all()
    elif instance.monster.can_awaken and instance.monster.awakens_to:
        ingredient_in = instance.monster.awakens_to.fusion_set.all()
    else:
        ingredient_in = []

    if instance.monster.is_awakened and instance.monster.awakens_from:
        product_of = instance.monster.awakens_from.product.first()
    elif instance.monster.can_awaken:
        product_of = instance.monster.product.first()
    else:
        product_of = []

    context = {
        'instance': instance,
        'profile_name': profile_name,
        'fusion_ingredient_in': ingredient_in,
        'fusion_product_of': product_of,
        'skillups': instance.get_possible_skillups(),
    }

    return render(request, 'herders/profile/monster_view/notes_info.html', context)


@login_required()
def monster_instance_remove_runes(request, profile_name, instance_id):
    """AJAX endpoint: unassign all runes from a monster instance; owner only."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        try:
            instance = MonsterInstance.objects.get(pk=instance_id)
        except ObjectDoesNotExist:
            return HttpResponseBadRequest()
        else:
            for rune in instance.runeinstance_set.all():
                rune.assigned_to = None
                rune.save()

            # Re-save the instance so cached rune-derived stats refresh.
            instance.save()
            messages.success(request, 'Removed all runes from ' + str(instance))

            response_data = {
                'code': 'success',
            }
            return JsonResponse(response_data)
    else:
        raise PermissionDenied()


@login_required()
def monster_instance_edit(request, profile_name, instance_id):
    """AJAX endpoint: edit a monster instance, with per-skill level bounds on the form."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    instance = get_object_or_404(MonsterInstance, pk=instance_id)
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        # Reconcile skill level with actual skill from base monster
        skills = []
        skill_levels = [
            instance.skill_1_level,
            instance.skill_2_level,
            instance.skill_3_level,
            instance.skill_4_level,
        ]
        for idx in range(0, instance.monster.skills.count()):
            skills.append({
                'skill': instance.monster.skills.all()[idx],
                'level': skill_levels[idx]
            })

        form = \
EditMonsterInstanceForm(request.POST or None, instance=instance) form.helper.form_action = request.path if len(skills) >= 1 and skills[0]['skill'].max_level > 1: form.helper['skill_1_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_1", data_skill_field=form['skill_1_level'].auto_id), ) form.helper['skill_1_level'].wrap(Field, min=1, max=skills[0]['skill'].max_level) form.fields['skill_1_level'].label = skills[0]['skill'].name + " Level" else: form.helper['skill_1_level'].wrap(Div, css_class="hidden") if len(skills) >= 2 and skills[1]['skill'].max_level > 1: form.helper['skill_2_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_2", data_skill_field=form['skill_2_level'].auto_id), min=1, max=skills[1]['skill'].max_level, ) form.helper['skill_2_level'].wrap(Field, min=1, max=skills[1]['skill'].max_level) form.fields['skill_2_level'].label = skills[1]['skill'].name + " Level" else: form.helper['skill_2_level'].wrap(Div, css_class="hidden") if len(skills) >= 3 and skills[2]['skill'].max_level > 1: form.helper['skill_3_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_3", data_skill_field=form['skill_3_level'].auto_id), min=1, max=skills[2]['skill'].max_level, ) form.helper['skill_3_level'].wrap(Field, min=1, max=skills[2]['skill'].max_level) form.fields['skill_3_level'].label = skills[2]['skill'].name + " Level" else: form.helper['skill_3_level'].wrap(Div, css_class="hidden") if len(skills) >= 4 and skills[3]['skill'].max_level > 1: form.helper['skill_4_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_4", data_skill_field=form['skill_4_level'].auto_id), min=1, max=skills[1]['skill'].max_level, ) form.helper['skill_4_level'].wrap(Field, min=1, max=skills[3]['skill'].max_level) form.fields['skill_4_level'].label = skills[3]['skill'].name + " Level" else: form.helper['skill_4_level'].wrap(Div, css_class="hidden") if not instance.monster.homunculus: form.helper['custom_name'].wrap(Div, 
css_class="hidden") if request.method == 'POST' and form.is_valid(): mon = form.save() messages.success(request, 'Successfully edited ' + str(mon)) view_mode = request.session.get('profile_view_mode', 'list').lower() if view_mode == 'list': template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html') else: template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html') context = { 'profile_name': profile_name, 'instance': mon, 'is_owner': is_owner, } response_data = { 'code': 'success', 'instance_id': mon.pk.hex, 'html': template.render(context), } else: # Return form filled in and errors shown template = loader.get_template('herders/profile/monster_view/edit_form.html') context = {'edit_monster_form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_instance_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == monster.owner: messages.warning(request, 'Deleted ' + str(monster)) monster.delete() return redirect(return_path) else: return HttpResponseBadRequest() @login_required() def monster_instance_power_up(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) form = PowerUpMonsterInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:monster_instance_power_up', kwargs={'profile_name': profile_name, 'instance_id': instance_id}) 
context = { 'profile_name': request.user.username, 'monster': monster, 'is_owner': is_owner, 'form': form, 'view': 'profile', } validation_errors = {} response_data = { 'code': 'error' } if is_owner: if request.method == 'POST' and form.is_valid(): food_monsters = form.cleaned_data['monster'] # Check that monster is not being fed to itself if monster in food_monsters: validation_errors['base_food_same'] = "You can't feed a monster to itself. " is_evolution = request.POST.get('evolve', False) # Perform validation checks for evolve action if is_evolution: # Check constraints on evolving (or not, if form element was set) # Check monster level and stars if monster.stars >= 6: validation_errors['base_monster_stars'] = "%s is already at 6 stars." % monster.monster.name if not form.cleaned_data['ignore_evolution']: if monster.level != monster.max_level_from_stars(): validation_errors['base_monster_level'] = "%s is not at max level for the current star rating (Lvl %s)." % (monster.monster.name, monster.monster.max_level_from_stars()) # Check number of fodder monsters if len(food_monsters) < monster.stars: validation_errors['food_monster_quantity'] = "Evolution requres %s food monsters." % monster.stars # Check fodder star ratings - must be same as monster for food in food_monsters: if food.stars != monster.stars: if 'food_monster_stars' not in validation_errors: validation_errors['food_monster_stars'] = "All food monsters must be %s stars or higher." 
% monster.stars # Perform the stars++ if no errors if not validation_errors: # Level up stars monster.stars += 1 monster.level = 1 monster.save() messages.success(request, 'Successfully evolved %s to %s<span class="glyphicon glyphicon-star"></span>' % (monster.monster.name, monster.stars), extra_tags='safe') if not validation_errors: # Delete the submitted monsters for food in food_monsters: if food.owner == request.user.summoner: messages.warning(request, 'Deleted %s' % food) food.delete() else: raise PermissionDenied("Trying to delete a monster you don't own") # Redirect back to return path if evolved, or go to edit screen if power up if is_evolution: response_data['code'] = 'success' else: response_data['code'] = 'edit' return JsonResponse(response_data) else: raise PermissionDenied("Trying to power up or evolve a monster you don't own") template = loader.get_template('herders/profile/monster_view/power_up_form.html') # Any errors in the form will fall through to here and be displayed context['validation_errors'] = validation_errors context.update(csrf(request)) response_data['html'] = template.render(context) return JsonResponse(response_data) @login_required() def monster_instance_awaken(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) template = loader.get_template('herders/profile/monster_view/awaken_form.html') form = AwakenMonsterInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:monster_instance_awaken', kwargs={'profile_name': profile_name, 'instance_id': instance_id}) if is_owner: if not monster.monster.is_awakened: if request.method == 'POST' and form.is_valid(): # Subtract essences from inventory if requested if 
form.cleaned_data['subtract_materials']:
                    summoner = Summoner.objects.get(user=request.user)

                    # Deduct the awakening cost for every element/size from storage.
                    summoner.storage.magic_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_magic_high
                    summoner.storage.magic_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_magic_mid
                    summoner.storage.magic_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_magic_low
                    summoner.storage.fire_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_fire_high
                    summoner.storage.fire_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_fire_mid
                    summoner.storage.fire_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_fire_low
                    summoner.storage.water_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_water_high
                    summoner.storage.water_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_water_mid
                    summoner.storage.water_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_water_low
                    summoner.storage.wind_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_wind_high
                    summoner.storage.wind_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_wind_mid
                    summoner.storage.wind_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_wind_low
                    summoner.storage.dark_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_dark_high
                    summoner.storage.dark_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_dark_mid
                    summoner.storage.dark_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_dark_low
                    summoner.storage.light_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_light_high
                    summoner.storage.light_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_light_mid
                    summoner.storage.light_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_light_low

                    summoner.storage.save()

                # Perform the awakening by instance's monster source ID
                monster.monster = monster.monster.awakens_to
                monster.save()

                response_data = {
                    'code': 'success',
                    'removeElement': '#awakenMonsterButton',
                }
            else:
                # Show the awaken form with availability of each required essence.
                storage = summoner.storage.get_storage()
                available_essences = OrderedDict()

                for element, essences in monster.monster.get_awakening_materials().items():
                    available_essences[element] = OrderedDict()
                    for size, cost in essences.items():
                        if cost > 0:
                            available_essences[element][size] = {
                                'qty': storage[element][size],
                                'sufficient': storage[element][size] >= cost,
                            }

                context = {
                    'awaken_form': form,
                    'available_essences': available_essences,
                    'instance': monster,
                }
                context.update(csrf(request))

                response_data = {
                    'code': 'error',
                    'html': template.render(context)
                }
        else:
            error_template = loader.get_template('herders/profile/monster_already_awakened.html')
            response_data = {
                'code': 'error',
                'html': error_template.render()
            }

        return JsonResponse(response_data)
    else:
        raise PermissionDenied()


@login_required()
def monster_instance_duplicate(request, profile_name, instance_id):
    """Copy a monster instance (clears pk so save() inserts a new row)."""
    monster = get_object_or_404(MonsterInstance, pk=instance_id)

    # Check for proper owner before copying
    if request.user.summoner == monster.owner:
        newmonster = monster
        newmonster.pk = None
        newmonster.save()
        # NOTE(review): "Succesfully" is a typo in a user-facing string — fix separately.
        messages.success(request, 'Succesfully copied ' + str(newmonster))

        view_mode = request.session.get('profile_view_mode', 'list').lower()

        if view_mode == 'list':
            template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html')
        else:
            template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html')

        context = {
            'profile_name': profile_name,
            'is_owner': True,
            'instance': newmonster,
        }

        response_data = {
            'code': 'success',
            'instance_id': newmonster.pk.hex,
            'html': template.render(context),
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_piece_add(request, profile_name):
    """AJAX endpoint: add a MonsterPiece record to the summoner's collection."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        if request.method == 'POST':
            form = MonsterPieceForm(request.POST or None)
        else:
            form = MonsterPieceForm()

        form.helper.form_action = reverse('herders:monster_piece_add', kwargs={'profile_name': profile_name})
        template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html')

        if request.method == 'POST' and form.is_valid():
            # Create the monster instance
            new_pieces = form.save(commit=False)
            new_pieces.owner = request.user.summoner
            new_pieces.save()

            messages.success(request, 'Added %s to your collection.' % new_pieces)
            response_data = {
                'code': 'success'
            }
        else:
            # Return form filled in and errors shown
            context = {'form': form}
            context.update(csrf(request))

            response_data = {
                'code': 'error',
                'html': template.render(context),
            }

        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def monster_piece_edit(request, profile_name, instance_id):
    """AJAX endpoint: edit a MonsterPiece record; owner only."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    pieces = get_object_or_404(MonsterPiece, pk=instance_id)
    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html')

    if is_owner:
        form = MonsterPieceForm(request.POST or None, instance=pieces)
        form.helper.form_action = request.path

        if request.method == 'POST' and form.is_valid():
            new_piece = form.save()
            template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html')
            context = {
                'piece': new_piece,
                'is_owner': is_owner,
            }
            response_data = {
                'code': 'success',
                'instance_id': new_piece.pk.hex,
                'html': template.render(context),
            }
        else:
            # Return form filled in and errors shown
            context = {'form': form}
            context.update(csrf(request))
            response_data = {
                'code': 'error',
                'html': template.render(context),
            }

        return JsonResponse(response_data)
    else:
        raise PermissionDenied()


@login_required()
def \
monster_piece_summon(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() pieces = get_object_or_404(MonsterPiece, pk=instance_id) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: if pieces.can_summon(): new_monster = MonsterInstance.objects.create(owner=summoner, monster=pieces.monster, stars=pieces.monster.base_stars, level=1, fodder=False, notes='', priority=MonsterInstance.PRIORITY_DONE) messages.success(request, 'Added %s to your collection.' % new_monster) # Remove the pieces, delete if 0 pieces.pieces -= pieces.PIECE_REQUIREMENTS[pieces.monster.base_stars] pieces.save() response_data = { 'code': 'success', } if pieces.pieces <= 0: pieces.delete() else: template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html') context = { 'piece': pieces, 'is_owner': is_owner, } response_data['instance_id'] = pieces.pk.hex response_data['html'] = template.render(context), return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_piece_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) pieces = get_object_or_404(MonsterPiece, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == pieces.owner: messages.warning(request, 'Deleted ' + str(pieces)) pieces.delete() return redirect(return_path) else: return HttpResponseForbidden() def fusion_progress(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) fusions = Fusion.objects.all() context = { 'view': 'fusion', 'profile_name': 
profile_name, 'summoner': summoner, 'is_owner': is_owner, 'fusions': fusions, } return render(request, 'herders/profile/fusion/base.html', context) def fusion_progress_detail(request, profile_name, monster_slug): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'view': 'fusion', 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, } if is_owner or summoner.public: try: fusion = Fusion.objects.get(product__bestiary_slug=monster_slug) except Fusion.DoesNotExist: return HttpResponseBadRequest() else: level = 10 + fusion.stars * 5 ingredients = [] # Check if fusion has been completed already fusion_complete = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=fusion.product) | Q(monster=fusion.product.awakens_to) ).exists() # Scan summoner's collection for instances each ingredient fusion_ready = True for ingredient in fusion.ingredients.all().select_related('awakens_from', 'awakens_to'): owned_ingredients = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=ingredient) | Q(monster=ingredient.awakens_from), ).order_by('-stars', '-level', '-monster__is_awakened') owned_ingredient_pieces = MonsterPiece.objects.filter( Q(owner=summoner), Q(monster=ingredient) | Q(monster=ingredient.awakens_from), ).first() # Determine if each individual requirement is met using highest evolved/leveled monster that is not ignored for fusion for owned_ingredient in owned_ingredients: if not owned_ingredient.ignore_for_fusion: acquired = True evolved = owned_ingredient.stars >= fusion.stars leveled = owned_ingredient.level >= level awakened = owned_ingredient.monster.is_awakened complete = acquired & evolved & leveled & awakened break else: if owned_ingredient_pieces: acquired = owned_ingredient_pieces.can_summon() else: acquired = False evolved = False leveled 
= False awakened = False complete = False if not complete: fusion_ready = False # Check if this ingredient is fusable sub_fusion = None sub_fusion_awakening_cost = None try: sub_fusion = Fusion.objects.get(product=ingredient.awakens_from) except Fusion.DoesNotExist: pass else: if not acquired: awakened_sub_fusion_ingredients = MonsterInstance.objects.filter( monster__pk__in=sub_fusion.ingredients.values_list('pk', flat=True), ignore_for_fusion=False, owner=summoner, ) sub_fusion_awakening_cost = sub_fusion.total_awakening_cost(awakened_sub_fusion_ingredients) ingredient_progress = { 'instance': ingredient, 'owned': owned_ingredients, 'pieces': owned_ingredient_pieces, 'complete': complete, 'acquired': acquired, 'evolved': evolved, 'leveled': leveled, 'awakened': awakened, 'is_fuseable': True if sub_fusion else False, 'sub_fusion_cost': sub_fusion_awakening_cost, } ingredients.append(ingredient_progress) awakened_owned_ingredients = MonsterInstance.objects.filter( monster__pk__in=fusion.ingredients.values_list('pk', flat=True), ignore_for_fusion=False, owner=summoner, ) total_cost = fusion.total_awakening_cost(awakened_owned_ingredients) essences_satisfied, total_missing = fusion.missing_awakening_cost(summoner) # Determine the total/missing essences including sub-fusions if fusion.sub_fusion_available(): total_sub_fusion_cost = deepcopy(total_cost) for ingredient in ingredients: if ingredient['sub_fusion_cost']: for element, sizes in total_sub_fusion_cost.items(): for size, qty in sizes.items(): total_sub_fusion_cost[element][size] += ingredient['sub_fusion_cost'][element][size] # Now determine what's missing based on owner's storage storage = summoner.storage.get_storage() sub_fusion_total_missing = { element: { size: total_sub_fusion_cost[element][size] - storage[element][size] if total_sub_fusion_cost[element][size] > storage[element][size] else 0 for size, qty in element_sizes.items() } for element, element_sizes in total_sub_fusion_cost.items() } 
sub_fusion_mats_satisfied = True for sizes in total_sub_fusion_cost.values(): for qty in sizes.values(): if qty > 0: sub_fusion_mats_satisfied = False else: sub_fusion_total_missing = None sub_fusion_mats_satisfied = None progress = { 'instance': fusion.product, 'acquired': fusion_complete, 'stars': fusion.stars, 'level': level, 'cost': fusion.cost, 'ingredients': ingredients, 'awakening_mats_cost': total_cost, 'awakening_mats_sufficient': essences_satisfied, 'awakening_mats_missing': total_missing, 'sub_fusion_mats_missing': sub_fusion_total_missing, 'sub_fusion_mats_sufficient': sub_fusion_mats_satisfied, 'ready': fusion_ready, } context['fusion'] = progress return render(request, 'herders/profile/fusion/fusion_detail.html', context) else: return render(request, 'herders/profile/not_public.html', context) def teams(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) add_team_group_form = AddTeamGroupForm() context = { 'view': 'teams', 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'is_owner': is_owner, 'add_team_group_form': add_team_group_form, } if is_owner or summoner.public: return render(request, 'herders/profile/teams/teams_base.html', context) else: return render(request, 'herders/profile/not_public.html', context) def team_list(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Get team objects for the summoner team_groups = TeamGroup.objects.filter(owner=summoner) 
@login_required
def team_group_add(request, profile_name):
    """Create a new TeamGroup on the given profile.

    Only the profile owner may add a group; anyone else gets a 403 via
    PermissionDenied.
    """
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    form = AddTeamGroupForm(request.POST or None)

    if is_owner:
        if form.is_valid() and request.method == 'POST':
            # Create the team group owned by the requesting summoner
            new_group = form.save(commit=False)
            new_group.owner = request.user.summoner
            new_group.save()

        return redirect(return_path)
    else:
        # BUG FIX: was `return PermissionDenied(...)` -- returning the exception
        # instance instead of raising it. Django treats that as an invalid view
        # response; raising produces the intended HTTP 403.
        raise PermissionDenied("Attempting to add group to profile you don't own.")


@login_required
def team_group_edit(request, profile_name, group_id):
    """Edit an existing TeamGroup (name etc.). Owner-only; others get 403."""
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    team_group = get_object_or_404(TeamGroup, pk=group_id)
    form = EditTeamGroupForm(request.POST or None, instance=team_group)

    if is_owner:
        if form.is_valid() and request.method == 'POST':
            form.save()
            return redirect(return_path)
    else:
        # BUG FIX: raise instead of returning the exception instance (see above).
        raise PermissionDenied("Editing a group you don't own")

    # GET, or invalid POST: re-render the edit form with errors shown.
    context = {
        'profile_name': profile_name,
        'summoner': summoner,
        'form': form,
        'group_id': group_id,
        'return_path': return_path,
        'is_owner': is_owner,
        'view': 'teams',
    }
    return render(request, 'herders/profile/teams/team_group_edit.html', context)


@login_required
def team_group_delete(request, profile_name, group_id):
    """Delete a TeamGroup, optionally reassigning its teams to another group.

    POST with 'delete' deletes all member teams outright; otherwise the teams
    are moved to the group chosen in the form. The confirmation page is shown
    again while the group still contains teams.
    """
    return_path = request.GET.get(
        'next',
        reverse('herders:teams', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    team_group = get_object_or_404(TeamGroup, pk=group_id)
    form = DeleteTeamGroupForm(request.POST or None)
    form.helper.form_action = request.path
    # Only offer the summoner's *other* groups as reassignment targets.
    form.fields['reassign_group'].queryset = TeamGroup.objects.filter(owner=summoner).exclude(pk=group_id)

    context = {
        'view': 'teams',
        'profile_name': profile_name,
        'return_path': return_path,
        'is_owner': is_owner,
        'form': form,
    }

    if is_owner:
        if request.method == 'POST' and form.is_valid():
            list_of_teams = Team.objects.filter(group__pk=group_id)

            if request.POST.get('delete', False):
                list_of_teams.delete()
            else:
                new_group = form.cleaned_data['reassign_group']
                if new_group:
                    for team in list_of_teams:
                        team.group = new_group
                        team.save()
                else:
                    context['validation_errors'] = 'Please specify a group to reassign to.'

        # If teams remain (invalid POST or GET), show the confirmation page;
        # otherwise the group is empty and can be removed.
        if team_group.team_set.count() > 0:
            return render(request, 'herders/profile/teams/team_group_delete.html', context)
        else:
            messages.warning(request, 'Deleted team group %s' % team_group.name)
            team_group.delete()
            return redirect(return_path)
    else:
        # BUG FIX: raise instead of returning the exception instance (see above).
        raise PermissionDenied()
if team_group.team_set.count() > 0: return render(request, 'herders/profile/teams/team_group_delete.html', context) else: messages.warning(request, 'Deleted team group %s' % team_group.name) team_group.delete() return redirect(return_path) else: return PermissionDenied() def team_detail(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) team = get_object_or_404(Team, pk=team_id) team_effects = [] if team.leader and team.leader.monster.all_skill_effects(): for effect in team.leader.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) for team_member in team.roster.all(): if team_member.monster.all_skill_effects(): for effect in team_member.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'team': team, 'team_buffs': team_effects, } if is_owner or summoner.public: return render(request, 'herders/profile/teams/team_detail.html', context) else: return render(request, 'herders/profile/not_public.html', context) @login_required def team_edit(request, profile_name, team_id=None): return_path = reverse('herders:teams', kwargs={'profile_name': profile_name}) if team_id: team = Team.objects.get(pk=team_id) edit_form = EditTeamForm(request.POST or None, instance=team) else: edit_form = EditTeamForm(request.POST or None) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Limit form choices to objects owned by 
the current user. edit_form.fields['group'].queryset = TeamGroup.objects.filter(owner=summoner) edit_form.fields['leader'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.fields['roster'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'view': 'teams', } if is_owner: edit_form.full_clean() # re-clean due to updated querysets after form initialization if request.method == 'POST' and edit_form.is_valid(): team = edit_form.save(commit=False) team.owner = summoner team.save() messages.success(request, 'Saved changes to %s - %s.' % (team.group, team)) return team_detail(request, profile_name, team.pk.hex) else: raise PermissionDenied() context['edit_team_form'] = edit_form return render(request, 'herders/profile/teams/team_edit.html', context) @login_required def team_delete(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) team = get_object_or_404(Team, pk=team_id) # Check for proper owner before deleting if request.user.summoner == team.group.owner: team.delete() messages.warning(request, 'Deleted team %s - %s.' 
% (team.group, team)) return redirect(return_path) else: return HttpResponseForbidden() def runes(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) filter_form = FilterRuneForm(auto_id="filter_id_%s") filter_form.helper.form_action = reverse('herders:rune_inventory', kwargs={'profile_name': profile_name}) context = { 'view': 'runes', 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, 'old_rune_count': RuneInstance.objects.filter(owner=summoner, substats__isnull=True).count(), 'rune_filter_form': filter_form, } if is_owner or summoner.public: return render(request, 'herders/profile/runes/base.html', context) else: return render(request, 'herders/profile/not_public.html', context) def rune_inventory(request, profile_name, view_mode=None, box_grouping=None): # If we passed in view mode or sort method, set the session variable and redirect back to base profile URL if view_mode: request.session['rune_inventory_view_mode'] = view_mode.lower() if box_grouping: request.session['rune_inventory_box_method'] = box_grouping.lower() if request.session.modified: return HttpResponse("Rune view mode cookie set") view_mode = request.session.get('rune_inventory_view_mode', 'box').lower() box_grouping = request.session.get('rune_inventory_box_method', 'slot').lower() if view_mode == 'crafts': return rune_inventory_crafts(request, profile_name) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) rune_queryset = RuneInstance.objects.filter(owner=summoner).select_related('assigned_to', 'assigned_to__monster') total_count = rune_queryset.count() form = 
FilterRuneForm(request.POST or None) if form.is_valid(): rune_filter = RuneInstanceFilter(form.cleaned_data, queryset=rune_queryset) else: rune_filter = RuneInstanceFilter(None, queryset=rune_queryset) filtered_count = rune_filter.qs.count() context = { 'runes': rune_filter.qs, 'total_count': total_count, 'filtered_count': filtered_count, 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, } if is_owner or summoner.public: if view_mode == 'box': rune_box = [] if box_grouping == 'slot': rune_box.append({ 'name': 'Slot 1', 'runes': rune_filter.qs.filter(slot=1) }) rune_box.append({ 'name': 'Slot 2', 'runes': rune_filter.qs.filter(slot=2) }) rune_box.append({ 'name': 'Slot 3', 'runes': rune_filter.qs.filter(slot=3) }) rune_box.append({ 'name': 'Slot 4', 'runes': rune_filter.qs.filter(slot=4) }) rune_box.append({ 'name': 'Slot 5', 'runes': rune_filter.qs.filter(slot=5) }) rune_box.append({ 'name': 'Slot 6', 'runes': rune_filter.qs.filter(slot=6) }) elif box_grouping == 'grade': rune_box.append({ 'name': '6*', 'runes': rune_filter.qs.filter(stars=6) }) rune_box.append({ 'name': '5*', 'runes': rune_filter.qs.filter(stars=5) }) rune_box.append({ 'name': '4*', 'runes': rune_filter.qs.filter(stars=4) }) rune_box.append({ 'name': '3*', 'runes': rune_filter.qs.filter(stars=3) }) rune_box.append({ 'name': '2*', 'runes': rune_filter.qs.filter(stars=2) }) rune_box.append({ 'name': '1*', 'runes': rune_filter.qs.filter(stars=1) }) elif box_grouping == 'equipped': rune_box.append({ 'name': 'Not Equipped', 'runes': rune_filter.qs.filter(assigned_to__isnull=True) }) # Create a dictionary of monster PKs and their equipped runes monsters = OrderedDict() for rune in rune_filter.qs.filter(assigned_to__isnull=False).select_related('assigned_to', 'assigned_to__monster').order_by('assigned_to__monster__name', 'slot'): if rune.assigned_to.pk not in monsters: monsters[rune.assigned_to.pk] = { 'name': str(rune.assigned_to), 'runes': [] } 
monsters[rune.assigned_to.pk]['runes'].append(rune) for monster_runes in monsters.values(): rune_box.append(monster_runes) elif box_grouping == 'type': for (type, type_name) in RuneInstance.TYPE_CHOICES: rune_box.append({ 'name': type_name, 'runes': rune_filter.qs.filter(type=type) }) context['runes'] = rune_box context['box_grouping'] = box_grouping template = 'herders/profile/runes/inventory.html' elif view_mode == 'grid': template = 'herders/profile/runes/inventory_grid.html' else: template = 'herders/profile/runes/inventory_table.html' return render(request, template, context) else: return render(request, 'herders/profile/not_public.html', context) def rune_inventory_crafts(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'profile_name': profile_name, 'is_owner': is_owner, } if is_owner or summoner.public: craft_box = OrderedDict() for (craft, craft_name) in RuneInstance.CRAFT_CHOICES: craft_box[craft_name] = OrderedDict() for rune, rune_name in RuneInstance.TYPE_CHOICES: craft_box[craft_name][rune_name] = RuneCraftInstance.objects.filter(owner=summoner, type=craft, rune=rune).order_by('stat', 'quality') # Immemorial craft_box[craft_name]['Immemorial'] = RuneCraftInstance.objects.filter(owner=summoner, type=craft, rune__isnull=True).order_by('stat', 'quality') context['crafts'] = craft_box return render(request, 'herders/profile/runes/inventory_crafts.html', context) else: return render(request, 'herders/profile/not_public.html') @login_required def rune_add(request, profile_name): form = AddRuneInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/runes/add_form.html') if request.method == 'POST': if form.is_valid(): # Create 
the rune instance new_rune = form.save(commit=False) new_rune.owner = request.user.summoner new_rune.save() messages.success(request, 'Added ' + str(new_rune)) # Send back blank form form = AddRuneInstanceForm() form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) context = {'add_rune_form': form} context.update(csrf(request)) response_data = { 'code': 'success', 'html': template.render(context) } else: context = {'add_rune_form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } else: # Check for any pre-filled GET parameters slot = request.GET.get('slot', None) assigned_to = request.GET.get('assigned_to', None) try: assigned_monster = MonsterInstance.objects.get(owner=request.user.summoner, pk=assigned_to) except MonsterInstance.DoesNotExist: assigned_monster = None form = AddRuneInstanceForm(initial={ 'assigned_to': assigned_monster, 'slot': slot if slot is not None else 1, }) form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) # Return form filled in and errors shown context = {'add_rune_form': form} context.update(csrf(request)) response_data = { 'html': template.render(context) } return JsonResponse(response_data) @login_required def rune_edit(request, profile_name, rune_id): rune = get_object_or_404(RuneInstance, pk=rune_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddRuneInstanceForm(request.POST or None, instance=rune, auto_id='edit_id_%s') form.helper.form_action = reverse('herders:rune_edit', kwargs={'profile_name': profile_name, 'rune_id': rune_id}) template = loader.get_template('herders/profile/runes/add_form.html') if is_owner: context = {'add_rune_form': form} context.update(csrf(request)) if request.method == 'POST' and 
@login_required
def rune_assign(request, profile_name, instance_id, slot=None):
    """Return JSON with either the rune-assign filter form or its results.

    Unassigned runes owned by the requesting user are offered; if `slot` is
    given, the queryset is restricted to that slot.
    """
    rune_queryset = RuneInstance.objects.filter(owner=request.user.summoner, assigned_to=None)
    filter_form = AssignRuneForm(request.POST or None, initial={'slot': slot}, prefix='assign')
    filter_form.helper.form_action = reverse(
        'herders:rune_assign',
        kwargs={'profile_name': profile_name, 'instance_id': instance_id}
    )

    if slot:
        rune_queryset = rune_queryset.filter(slot=slot)

    if request.method == 'POST' and filter_form.is_valid():
        rune_filter = RuneInstanceFilter(filter_form.cleaned_data, queryset=rune_queryset)
        template = loader.get_template('herders/profile/runes/assign_results.html')
        context = {
            'filter': rune_filter.qs,
            'profile_name': profile_name,
            'instance_id': instance_id,
        }
        context.update(csrf(request))
        response_data = {
            'code': 'results',
            'html': template.render(context)
        }
    else:
        rune_filter = RuneInstanceFilter(queryset=rune_queryset)
        template = loader.get_template('herders/profile/runes/assign_form.html')
        context = {
            'filter': rune_filter.qs,
            'form': filter_form,
            'profile_name': profile_name,
            'instance_id': instance_id,
        }
        context.update(csrf(request))
        response_data = {
            'code': 'success',
            'html': template.render(context)
        }

    return JsonResponse(response_data)


@login_required
def rune_assign_choice(request, profile_name, instance_id, rune_id):
    """Equip the chosen rune on the monster, displacing any rune in that slot."""
    monster = get_object_or_404(MonsterInstance, pk=instance_id)
    rune = get_object_or_404(RuneInstance, pk=rune_id)

    if rune.assigned_to is not None:
        # TODO: Warn about removing from other monster?
        pass

    # Check for existing rune in the same slot and unassign it first.
    existing_runes = monster.runeinstance_set.filter(slot=rune.slot)
    for existing_rune in existing_runes:
        existing_rune.assigned_to = None
        # BUG FIX: the displaced rune was never saved, so the cleared
        # assignment was lost and the rune remained equipped in the database.
        existing_rune.save()

    rune.assigned_to = monster
    rune.save()
    monster.save()  # re-save to recalculate monster stats with the new rune

    response_data = {
        'code': 'success',
    }
    return JsonResponse(response_data)


@login_required
def rune_unassign(request, profile_name, rune_id):
    """Unassign a single rune from its monster (owner-only)."""
    rune = get_object_or_404(RuneInstance, pk=rune_id)
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        mon = rune.assigned_to
        rune.assigned_to = None
        rune.save()
        if mon:
            mon.save()  # recalc stats now that the rune is gone

        response_data = {
            'code': 'success',
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required()
def rune_unassign_all(request, profile_name):
    """Unassign every equipped rune on the profile (owner-only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    assigned_mons = []
    assigned_runes = RuneInstance.objects.filter(owner=summoner, assigned_to__isnull=False)
    number_assigned = assigned_runes.count()

    if is_owner:
        for rune in assigned_runes:
            if rune.assigned_to not in assigned_mons:
                assigned_mons.append(rune.assigned_to)
            rune.assigned_to = None
            rune.save()

        # Resave monster instances that had runes removed to recalc stats
        for mon in assigned_mons:
            mon.save()

        messages.success(request, 'Unassigned ' + str(number_assigned) + ' rune(s).')

        response_data = {
            'code': 'success',
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()
@login_required
def rune_delete(request, profile_name, rune_id):
    """Delete a single rune; resave its monster (if any) to recalc stats."""
    rune = get_object_or_404(RuneInstance, pk=rune_id)
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        mon = rune.assigned_to
        messages.warning(request, 'Deleted ' + str(rune))
        rune.delete()
        if mon:
            mon.save()

        response_data = {
            'code': 'success',
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required
def rune_delete_all(request, profile_name):
    """Delete every rune and rune craft on the profile (owner-only)."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        # Delete the runes, remembering which monsters had runes equipped
        death_row = RuneInstance.objects.filter(owner=summoner)
        number_killed = death_row.count()
        assigned_mons = []

        for rune in death_row:
            if rune.assigned_to and rune.assigned_to not in assigned_mons:
                assigned_mons.append(rune.assigned_to)

        death_row.delete()

        # Delete the crafts
        RuneCraftInstance.objects.filter(owner=summoner).delete()

        messages.warning(request, 'Deleted ' + str(number_killed) + ' runes.')

        # Resave previously-runed monsters to recalc their stats
        for mon in assigned_mons:
            mon.save()

        response_data = {
            'code': 'success',
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required
def rune_resave_all(request, profile_name):
    """Re-save legacy runes (no substats rows) to regenerate derived data."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)

    if is_owner:
        for r in RuneInstance.objects.filter(owner=summoner, substats__isnull=True):
            r.save()

        response_data = {
            'code': 'success',
        }
        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()


@login_required
def rune_craft_add(request, profile_name):
    """Add a rune craft (grindstone/enchant gem); respond with JSON form HTML."""
    form = AddRuneCraftInstanceForm(request.POST or None)
    form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name})
    template = loader.get_template('herders/profile/runes/add_craft_form.html')

    if request.method == 'POST':
        if form.is_valid():
            # Create the craft instance
            new_craft = form.save(commit=False)
            new_craft.owner = request.user.summoner
            new_craft.save()

            messages.success(request, 'Added ' + new_craft.get_type_display() + ' ' + str(new_craft))

            # Send back blank form
            form = AddRuneCraftInstanceForm()
            form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name})

            context = {'form': form}
            context.update(csrf(request))
            response_data = {
                'code': 'success',
                'html': template.render(context)
            }
        else:
            context = {'form': form}
            context.update(csrf(request))
            response_data = {
                'code': 'error',
                'html': template.render(context)
            }
    else:
        # Return form filled in and errors shown
        context = {'form': form}
        context.update(csrf(request))
        response_data = {
            'html': template.render(context)
        }

    return JsonResponse(response_data)


@login_required
def rune_craft_edit(request, profile_name, craft_id):
    """Edit a rune craft instance; respond with JSON form HTML (owner-only)."""
    craft = get_object_or_404(RuneCraftInstance, pk=craft_id)
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()

    is_owner = (request.user.is_authenticated() and summoner.user == request.user)
    form = AddRuneCraftInstanceForm(request.POST or None, instance=craft)
    form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id})
    template = loader.get_template('herders/profile/runes/add_craft_form.html')

    if is_owner:
        if request.method == 'POST' and form.is_valid():
            rune = form.save()
            messages.success(request, 'Saved changes to ' + str(rune))

            # BUG FIX: the blank form sent back after a save was built from
            # AddRuneInstanceForm (the rune form), not the craft form this
            # view and its template use.
            form = AddRuneCraftInstanceForm()
            form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id})

            context = {'form': form}
            context.update(csrf(request))
            response_data = {
                'code': 'success',
                'html': template.render(context)
            }
        else:
            # Return form filled in and errors shown
            context = {'form': form}
            context.update(csrf(request))
            response_data = {
                'code': 'error',
                'html': template.render(context)
            }

        return JsonResponse(response_data)
    else:
        return HttpResponseForbidden()
context.update(csrf(request)) response_data = { 'code': 'success', 'html': template.render(context) } else: # Return form filled in and errors shown context = {'form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_craft_delete(request, profile_name, craft_id): craft = get_object_or_404(RuneCraftInstance, pk=craft_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: messages.warning(request, 'Deleted ' + craft.get_rune_display() + ' ' + str(craft)) craft.delete() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() Resolved an issue saving updated team roster from collections import OrderedDict from copy import deepcopy from django.core.mail import mail_admins from django.core.urlresolvers import reverse from django.core.exceptions import PermissionDenied, ObjectDoesNotExist from django.contrib import messages from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import Group from django.contrib.auth.decorators import login_required from django.db import IntegrityError from django.forms.models import modelformset_factory from django.http import HttpResponseForbidden, JsonResponse, HttpResponse, HttpResponseBadRequest from django.db.models import FieldDoesNotExist from django.shortcuts import render, redirect, get_object_or_404 from django.template import loader, RequestContext, Context from django.template.context_processors import csrf from .forms import * from .filters import * from .models import Summoner, Monster, Fusion, Building, BuildingInstance, MonsterInstance, MonsterPiece, TeamGroup, Team, RuneInstance, 
RuneCraftInstance, Storage def register(request): form = RegisterUserForm(request.POST or None) if request.method == 'POST': if form.is_valid(): if User.objects.filter(username=form.cleaned_data['username']).exists(): form.add_error('username', 'Username already taken') else: new_user = None new_summoner = None try: # Create the user new_user = User.objects.create_user( username=form.cleaned_data['username'], password=form.cleaned_data['password'], email=form.cleaned_data['email'], ) new_user.save() new_user.groups.add(Group.objects.get(name='Summoners')) new_summoner = Summoner.objects.create( user=new_user, summoner_name=form.cleaned_data['summoner_name'], public=form.cleaned_data['is_public'], ) new_summoner.save() # Automatically log them in user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password']) if user is not None: if user.is_active: login(request, user) return redirect('herders:profile_default', profile_name=user.username) except IntegrityError as e: if new_user is not None: new_user.delete() if new_summoner is not None: new_summoner.delete() form.add_error(None, 'There was an issue completing your registration. 
Please try again.') mail_admins( subject='Error during user registration', message='{}'.format(e), fail_silently=True, ) context = {'form': form} return render(request, 'herders/register.html', context) @login_required def change_username(request): user = request.user form = CrispyChangeUsernameForm(request.POST or None) context = { 'form': form, } if request.method == 'POST' and form.is_valid(): try: user.username = form.cleaned_data['username'] user.save() return redirect('username_change_complete') except IntegrityError: form.add_error('username', 'Username already taken') return render(request, 'registration/change_username.html', context) def change_username_complete(request): return render(request, 'registration/change_username_complete.html') @login_required def profile_delete(request, profile_name): user = request.user try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = DeleteProfileForm(request.POST or None) form.helper.form_action = reverse('herders:profile_delete', kwargs={'profile_name': profile_name}) context = { 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): logout(request) user.delete() messages.warning(request, 'Your profile has been permanently deleted.') return redirect('news:latest_news') return render(request, 'herders/profile/profile_delete.html', context) else: return HttpResponseForbidden("You don't own this profile") @login_required def following(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile_following', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 
'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'view': 'following', 'return_path': return_path, } return render(request, 'herders/profile/following/list.html', context) @login_required def follow_add(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() new_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.add(new_follower) messages.info(request, 'Now following %s' % new_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() @login_required def follow_remove(request, profile_name, follow_username): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() removed_follower = get_object_or_404(Summoner, user__username=follow_username) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: summoner.following.remove(removed_follower) messages.info(request, 'Unfollowed %s' % removed_follower.user.username) return redirect(return_path) else: return HttpResponseForbidden() def profile(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') # Determine if the person logged in is the one requesting the view is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_filter_form = 
FilterMonsterInstanceForm(auto_id='id_filter_%s') monster_filter_form.helper.form_action = reverse('herders:monster_inventory', kwargs={'profile_name': profile_name}) context = { 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, 'monster_filter_form': monster_filter_form, 'view': 'profile', } if is_owner or summoner.public: return render(request, 'herders/profile/monster_inventory/base.html', context) else: return render(request, 'herders/profile/not_public.html') def buildings(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'summoner': summoner, 'is_owner': is_owner, 'profile_name': profile_name, } return render(request, 'herders/profile/buildings/base.html', context) def buildings_inventory(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) all_buildings = Building.objects.all().order_by('name') building_data = [] total_glory_cost = 0 spent_glory = 0 total_guild_cost = 0 spent_guild = 0 for b in all_buildings: bldg_data = _building_data(summoner, b) if b.area == Building.AREA_GENERAL: total_glory_cost += sum(b.upgrade_cost) spent_glory += bldg_data['spent_upgrade_cost'] elif b.area == Building.AREA_GUILD: total_guild_cost += sum(b.upgrade_cost) spent_guild += bldg_data['spent_upgrade_cost'] building_data.append(bldg_data) context = { 'is_owner': is_owner, 'summoner': summoner, 'profile_name': profile_name, 'buildings': building_data, 'total_glory_cost': total_glory_cost, 'spent_glory': spent_glory, 'glory_progress': float(spent_glory) / total_glory_cost * 100, 
'total_guild_cost': total_guild_cost, 'spent_guild': spent_guild, 'guild_progress': float(spent_guild) / total_guild_cost * 100, } return render(request, 'herders/profile/buildings/inventory.html', context) @login_required def building_edit(request, profile_name, building_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) base_building = get_object_or_404(Building, pk=building_id) try: owned_instance = BuildingInstance.objects.get(owner=summoner, building=base_building) except BuildingInstance.DoesNotExist: owned_instance = BuildingInstance.objects.create(owner=summoner, level=0, building=base_building) form = EditBuildingForm(request.POST or None, instance=owned_instance) form.helper.form_action = reverse('herders:building_edit', kwargs={'profile_name': profile_name, 'building_id': building_id}) context = { 'form': form, } context.update(csrf(request)) if is_owner: if request.method == 'POST' and form.is_valid(): owned_instance = form.save() messages.success(request,'Updated ' + owned_instance.building.name + ' to level ' + str(owned_instance.level)) response_data = { 'code': 'success', } else: template = loader.get_template('herders/profile/buildings/edit_form.html') response_data = { 'code': 'error', 'html': template.render(context), } return JsonResponse(response_data) else: return HttpResponseForbidden() def _building_data(summoner, building): percent_stat = building.affected_stat in Building.PERCENT_STATS total_upgrade_cost = sum(building.upgrade_cost) if building.area == Building.AREA_GENERAL: currency = 'glory_points.png' else: currency = 'guild_points.png' try: instance = BuildingInstance.objects.get(owner=summoner, building=building) if instance.level > 0: stat_bonus = building.stat_bonus[instance.level - 1] else: stat_bonus = 0 remaining_upgrade_cost = 
instance.remaining_upgrade_cost() except BuildingInstance.DoesNotExist: instance = None stat_bonus = 0 remaining_upgrade_cost = total_upgrade_cost except BuildingInstance.MultipleObjectsReturned: # Should only be 1 ever - use the first and delete the others. instance = BuildingInstance.objects.filter(owner=summoner, building=building).first() BuildingInstance.objects.filter(owner=summoner, building=building).exclude(pk=instance.pk).delete() return _building_data(summoner, building) return { 'base': building, 'instance': instance, 'stat_bonus': stat_bonus, 'percent_stat': percent_stat, 'spent_upgrade_cost': total_upgrade_cost - remaining_upgrade_cost, 'total_upgrade_cost': total_upgrade_cost, 'upgrade_progress': float(total_upgrade_cost - remaining_upgrade_cost) / total_upgrade_cost * 100, 'currency': currency, } def monster_inventory(request, profile_name, view_mode=None, box_grouping=None): # If we passed in view mode or sort method, set the session variable and redirect back to ourself without the view mode or box grouping if view_mode: request.session['profile_view_mode'] = view_mode.lower() if box_grouping: request.session['profile_group_method'] = box_grouping.lower() if request.session.modified: return HttpResponse("Profile view mode cookie set") view_mode = request.session.get('profile_view_mode', 'box').lower() box_grouping = request.session.get('profile_group_method', 'grade').lower() try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') monster_queryset = MonsterInstance.objects.filter(owner=summoner).select_related('monster', 'monster__awakens_from') total_monsters = monster_queryset.count() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if view_mode == 'list': monster_queryset = monster_queryset.select_related( 'monster__leader_skill', 'monster__awakens_to' ).prefetch_related( 'monster__skills', 
'runeinstance_set', 'team_set', 'team_leader', 'tags' ) form = FilterMonsterInstanceForm(request.POST or None, auto_id='id_filter_%s') if form.is_valid(): monster_filter = MonsterInstanceFilter(form.cleaned_data, queryset=monster_queryset) else: monster_filter = MonsterInstanceFilter(queryset=monster_queryset) filtered_count = monster_filter.qs.count() context = { 'monsters': monster_filter.qs, 'total_count': total_monsters, 'filtered_count': filtered_count, 'profile_name': profile_name, 'is_owner': is_owner, } if is_owner or summoner.public: if view_mode == 'pieces': context['monster_pieces'] = MonsterPiece.objects.filter(owner=summoner).select_related('monster') template = 'herders/profile/monster_inventory/summoning_pieces.html' elif view_mode == 'list': template = 'herders/profile/monster_inventory/list.html' else: # Group up the filtered monsters monster_stable = OrderedDict() if box_grouping == 'grade' or box_grouping == 'stars': monster_stable['6*'] = monster_filter.qs.filter(stars=6).order_by('-level', 'monster__element', 'monster__name') monster_stable['5*'] = monster_filter.qs.filter(stars=5).order_by('-level', 'monster__element', 'monster__name') monster_stable['4*'] = monster_filter.qs.filter(stars=4).order_by('-level', 'monster__element', 'monster__name') monster_stable['3*'] = monster_filter.qs.filter(stars=3).order_by('-level', 'monster__element', 'monster__name') monster_stable['2*'] = monster_filter.qs.filter(stars=2).order_by('-level', 'monster__element', 'monster__name') monster_stable['1*'] = monster_filter.qs.filter(stars=1).order_by('-level', 'monster__element', 'monster__name') elif box_grouping == 'natural_stars': nat5 = (Q(monster__base_stars=6) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=5) & Q(monster__is_awakened=False)) nat4 = (Q(monster__base_stars=5) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=4) & Q(monster__is_awakened=False)) nat3 = (Q(monster__base_stars=4) & Q(monster__is_awakened=True)) | 
(Q(monster__base_stars=3) & Q(monster__is_awakened=False)) nat2 = (Q(monster__base_stars=3) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=2) & Q(monster__is_awakened=False)) nat1 = (Q(monster__base_stars=2) & Q(monster__is_awakened=True)) | (Q(monster__base_stars=1) & Q(monster__is_awakened=False)) monster_stable['Natural 5*'] = monster_filter.qs.filter(nat5).order_by('-stars', '-level', 'monster__name') monster_stable['Natural 4*'] = monster_filter.qs.filter(nat4).order_by('-stars', '-level', 'monster__name') monster_stable['Natural 3*'] = monster_filter.qs.filter(nat3).order_by('-stars', '-level', 'monster__name') monster_stable['Natural 2*'] = monster_filter.qs.filter(nat2).order_by('-stars', '-level', 'monster__name') monster_stable['Natural 1*'] = monster_filter.qs.filter(nat1).order_by('-stars', '-level', 'monster__name') elif box_grouping == 'level': monster_stable['40'] = monster_filter.qs.filter(level=40).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['39-31'] = monster_filter.qs.filter(level__gt=30).filter(level__lt=40).order_by('-level', '-stars', 'monster__element', 'monster__name') monster_stable['30-21'] = monster_filter.qs.filter(level__gt=20).filter(level__lte=30).order_by( '-level', '-stars', 'monster__element', 'monster__name') monster_stable['20-11'] = monster_filter.qs.filter(level__gt=10).filter(level__lte=20).order_by( '-level', '-stars', 'monster__element', 'monster__name') monster_stable['10-1'] = monster_filter.qs.filter(level__lte=10).order_by('-level', '-stars', 'monster__element', 'monster__name') elif box_grouping == 'element' or box_grouping == 'attribute': monster_stable['water'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name') monster_stable['fire'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name') monster_stable['wind'] = 
monster_filter.qs.filter(monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name') monster_stable['light'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name') monster_stable['dark'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name') elif box_grouping == 'archetype': monster_stable['attack'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_ATTACK).order_by('-stars', '-level', 'monster__name') monster_stable['hp'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_HP).order_by('-stars', '-level', 'monster__name') monster_stable['support'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_SUPPORT).order_by('-stars', '-level', 'monster__name') monster_stable['defense'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_DEFENSE).order_by('-stars', '-level', 'monster__name') monster_stable['material'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_MATERIAL).order_by('-stars', '-level', 'monster__name') monster_stable['other'] = monster_filter.qs.filter(monster__archetype=Monster.TYPE_NONE).order_by('-stars', '-level', 'monster__name') elif box_grouping == 'priority': monster_stable['High'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_HIGH).order_by('-level', 'monster__element', 'monster__name') monster_stable['Medium'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_MED).order_by('-level', 'monster__element', 'monster__name') monster_stable['Low'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_LOW).order_by('-level', 'monster__element', 'monster__name') monster_stable['None'] = monster_filter.qs.select_related('monster').filter(owner=summoner).filter(Q(priority=None) | Q(priority=0)).order_by('-level', 
'monster__element', 'monster__name') elif box_grouping == 'family': for mon in monster_filter.qs: if mon.monster.is_awakened and mon.monster.awakens_from is not None: family_name = mon.monster.awakens_from.name else: family_name = mon.monster.name if family_name not in monster_stable: monster_stable[family_name] = [] monster_stable[family_name].append(mon) # Sort ordered dict alphabetically by family name monster_stable = OrderedDict(sorted(monster_stable.items(), key=lambda family:family[0])) else: return HttpResponseBadRequest('Invalid sort method') context['monster_stable'] = monster_stable context['box_grouping'] = box_grouping.replace('_', ' ') template = 'herders/profile/monster_inventory/box.html' return render(request, template, context) else: return render(request, 'herders/profile/not_public.html', context) @login_required def profile_edit(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) user_form = EditUserForm(request.POST or None, instance=request.user) summoner_form = EditSummonerForm(request.POST or None, instance=request.user.summoner) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'user_form': user_form, 'summoner_form': summoner_form, } if is_owner: if request.method == 'POST' and summoner_form.is_valid() and user_form.is_valid(): summoner_form.save() user_form.save() messages.info(request, 'Your profile has been updated.') return redirect(return_path) else: return render(request, 'herders/profile/profile_edit.html', context) else: return HttpResponseForbidden() @login_required def storage(request, profile_name): try: summoner = 
Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: craft_mats = [] essence_mats = [] for field_name in Storage.ESSENCE_FIELDS: essence_mats.append({ 'name': summoner.storage._meta.get_field(field_name).help_text, 'field_name': field_name, 'element': field_name.split('_')[0], 'qty': getattr(summoner.storage, field_name) }) for field_name in Storage.CRAFT_FIELDS: craft_mats.append({ 'name': summoner.storage._meta.get_field(field_name).help_text, 'field_name': field_name, 'qty': getattr(summoner.storage, field_name) }) context = { 'is_owner': is_owner, 'profile_name': profile_name, 'summoner': summoner, 'essence_mats': essence_mats, 'craft_mats': craft_mats, } return render(request, 'herders/profile/storage/base.html', context=context) else: return HttpResponseForbidden() @login_required def storage_update(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner and request.POST: field_name = request.POST.get('name') try: new_value = int(request.POST.get('value')) except ValueError: return HttpResponseBadRequest('Invalid Entry') essence_size = None if 'essence' in field_name: # Split the actual field name off from the size try: field_name, essence_size = field_name.split('.') size_map = { 'low': Storage.ESSENCE_LOW, 'mid': Storage.ESSENCE_MID, 'high': Storage.ESSENCE_HIGH, } essence_size = size_map[essence_size] except (ValueError, KeyError): return HttpResponseBadRequest() try: Storage._meta.get_field(field_name) except FieldDoesNotExist: return HttpResponseBadRequest() else: if essence_size is not None: # Get a copy of the size array and set the correct index to new value essence_list = 
getattr(summoner.storage, field_name) essence_list[essence_size] = new_value new_value = essence_list setattr(summoner.storage, field_name, new_value) summoner.storage.save() return HttpResponse() else: return HttpResponseForbidden() @login_required def quick_fodder_menu(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: template = loader.get_template('herders/profile/monster_inventory/quick_fodder_menu.html') response_data = { 'code': 'success', 'html': template.render(), } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_instance_add(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: if request.method == 'POST': form = AddMonsterInstanceForm(request.POST or None) else: form = AddMonsterInstanceForm(initial=request.GET.dict()) if request.method == 'POST' and form.is_valid(): # Create the monster instance new_monster = form.save(commit=False) new_monster.owner = request.user.summoner new_monster.save() messages.success(request, 'Added %s to your collection.' 
% new_monster) template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html') context = { 'profile_name': profile_name, 'instance': new_monster, 'is_owner': is_owner, } response_data = { 'code': 'success', 'instance_id': new_monster.pk.hex, 'html': template.render(context), } else: form.helper.form_action = reverse('herders:monster_instance_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/monster_inventory/add_monster_form.html') # Return form filled in and errors shown context = {'add_monster_form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context), } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_instance_quick_add(request, profile_name, monster_id, stars, level): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster_to_add = get_object_or_404(Monster, pk=monster_id) if is_owner: new_monster = MonsterInstance.objects.create(owner=summoner, monster=monster_to_add, stars=int(stars), level=int(level), fodder=True, notes='', priority=MonsterInstance.PRIORITY_DONE) messages.success(request, 'Added %s to your collection.' 
% new_monster) return redirect(return_path) else: return HttpResponseForbidden() @login_required() def monster_instance_bulk_add(request, profile_name): return_path = reverse('herders:profile_default', kwargs={'profile_name': profile_name}) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) BulkAddFormset = modelformset_factory(MonsterInstance, form=BulkAddMonsterInstanceForm, formset=BulkAddMonsterInstanceFormset, extra=5, max_num=50) if request.method == 'POST': formset = BulkAddFormset(request.POST) else: formset = BulkAddFormset() context = { 'profile_name': request.user.username, 'return_path': return_path, 'is_owner': is_owner, 'bulk_add_formset_action': request.path + '?next=' + return_path, 'view': 'profile', } if is_owner: if request.method == 'POST': if formset.is_valid(): new_instances = formset.save(commit=False) for new_instance in new_instances: try: if new_instance.monster: new_instance.owner = summoner if new_instance.monster.archetype == Monster.TYPE_MATERIAL: new_instance.priority = MonsterInstance.PRIORITY_DONE new_instance.save() messages.success(request, 'Added %s to your collection.' 
% new_instance) except ObjectDoesNotExist: # Blank form, don't care pass return redirect(return_path) else: raise PermissionDenied("Trying to bulk add to profile you don't own") context['bulk_add_formset'] = formset return render(request, 'herders/profile/monster_inventory/bulk_add_form.html', context) def monster_instance_view(request, profile_name, instance_id): return_path = request.GET.get( 'next', request.path ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'is_owner': is_owner, 'view': 'profile', } try: context['instance'] = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: return render(request, 'herders/profile/monster_view/not_found.html', context) if is_owner or summoner.public: return render(request, 'herders/profile/monster_view/base.html', context) else: return render(request, 'herders/profile/not_public.html') def monster_instance_view_runes(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) try: instance = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: return HttpResponseBadRequest() instance_runes = [ instance.runeinstance_set.filter(slot=1).first(), instance.runeinstance_set.filter(slot=2).first(), instance.runeinstance_set.filter(slot=3).first(), instance.runeinstance_set.filter(slot=4).first(), 
instance.runeinstance_set.filter(slot=5).first(), instance.runeinstance_set.filter(slot=6).first(), ] context = { 'runes': instance_runes, 'instance': instance, 'profile_name': profile_name, 'is_owner': is_owner, } return render(request, 'herders/profile/monster_view/runes.html', context) def monster_instance_view_stats(request, profile_name, instance_id): try: instance = MonsterInstance.objects.select_related('monster').get(pk=instance_id) except ObjectDoesNotExist: return HttpResponseBadRequest() context = { 'instance': instance, 'max_stats': instance.get_max_level_stats(), 'bldg_stats': instance.get_building_stats(), 'guild_stats': instance.get_building_stats(Building.AREA_GUILD), } return render(request, 'herders/profile/monster_view/stats.html', context) def monster_instance_view_skills(request, profile_name, instance_id): try: instance = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: return HttpResponseBadRequest() # Reconcile skill level with actual skill from base monster skills = [] skill_levels = [ instance.skill_1_level, instance.skill_2_level, instance.skill_3_level, instance.skill_4_level, ] for idx in range(0, instance.monster.skills.count()): skills.append({ 'skill': instance.monster.skills.all()[idx], 'level': skill_levels[idx] }) context = { 'instance': instance, 'skills': skills, } return render(request, 'herders/profile/monster_view/skills.html', context) def monster_instance_view_info(request, profile_name, instance_id): try: instance = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id) except ObjectDoesNotExist: return HttpResponseBadRequest() if instance.monster.is_awakened: ingredient_in = instance.monster.fusion_set.all() elif instance.monster.can_awaken and instance.monster.awakens_to: ingredient_in = instance.monster.awakens_to.fusion_set.all() else: 
ingredient_in = [] if instance.monster.is_awakened and instance.monster.awakens_from: product_of = instance.monster.awakens_from.product.first() elif instance.monster.can_awaken: product_of = instance.monster.product.first() else: product_of = [] context = { 'instance': instance, 'profile_name': profile_name, 'fusion_ingredient_in': ingredient_in, 'fusion_product_of': product_of, 'skillups': instance.get_possible_skillups(), } return render(request, 'herders/profile/monster_view/notes_info.html', context) @login_required() def monster_instance_remove_runes(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: try: instance = MonsterInstance.objects.get(pk=instance_id) except ObjectDoesNotExist: return HttpResponseBadRequest() else: for rune in instance.runeinstance_set.all(): rune.assigned_to = None rune.save() instance.save() messages.success(request, 'Removed all runes from ' + str(instance)) response_data = { 'code': 'success', } return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_instance_edit(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() instance = get_object_or_404(MonsterInstance, pk=instance_id) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: # Reconcile skill level with actual skill from base monster skills = [] skill_levels = [ instance.skill_1_level, instance.skill_2_level, instance.skill_3_level, instance.skill_4_level, ] for idx in range(0, instance.monster.skills.count()): skills.append({ 'skill': instance.monster.skills.all()[idx], 'level': skill_levels[idx] }) form = 
EditMonsterInstanceForm(request.POST or None, instance=instance) form.helper.form_action = request.path if len(skills) >= 1 and skills[0]['skill'].max_level > 1: form.helper['skill_1_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_1", data_skill_field=form['skill_1_level'].auto_id), ) form.helper['skill_1_level'].wrap(Field, min=1, max=skills[0]['skill'].max_level) form.fields['skill_1_level'].label = skills[0]['skill'].name + " Level" else: form.helper['skill_1_level'].wrap(Div, css_class="hidden") if len(skills) >= 2 and skills[1]['skill'].max_level > 1: form.helper['skill_2_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_2", data_skill_field=form['skill_2_level'].auto_id), min=1, max=skills[1]['skill'].max_level, ) form.helper['skill_2_level'].wrap(Field, min=1, max=skills[1]['skill'].max_level) form.fields['skill_2_level'].label = skills[1]['skill'].name + " Level" else: form.helper['skill_2_level'].wrap(Div, css_class="hidden") if len(skills) >= 3 and skills[2]['skill'].max_level > 1: form.helper['skill_3_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_3", data_skill_field=form['skill_3_level'].auto_id), min=1, max=skills[2]['skill'].max_level, ) form.helper['skill_3_level'].wrap(Field, min=1, max=skills[2]['skill'].max_level) form.fields['skill_3_level'].label = skills[2]['skill'].name + " Level" else: form.helper['skill_3_level'].wrap(Div, css_class="hidden") if len(skills) >= 4 and skills[3]['skill'].max_level > 1: form.helper['skill_4_level'].wrap( FieldWithButtons, StrictButton("Max", name="Set_Max_Skill_4", data_skill_field=form['skill_4_level'].auto_id), min=1, max=skills[1]['skill'].max_level, ) form.helper['skill_4_level'].wrap(Field, min=1, max=skills[3]['skill'].max_level) form.fields['skill_4_level'].label = skills[3]['skill'].name + " Level" else: form.helper['skill_4_level'].wrap(Div, css_class="hidden") if not instance.monster.homunculus: form.helper['custom_name'].wrap(Div, 
css_class="hidden") if request.method == 'POST' and form.is_valid(): mon = form.save() messages.success(request, 'Successfully edited ' + str(mon)) view_mode = request.session.get('profile_view_mode', 'list').lower() if view_mode == 'list': template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html') else: template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html') context = { 'profile_name': profile_name, 'instance': mon, 'is_owner': is_owner, } response_data = { 'code': 'success', 'instance_id': mon.pk.hex, 'html': template.render(context), } else: # Return form filled in and errors shown template = loader.get_template('herders/profile/monster_view/edit_form.html') context = {'edit_monster_form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_instance_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == monster.owner: messages.warning(request, 'Deleted ' + str(monster)) monster.delete() return redirect(return_path) else: return HttpResponseBadRequest() @login_required() def monster_instance_power_up(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) form = PowerUpMonsterInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:monster_instance_power_up', kwargs={'profile_name': profile_name, 'instance_id': instance_id}) 
context = { 'profile_name': request.user.username, 'monster': monster, 'is_owner': is_owner, 'form': form, 'view': 'profile', } validation_errors = {} response_data = { 'code': 'error' } if is_owner: if request.method == 'POST' and form.is_valid(): food_monsters = form.cleaned_data['monster'] # Check that monster is not being fed to itself if monster in food_monsters: validation_errors['base_food_same'] = "You can't feed a monster to itself. " is_evolution = request.POST.get('evolve', False) # Perform validation checks for evolve action if is_evolution: # Check constraints on evolving (or not, if form element was set) # Check monster level and stars if monster.stars >= 6: validation_errors['base_monster_stars'] = "%s is already at 6 stars." % monster.monster.name if not form.cleaned_data['ignore_evolution']: if monster.level != monster.max_level_from_stars(): validation_errors['base_monster_level'] = "%s is not at max level for the current star rating (Lvl %s)." % (monster.monster.name, monster.monster.max_level_from_stars()) # Check number of fodder monsters if len(food_monsters) < monster.stars: validation_errors['food_monster_quantity'] = "Evolution requres %s food monsters." % monster.stars # Check fodder star ratings - must be same as monster for food in food_monsters: if food.stars != monster.stars: if 'food_monster_stars' not in validation_errors: validation_errors['food_monster_stars'] = "All food monsters must be %s stars or higher." 
% monster.stars # Perform the stars++ if no errors if not validation_errors: # Level up stars monster.stars += 1 monster.level = 1 monster.save() messages.success(request, 'Successfully evolved %s to %s<span class="glyphicon glyphicon-star"></span>' % (monster.monster.name, monster.stars), extra_tags='safe') if not validation_errors: # Delete the submitted monsters for food in food_monsters: if food.owner == request.user.summoner: messages.warning(request, 'Deleted %s' % food) food.delete() else: raise PermissionDenied("Trying to delete a monster you don't own") # Redirect back to return path if evolved, or go to edit screen if power up if is_evolution: response_data['code'] = 'success' else: response_data['code'] = 'edit' return JsonResponse(response_data) else: raise PermissionDenied("Trying to power up or evolve a monster you don't own") template = loader.get_template('herders/profile/monster_view/power_up_form.html') # Any errors in the form will fall through to here and be displayed context['validation_errors'] = validation_errors context.update(csrf(request)) response_data['html'] = template.render(context) return JsonResponse(response_data) @login_required() def monster_instance_awaken(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) monster = get_object_or_404(MonsterInstance, pk=instance_id) template = loader.get_template('herders/profile/monster_view/awaken_form.html') form = AwakenMonsterInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:monster_instance_awaken', kwargs={'profile_name': profile_name, 'instance_id': instance_id}) if is_owner: if not monster.monster.is_awakened: if request.method == 'POST' and form.is_valid(): # Subtract essences from inventory if requested if 
form.cleaned_data['subtract_materials']: summoner = Summoner.objects.get(user=request.user) summoner.storage.magic_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_magic_high summoner.storage.magic_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_magic_mid summoner.storage.magic_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_magic_low summoner.storage.fire_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_fire_high summoner.storage.fire_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_fire_mid summoner.storage.fire_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_fire_low summoner.storage.water_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_water_high summoner.storage.water_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_water_mid summoner.storage.water_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_water_low summoner.storage.wind_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_wind_high summoner.storage.wind_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_wind_mid summoner.storage.wind_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_wind_low summoner.storage.dark_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_dark_high summoner.storage.dark_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_dark_mid summoner.storage.dark_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_dark_low summoner.storage.light_essence[Storage.ESSENCE_HIGH] -= monster.monster.awaken_mats_light_high summoner.storage.light_essence[Storage.ESSENCE_MID] -= monster.monster.awaken_mats_light_mid summoner.storage.light_essence[Storage.ESSENCE_LOW] -= monster.monster.awaken_mats_light_low summoner.storage.save() # Perform the awakening by instance's monster source ID monster.monster = monster.monster.awakens_to monster.save() response_data = { 'code': 'success', 'removeElement': '#awakenMonsterButton', } else: storage = 
summoner.storage.get_storage() available_essences = OrderedDict() for element, essences in monster.monster.get_awakening_materials().items(): available_essences[element] = OrderedDict() for size, cost in essences.items(): if cost > 0: available_essences[element][size] = { 'qty': storage[element][size], 'sufficient': storage[element][size] >= cost, } context = { 'awaken_form': form, 'available_essences': available_essences, 'instance': monster, } context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } else: error_template = loader.get_template('herders/profile/monster_already_awakened.html') response_data = { 'code': 'error', 'html': error_template.render() } return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_instance_duplicate(request, profile_name, instance_id): monster = get_object_or_404(MonsterInstance, pk=instance_id) # Check for proper owner before copying if request.user.summoner == monster.owner: newmonster = monster newmonster.pk = None newmonster.save() messages.success(request, 'Succesfully copied ' + str(newmonster)) view_mode = request.session.get('profile_view_mode', 'list').lower() if view_mode == 'list': template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html') else: template = loader.get_template('herders/profile/monster_inventory/monster_box_snippet.html') context = { 'profile_name': profile_name, 'is_owner': True, 'instance': newmonster, } response_data = { 'code': 'success', 'instance_id': newmonster.pk.hex, 'html': template.render(context), } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_piece_add(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: if 
request.method == 'POST': form = MonsterPieceForm(request.POST or None) else: form = MonsterPieceForm() form.helper.form_action = reverse('herders:monster_piece_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html') if request.method == 'POST' and form.is_valid(): # Create the monster instance new_pieces = form.save(commit=False) new_pieces.owner = request.user.summoner new_pieces.save() messages.success(request, 'Added %s to your collection.' % new_pieces) response_data = { 'code': 'success' } else: # Return form filled in and errors shown context = {'form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context), } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def monster_piece_edit(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() pieces = get_object_or_404(MonsterPiece, pk=instance_id) is_owner = (request.user.is_authenticated() and summoner.user == request.user) template = loader.get_template('herders/profile/monster_inventory/monster_piece_form.html') if is_owner: form = MonsterPieceForm(request.POST or None, instance=pieces) form.helper.form_action = request.path if request.method == 'POST' and form.is_valid(): new_piece = form.save() template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html') context = { 'piece': new_piece, 'is_owner': is_owner, } response_data = { 'code': 'success', 'instance_id': new_piece.pk.hex, 'html': template.render(context), } else: # Return form filled in and errors shown context = {'form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context), } return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def 
monster_piece_summon(request, profile_name, instance_id): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() pieces = get_object_or_404(MonsterPiece, pk=instance_id) is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: if pieces.can_summon(): new_monster = MonsterInstance.objects.create(owner=summoner, monster=pieces.monster, stars=pieces.monster.base_stars, level=1, fodder=False, notes='', priority=MonsterInstance.PRIORITY_DONE) messages.success(request, 'Added %s to your collection.' % new_monster) # Remove the pieces, delete if 0 pieces.pieces -= pieces.PIECE_REQUIREMENTS[pieces.monster.base_stars] pieces.save() response_data = { 'code': 'success', } if pieces.pieces <= 0: pieces.delete() else: template = loader.get_template('herders/profile/monster_inventory/monster_piece_snippet.html') context = { 'piece': pieces, 'is_owner': is_owner, } response_data['instance_id'] = pieces.pk.hex response_data['html'] = template.render(context), return JsonResponse(response_data) else: raise PermissionDenied() @login_required() def monster_piece_delete(request, profile_name, instance_id): return_path = request.GET.get( 'next', reverse('herders:profile_default', kwargs={'profile_name': profile_name}) ) pieces = get_object_or_404(MonsterPiece, pk=instance_id) # Check for proper owner before deleting if request.user.summoner == pieces.owner: messages.warning(request, 'Deleted ' + str(pieces)) pieces.delete() return redirect(return_path) else: return HttpResponseForbidden() def fusion_progress(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) fusions = Fusion.objects.all() context = { 'view': 'fusion', 'profile_name': 
profile_name, 'summoner': summoner, 'is_owner': is_owner, 'fusions': fusions, } return render(request, 'herders/profile/fusion/base.html', context) def fusion_progress_detail(request, profile_name, monster_slug): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'view': 'fusion', 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, } if is_owner or summoner.public: try: fusion = Fusion.objects.get(product__bestiary_slug=monster_slug) except Fusion.DoesNotExist: return HttpResponseBadRequest() else: level = 10 + fusion.stars * 5 ingredients = [] # Check if fusion has been completed already fusion_complete = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=fusion.product) | Q(monster=fusion.product.awakens_to) ).exists() # Scan summoner's collection for instances each ingredient fusion_ready = True for ingredient in fusion.ingredients.all().select_related('awakens_from', 'awakens_to'): owned_ingredients = MonsterInstance.objects.filter( Q(owner=summoner), Q(monster=ingredient) | Q(monster=ingredient.awakens_from), ).order_by('-stars', '-level', '-monster__is_awakened') owned_ingredient_pieces = MonsterPiece.objects.filter( Q(owner=summoner), Q(monster=ingredient) | Q(monster=ingredient.awakens_from), ).first() # Determine if each individual requirement is met using highest evolved/leveled monster that is not ignored for fusion for owned_ingredient in owned_ingredients: if not owned_ingredient.ignore_for_fusion: acquired = True evolved = owned_ingredient.stars >= fusion.stars leveled = owned_ingredient.level >= level awakened = owned_ingredient.monster.is_awakened complete = acquired & evolved & leveled & awakened break else: if owned_ingredient_pieces: acquired = owned_ingredient_pieces.can_summon() else: acquired = False evolved = False leveled 
= False awakened = False complete = False if not complete: fusion_ready = False # Check if this ingredient is fusable sub_fusion = None sub_fusion_awakening_cost = None try: sub_fusion = Fusion.objects.get(product=ingredient.awakens_from) except Fusion.DoesNotExist: pass else: if not acquired: awakened_sub_fusion_ingredients = MonsterInstance.objects.filter( monster__pk__in=sub_fusion.ingredients.values_list('pk', flat=True), ignore_for_fusion=False, owner=summoner, ) sub_fusion_awakening_cost = sub_fusion.total_awakening_cost(awakened_sub_fusion_ingredients) ingredient_progress = { 'instance': ingredient, 'owned': owned_ingredients, 'pieces': owned_ingredient_pieces, 'complete': complete, 'acquired': acquired, 'evolved': evolved, 'leveled': leveled, 'awakened': awakened, 'is_fuseable': True if sub_fusion else False, 'sub_fusion_cost': sub_fusion_awakening_cost, } ingredients.append(ingredient_progress) awakened_owned_ingredients = MonsterInstance.objects.filter( monster__pk__in=fusion.ingredients.values_list('pk', flat=True), ignore_for_fusion=False, owner=summoner, ) total_cost = fusion.total_awakening_cost(awakened_owned_ingredients) essences_satisfied, total_missing = fusion.missing_awakening_cost(summoner) # Determine the total/missing essences including sub-fusions if fusion.sub_fusion_available(): total_sub_fusion_cost = deepcopy(total_cost) for ingredient in ingredients: if ingredient['sub_fusion_cost']: for element, sizes in total_sub_fusion_cost.items(): for size, qty in sizes.items(): total_sub_fusion_cost[element][size] += ingredient['sub_fusion_cost'][element][size] # Now determine what's missing based on owner's storage storage = summoner.storage.get_storage() sub_fusion_total_missing = { element: { size: total_sub_fusion_cost[element][size] - storage[element][size] if total_sub_fusion_cost[element][size] > storage[element][size] else 0 for size, qty in element_sizes.items() } for element, element_sizes in total_sub_fusion_cost.items() } 
sub_fusion_mats_satisfied = True for sizes in total_sub_fusion_cost.values(): for qty in sizes.values(): if qty > 0: sub_fusion_mats_satisfied = False else: sub_fusion_total_missing = None sub_fusion_mats_satisfied = None progress = { 'instance': fusion.product, 'acquired': fusion_complete, 'stars': fusion.stars, 'level': level, 'cost': fusion.cost, 'ingredients': ingredients, 'awakening_mats_cost': total_cost, 'awakening_mats_sufficient': essences_satisfied, 'awakening_mats_missing': total_missing, 'sub_fusion_mats_missing': sub_fusion_total_missing, 'sub_fusion_mats_sufficient': sub_fusion_mats_satisfied, 'ready': fusion_ready, } context['fusion'] = progress return render(request, 'herders/profile/fusion/fusion_detail.html', context) else: return render(request, 'herders/profile/not_public.html', context) def teams(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) add_team_group_form = AddTeamGroupForm() context = { 'view': 'teams', 'profile_name': profile_name, 'summoner': summoner, 'return_path': return_path, 'is_owner': is_owner, 'add_team_group_form': add_team_group_form, } if is_owner or summoner.public: return render(request, 'herders/profile/teams/teams_base.html', context) else: return render(request, 'herders/profile/not_public.html', context) def team_list(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Get team objects for the summoner team_groups = TeamGroup.objects.filter(owner=summoner) 
context = { 'profile_name': profile_name, 'is_owner': is_owner, 'team_groups': team_groups, } return render(request, 'herders/profile/teams/team_list.html', context) @login_required def team_group_add(request, profile_name): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddTeamGroupForm(request.POST or None) if is_owner: if form.is_valid() and request.method == 'POST': # Create the monster instance new_group = form.save(commit=False) new_group.owner = request.user.summoner new_group.save() return redirect(return_path) else: return PermissionDenied("Attempting to add group to profile you don't own.") @login_required def team_group_edit(request, profile_name, group_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) team_group = get_object_or_404(TeamGroup, pk=group_id) form = EditTeamGroupForm(request.POST or None, instance=team_group) if is_owner: if form.is_valid() and request.method == 'POST': form.save() return redirect(return_path) else: return PermissionDenied("Editing a group you don't own") context = { 'profile_name': profile_name, 'summoner': summoner, 'form': form, 'group_id': group_id, 'return_path': return_path, 'is_owner': is_owner, 'view': 'teams', } return render(request, 'herders/profile/teams/team_group_edit.html', context) @login_required def team_group_delete(request, profile_name, group_id): return_path = request.GET.get( 'next', reverse('herders:teams', 
kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) team_group = get_object_or_404(TeamGroup, pk=group_id) form = DeleteTeamGroupForm(request.POST or None) form.helper.form_action = request.path form.fields['reassign_group'].queryset = TeamGroup.objects.filter(owner=summoner).exclude(pk=group_id) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'form': form, } if is_owner: if request.method == 'POST' and form.is_valid(): list_of_teams = Team.objects.filter(group__pk=group_id) if request.POST.get('delete', False): list_of_teams.delete() else: new_group = form.cleaned_data['reassign_group'] if new_group: for team in list_of_teams: team.group = new_group team.save() else: context['validation_errors'] = 'Please specify a group to reassign to.' 
if team_group.team_set.count() > 0: return render(request, 'herders/profile/teams/team_group_delete.html', context) else: messages.warning(request, 'Deleted team group %s' % team_group.name) team_group.delete() return redirect(return_path) else: return PermissionDenied() def team_detail(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) team = get_object_or_404(Team, pk=team_id) team_effects = [] if team.leader and team.leader.monster.all_skill_effects(): for effect in team.leader.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) for team_member in team.roster.all(): if team_member.monster.all_skill_effects(): for effect in team_member.monster.all_skill_effects(): if effect not in team_effects: team_effects.append(effect) context = { 'view': 'teams', 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'team': team, 'team_buffs': team_effects, } if is_owner or summoner.public: return render(request, 'herders/profile/teams/team_detail.html', context) else: return render(request, 'herders/profile/not_public.html', context) @login_required def team_edit(request, profile_name, team_id=None): return_path = reverse('herders:teams', kwargs={'profile_name': profile_name}) if team_id: team = Team.objects.get(pk=team_id) edit_form = EditTeamForm(request.POST or None, instance=team) else: edit_form = EditTeamForm(request.POST or None) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) # Limit form choices to objects owned by 
the current user. edit_form.fields['group'].queryset = TeamGroup.objects.filter(owner=summoner) edit_form.fields['leader'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.fields['roster'].queryset = MonsterInstance.objects.filter(owner=summoner) edit_form.helper.form_action = request.path + '?next=' + return_path context = { 'profile_name': profile_name, 'return_path': return_path, 'is_owner': is_owner, 'view': 'teams', } if is_owner: edit_form.full_clean() # re-clean due to updated querysets after form initialization if request.method == 'POST' and edit_form.is_valid(): team = edit_form.save(commit=False) team.owner = summoner team.save() edit_form.save_m2m() messages.success(request, 'Saved changes to %s - %s.' % (team.group, team)) return team_detail(request, profile_name, team.pk.hex) else: raise PermissionDenied() context['edit_team_form'] = edit_form return render(request, 'herders/profile/teams/team_edit.html', context) @login_required def team_delete(request, profile_name, team_id): return_path = request.GET.get( 'next', reverse('herders:teams', kwargs={'profile_name': profile_name}) ) team = get_object_or_404(Team, pk=team_id) # Check for proper owner before deleting if request.user.summoner == team.group.owner: team.delete() messages.warning(request, 'Deleted team %s - %s.' 
% (team.group, team)) return redirect(return_path) else: return HttpResponseForbidden() def runes(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return render(request, 'herders/profile/not_found.html') is_owner = (request.user.is_authenticated() and summoner.user == request.user) filter_form = FilterRuneForm(auto_id="filter_id_%s") filter_form.helper.form_action = reverse('herders:rune_inventory', kwargs={'profile_name': profile_name}) context = { 'view': 'runes', 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, 'old_rune_count': RuneInstance.objects.filter(owner=summoner, substats__isnull=True).count(), 'rune_filter_form': filter_form, } if is_owner or summoner.public: return render(request, 'herders/profile/runes/base.html', context) else: return render(request, 'herders/profile/not_public.html', context) def rune_inventory(request, profile_name, view_mode=None, box_grouping=None): # If we passed in view mode or sort method, set the session variable and redirect back to base profile URL if view_mode: request.session['rune_inventory_view_mode'] = view_mode.lower() if box_grouping: request.session['rune_inventory_box_method'] = box_grouping.lower() if request.session.modified: return HttpResponse("Rune view mode cookie set") view_mode = request.session.get('rune_inventory_view_mode', 'box').lower() box_grouping = request.session.get('rune_inventory_box_method', 'slot').lower() if view_mode == 'crafts': return rune_inventory_crafts(request, profile_name) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) rune_queryset = RuneInstance.objects.filter(owner=summoner).select_related('assigned_to', 'assigned_to__monster') total_count = rune_queryset.count() form = 
FilterRuneForm(request.POST or None) if form.is_valid(): rune_filter = RuneInstanceFilter(form.cleaned_data, queryset=rune_queryset) else: rune_filter = RuneInstanceFilter(None, queryset=rune_queryset) filtered_count = rune_filter.qs.count() context = { 'runes': rune_filter.qs, 'total_count': total_count, 'filtered_count': filtered_count, 'profile_name': profile_name, 'summoner': summoner, 'is_owner': is_owner, } if is_owner or summoner.public: if view_mode == 'box': rune_box = [] if box_grouping == 'slot': rune_box.append({ 'name': 'Slot 1', 'runes': rune_filter.qs.filter(slot=1) }) rune_box.append({ 'name': 'Slot 2', 'runes': rune_filter.qs.filter(slot=2) }) rune_box.append({ 'name': 'Slot 3', 'runes': rune_filter.qs.filter(slot=3) }) rune_box.append({ 'name': 'Slot 4', 'runes': rune_filter.qs.filter(slot=4) }) rune_box.append({ 'name': 'Slot 5', 'runes': rune_filter.qs.filter(slot=5) }) rune_box.append({ 'name': 'Slot 6', 'runes': rune_filter.qs.filter(slot=6) }) elif box_grouping == 'grade': rune_box.append({ 'name': '6*', 'runes': rune_filter.qs.filter(stars=6) }) rune_box.append({ 'name': '5*', 'runes': rune_filter.qs.filter(stars=5) }) rune_box.append({ 'name': '4*', 'runes': rune_filter.qs.filter(stars=4) }) rune_box.append({ 'name': '3*', 'runes': rune_filter.qs.filter(stars=3) }) rune_box.append({ 'name': '2*', 'runes': rune_filter.qs.filter(stars=2) }) rune_box.append({ 'name': '1*', 'runes': rune_filter.qs.filter(stars=1) }) elif box_grouping == 'equipped': rune_box.append({ 'name': 'Not Equipped', 'runes': rune_filter.qs.filter(assigned_to__isnull=True) }) # Create a dictionary of monster PKs and their equipped runes monsters = OrderedDict() for rune in rune_filter.qs.filter(assigned_to__isnull=False).select_related('assigned_to', 'assigned_to__monster').order_by('assigned_to__monster__name', 'slot'): if rune.assigned_to.pk not in monsters: monsters[rune.assigned_to.pk] = { 'name': str(rune.assigned_to), 'runes': [] } 
monsters[rune.assigned_to.pk]['runes'].append(rune) for monster_runes in monsters.values(): rune_box.append(monster_runes) elif box_grouping == 'type': for (type, type_name) in RuneInstance.TYPE_CHOICES: rune_box.append({ 'name': type_name, 'runes': rune_filter.qs.filter(type=type) }) context['runes'] = rune_box context['box_grouping'] = box_grouping template = 'herders/profile/runes/inventory.html' elif view_mode == 'grid': template = 'herders/profile/runes/inventory_grid.html' else: template = 'herders/profile/runes/inventory_table.html' return render(request, template, context) else: return render(request, 'herders/profile/not_public.html', context) def rune_inventory_crafts(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) context = { 'profile_name': profile_name, 'is_owner': is_owner, } if is_owner or summoner.public: craft_box = OrderedDict() for (craft, craft_name) in RuneInstance.CRAFT_CHOICES: craft_box[craft_name] = OrderedDict() for rune, rune_name in RuneInstance.TYPE_CHOICES: craft_box[craft_name][rune_name] = RuneCraftInstance.objects.filter(owner=summoner, type=craft, rune=rune).order_by('stat', 'quality') # Immemorial craft_box[craft_name]['Immemorial'] = RuneCraftInstance.objects.filter(owner=summoner, type=craft, rune__isnull=True).order_by('stat', 'quality') context['crafts'] = craft_box return render(request, 'herders/profile/runes/inventory_crafts.html', context) else: return render(request, 'herders/profile/not_public.html') @login_required def rune_add(request, profile_name): form = AddRuneInstanceForm(request.POST or None) form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/runes/add_form.html') if request.method == 'POST': if form.is_valid(): # Create 
the rune instance new_rune = form.save(commit=False) new_rune.owner = request.user.summoner new_rune.save() messages.success(request, 'Added ' + str(new_rune)) # Send back blank form form = AddRuneInstanceForm() form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) context = {'add_rune_form': form} context.update(csrf(request)) response_data = { 'code': 'success', 'html': template.render(context) } else: context = {'add_rune_form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } else: # Check for any pre-filled GET parameters slot = request.GET.get('slot', None) assigned_to = request.GET.get('assigned_to', None) try: assigned_monster = MonsterInstance.objects.get(owner=request.user.summoner, pk=assigned_to) except MonsterInstance.DoesNotExist: assigned_monster = None form = AddRuneInstanceForm(initial={ 'assigned_to': assigned_monster, 'slot': slot if slot is not None else 1, }) form.helper.form_action = reverse('herders:rune_add', kwargs={'profile_name': profile_name}) # Return form filled in and errors shown context = {'add_rune_form': form} context.update(csrf(request)) response_data = { 'html': template.render(context) } return JsonResponse(response_data) @login_required def rune_edit(request, profile_name, rune_id): rune = get_object_or_404(RuneInstance, pk=rune_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddRuneInstanceForm(request.POST or None, instance=rune, auto_id='edit_id_%s') form.helper.form_action = reverse('herders:rune_edit', kwargs={'profile_name': profile_name, 'rune_id': rune_id}) template = loader.get_template('herders/profile/runes/add_form.html') if is_owner: context = {'add_rune_form': form} context.update(csrf(request)) if request.method == 'POST' and 
form.is_valid(): rune = form.save() messages.success(request, 'Saved changes to ' + str(rune)) form = AddRuneInstanceForm(auto_id='edit_id_%s') form.helper.form_action = reverse('herders:rune_edit', kwargs={'profile_name': profile_name, 'rune_id': rune_id}) context = {'add_rune_form': form} context.update(csrf(request)) response_data = { 'code': 'success', 'html': template.render(context) } else: context = {'add_rune_form': form} context.update(csrf(request)) # Return form filled in and errors shown response_data = { 'code': 'error', 'html': template.render(context) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_assign(request, profile_name, instance_id, slot=None): rune_queryset = RuneInstance.objects.filter(owner=request.user.summoner, assigned_to=None) filter_form = AssignRuneForm(request.POST or None, initial={'slot': slot}, prefix='assign') filter_form.helper.form_action = reverse('herders:rune_assign', kwargs={'profile_name': profile_name, 'instance_id': instance_id}) if slot: rune_queryset = rune_queryset.filter(slot=slot) if request.method == 'POST' and filter_form.is_valid(): rune_filter = RuneInstanceFilter(filter_form.cleaned_data, queryset=rune_queryset) template = loader.get_template('herders/profile/runes/assign_results.html') context = { 'filter': rune_filter.qs, 'profile_name': profile_name, 'instance_id': instance_id, } context.update(csrf(request)) response_data = { 'code': 'results', 'html': template.render(context) } else: rune_filter = RuneInstanceFilter(queryset=rune_queryset) template = loader.get_template('herders/profile/runes/assign_form.html') context = { 'filter': rune_filter.qs, 'form': filter_form, 'profile_name': profile_name, 'instance_id': instance_id, } context.update(csrf(request)) response_data = { 'code': 'success', 'html': template.render(context) } return JsonResponse(response_data) @login_required def rune_assign_choice(request, profile_name, instance_id, rune_id): monster = 
get_object_or_404(MonsterInstance, pk=instance_id) rune = get_object_or_404(RuneInstance, pk=rune_id) if rune.assigned_to is not None: # TODO: Warn about removing from other monster? pass # Check for existing rune. existing_runes = monster.runeinstance_set.filter(slot=rune.slot) for existing_rune in existing_runes: existing_rune.assigned_to = None rune.assigned_to = monster rune.save() monster.save() response_data = { 'code': 'success', } return JsonResponse(response_data) @login_required def rune_unassign(request, profile_name, rune_id): rune = get_object_or_404(RuneInstance, pk=rune_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: mon = rune.assigned_to rune.assigned_to = None rune.save() if mon: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required() def rune_unassign_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) assigned_mons = [] assigned_runes = RuneInstance.objects.filter(owner=summoner, assigned_to__isnull=False) number_assigned = assigned_runes.count() if is_owner: for rune in assigned_runes: if rune.assigned_to not in assigned_mons: assigned_mons.append(rune.assigned_to) rune.assigned_to = None rune.save() # Resave monster instances that had runes removed to recalc stats for mon in assigned_mons: mon.save() messages.success(request, 'Unassigned ' + str(number_assigned) + ' rune(s).') response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_delete(request, profile_name, rune_id): rune = 
get_object_or_404(RuneInstance, pk=rune_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: mon = rune.assigned_to messages.warning(request, 'Deleted ' + str(rune)) rune.delete() if mon: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_delete_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: # Delete the runes death_row = RuneInstance.objects.filter(owner=summoner) number_killed = death_row.count() assigned_mons = [] for rune in death_row: if rune.assigned_to and rune.assigned_to not in assigned_mons: assigned_mons.append(rune.assigned_to) death_row.delete() # Delete the crafts RuneCraftInstance.objects.filter(owner=summoner).delete() messages.warning(request, 'Deleted ' + str(number_killed) + ' runes.') for mon in assigned_mons: mon.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_resave_all(request, profile_name): try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: for r in RuneInstance.objects.filter(owner=summoner, substats__isnull=True): r.save() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_craft_add(request, profile_name): form = AddRuneCraftInstanceForm(request.POST or None) 
form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name}) template = loader.get_template('herders/profile/runes/add_craft_form.html') if request.method == 'POST': if form.is_valid(): # Create the monster instance new_craft = form.save(commit=False) new_craft.owner = request.user.summoner new_craft.save() messages.success(request, 'Added ' + new_craft.get_type_display() + ' ' + str(new_craft)) # Send back blank form form = AddRuneCraftInstanceForm() form.helper.form_action = reverse('herders:rune_craft_add', kwargs={'profile_name': profile_name}) context = {'form': form} context.update(csrf(request)) response_data = { 'code': 'success', 'html': template.render(context) } else: context = {'form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } else: # Return form filled in and errors shown context = {'form': form} context.update(csrf(request)) response_data = { 'html': template.render(context) } return JsonResponse(response_data) @login_required def rune_craft_edit(request, profile_name, craft_id): craft = get_object_or_404(RuneCraftInstance, pk=craft_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) form = AddRuneCraftInstanceForm(request.POST or None, instance=craft) form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id}) template = loader.get_template('herders/profile/runes/add_craft_form.html') if is_owner: if request.method == 'POST' and form.is_valid(): rune = form.save() messages.success(request, 'Saved changes to ' + str(rune)) form = AddRuneInstanceForm() form.helper.form_action = reverse('herders:rune_craft_edit', kwargs={'profile_name': profile_name, 'craft_id': craft_id}) context = {'form': form} 
context.update(csrf(request)) response_data = { 'code': 'success', 'html': template.render(context) } else: # Return form filled in and errors shown context = {'form': form} context.update(csrf(request)) response_data = { 'code': 'error', 'html': template.render(context) } return JsonResponse(response_data) else: return HttpResponseForbidden() @login_required def rune_craft_delete(request, profile_name, craft_id): craft = get_object_or_404(RuneCraftInstance, pk=craft_id) try: summoner = Summoner.objects.select_related('user').get(user__username=profile_name) except Summoner.DoesNotExist: return HttpResponseBadRequest() is_owner = (request.user.is_authenticated() and summoner.user == request.user) if is_owner: messages.warning(request, 'Deleted ' + craft.get_rune_display() + ' ' + str(craft)) craft.delete() response_data = { 'code': 'success', } return JsonResponse(response_data) else: return HttpResponseForbidden()
# ---------------------------------------------------------------------------
# NOTE(review): this span is a before/after pair from a commit dump: an old
# copy of pydigree's Simulation base class, the commit message line
# ("code cleanup"), then the revised copy.  Both copies are reproduced
# verbatim (tokens unchanged); only review comments are added.  Python 2
# code throughout (print statement, xrange, itertools.izip).
# ---------------------------------------------------------------------------
from __future__ import division

import random
from itertools import combinations_with_replacement, izip

from pydigree.common import *
from pydigree.ibs import ibs
from pydigree.io.smartopen import smartopen
from pydigree.io.plink import write_plink, write_map
from pydigree.individual import Individual
from pydigree.exceptions import SimulationError


# A base class for simulations to inherit from
class Simulation(object):

    def __init__(self, template=None, replications=1000):
        self.template = template
        self.replications = replications
        self.accuracy_threshold = 0.9
        # Constraints are keyed by individual: genotype and IBD constraints.
        self.constraints = {'genotype': {}, 'ibd': {}}
        self.trait = None
        self.founder_genotype_hooks = []

    def set_trait(self, architecture):
        # Trait architecture consumed by predicted_trait_accuracy().
        self.trait = architecture

    def replicate(self):
        raise NotImplementedError("This is a base class don't call me")

    def get_founder_genotypes(self):
        geno_constraints = self.constraints['genotype']
        # NOTE(review): `ped` and `linkeq` are unbound names here — this
        # raises NameError at runtime.  The revised copy below fixes both.
        for ind in ped.founders():
            ind.clear_genotypes()
            if ind not in geno_constraints:
                ind.get_genotypes(linkeq=linkeq)
            else:
                ind.get_constrained_genotypes(geno_constraints[ind],
                                              linkeq=linkeq)
        self.run_founder_genotype_hooks()

    def run(self, verbose=False, writeibd=False, output_predicate=None,
            compression=None):
        # NOTE(review): `self.prefix` is never assigned in this class —
        # presumably supplied by subclasses; the revised copy uses self.label.
        write_map(self.template, '{0}.map'.format(self.prefix))
        for x in xrange(self.replications):
            print 'Replicate %d' % (x + 1)
            self.replicate(
                verbose=verbose, writeibd=writeibd, replicatenumber=x)
            self.write_data(
                x, predicate=output_predicate, compression=compression)

    def write_data(self, replicatenumber, predicate=None, compression=None):
        filename = '{0}-{1}'.format(self.prefix, (replicatenumber + 1))
        write_plink(self.template, filename, predicate=predicate,
                    mapfile=False, compression=compression)

    def _writeibd(self, replicatenumber):
        # Warning: Don't call this function! If the individuals in the pedigree dont have
        # LABEL genotypes, you're just going to get IBS configurations at each locus, not
        # actual IBD calculations.
        #
        # If you have data you want to identify IBD segments in, check
        # pydigree.sgs
        with smartopen('{0}-{1}.ibd.gz'.format(self.prefix,
                                               replicatenumber + 1), 'w') as of:
            for ped in self.template:
                for ind1, ind2 in combinations_with_replacement(ped.individuals, 2):
                    identical = []
                    for chrom_idx, chromosome in enumerate(ind1.chromosomes):
                        if ind1 == ind2:
                            # Self-comparison: every locus is trivially IBD 2.
                            genos = izip(*ind1.genotypes[chrom_idx])
                            ibd = [2 * (x == y) for x, y in genos]
                        else:
                            genos1 = izip(*ind1.genotypes[chrom_idx])
                            genos2 = izip(*ind2.genotypes[chrom_idx])
                            ibd = [ibs(g1, g2) for g1, g2 in izip(genos1, genos2)]
                        identical.extend(ibd)
                    outline = [ped.label, ind1.label, ind2.label] + identical
                    outline = ' '.join([str(x) for x in outline])
                    of.write('{}\n'.format(outline))

    def predicted_trait_accuracy(self, ped):
        calls = [(ind.predicted_phenotype(self.trait),
                  ind.phenotypes['affected'])
                 for ind in ped if ind.phenotypes['affected'] is not None]
        # Remember: in python the bools True and False are actually alternate
        # names for the integers 1 and 0, respectively, so you can do
        # arithmetic with them if you so please. Here we sum up all the
        # correct predictions and divide by the number of predictions made.
        return sum(x == y for x, y in calls) / len(calls)

    def read_constraints(self, filename):
        # Parse a whitespace-delimited constraint file; '#' lines are comments.
        # NOTE(review): the tuple unpacking shadows builtins (type, id, chr).
        if not self.template:
            raise ValueError()

        with open(filename) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                l = line.split()
                if l[0].lower() == 'genotype':
                    type, ped, id, chr, index, allele, chromatid, method = l
                    locus = (chr, index)
                    ind = self.template[ped][id]
                    self.add_genotype_constraint(ind, locus, allele,
                                                 chromatid, method)
                elif l[0].lower() == 'ibd':
                    type, ped, id, ancestor, chr, index, anc_chromatid = l
                    locus = (chr, index)
                    ind = self.template[ped][id]
                    ancestor = self.template[ped][ancestor]
                    self.add_ibd_constraint(ind, ancestor, locus, anc_chromatid)
                else:
                    raise ValueError('Not a valid constraint (%s)' % l[0])

    def add_genotype_constraint(self, ind, location, allele,
                                chromatid, method='set'):
        if not ind.is_founder():
            raise ValueError('Genotype constraints only for founders')
        if chromatid not in 'PM':
            raise ValueError('Not a valid haplotype. Choose P or M')
        # 'M' (maternal) -> chromatid 1, 'P' (paternal) -> chromatid 0.
        chromatid = 1 if chromatid == 'M' else 0
        location = tuple(int(x) for x in location)
        allele = int(allele)
        if ind not in self.constraints['genotype']:
            self.constraints['genotype'][ind] = []
        c = (location, chromatid, allele, method)
        self.constraints['genotype'][ind].append(c)

    def add_ibd_constraint(self, ind, ancestor, location, anchap):
        if anchap not in 'PM':
            raise ValueError('Not a valid haplotype. Choose P or M')
        anchap = 1 if anchap == 'M' else 0
        location = tuple(int(x) for x in location)
        if ind not in self.constraints['ibd']:
            self.constraints['ibd'][ind] = []
        c = (ancestor, location, anchap)
        self.constraints['ibd'][ind].append(c)

    def add_founder_genotype_hook(self, func):
        # NOTE(review): `founder_genotypes_hooks` is an unbound name
        # (NameError); should be self.founder_genotype_hooks — fixed below.
        founder_genotypes_hooks.append(func)

    def run_founder_genotype_hooks(self):
        # NOTE(review): `self.founder_genotypes_hooks` is never assigned
        # (__init__ sets founder_genotype_hooks) — fixed below.
        for hook in self.founder_genotypes_hooks:
            for founder in self.template.founders():
                hook(founder)
code cleanup
# --- revised copy (after the "code cleanup" commit) ------------------------
#License#
#bitHopper by Colin Rice is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
#Based on a work at github.com.
from __future__ import division

import random
from itertools import combinations_with_replacement, izip

from pydigree.common import *
from pydigree.ibs import ibs
from pydigree.io.smartopen import smartopen
from pydigree.io.plink import write_plink, write_map
from pydigree.io.base import write_phenotypes
from pydigree.individual import Individual
from pydigree.exceptions import SimulationError


# A base class for simulations to inherit from
class Simulation(object):

    def __init__(self, template=None, label=None, replications=1000):
        self.template = template
        # Output-file prefix; replaces the old (never-assigned) self.prefix.
        self.label = label if label is not None else 'unlabeled'
        self.replications = replications
        self.accuracy_threshold = 0.9
        self.constraints = {'genotype': {}, 'ibd': {}}
        self.trait = None
        self.founder_genotype_hooks = []

    def set_trait(self, architecture):
        self.trait = architecture

    def replicate(self):
        raise NotImplementedError("This is a base class don't call me")

    def get_founder_genotypes(self, linkeq=True):
        # Fixed relative to the old copy: linkeq is now a parameter and the
        # loop iterates self.template.founders().
        geno_constraints = self.constraints['genotype']
        for ind in self.template.founders():
            ind.clear_genotypes()
            if ind not in geno_constraints:
                ind.get_genotypes(linkeq=linkeq)
            else:
                ind.get_constrained_genotypes(geno_constraints[ind],
                                              linkeq=linkeq)
        self.run_founder_genotype_hooks()

    def run(self, verbose=False, writeibd=False, output_predicate=None,
            compression=None):
        write_map(self.template, '{0}.map'.format(self.label))
        for x in xrange(self.replications):
            print 'Replicate %d' % (x + 1)
            self.replicate(
                verbose=verbose, writeibd=writeibd, replicatenumber=x)
            self.write_data(
                x, predicate=output_predicate, compression=compression)

    def write_data(self, replicatenumber, predicate=None, compression=None):
        filename = '{0}-{1}'.format(self.label, (replicatenumber + 1))
        write_plink(self.template, filename, predicate=predicate,
                    mapfile=False, compression=compression)
        # New in this revision: also dump phenotypes next to the PLINK files.
        write_phenotypes(self.template, filename + '.csv', predicate=predicate)

    def _writeibd(self, replicatenumber):
        # Warning: Don't call this function! If the individuals in the pedigree dont have
        # LABEL genotypes, you're just going to get IBS configurations at each locus, not
        # actual IBD calculations.
        #
        # If you have data you want to identify IBD segments in, check
        # pydigree.sgs
        with smartopen('{0}-{1}.ibd.gz'.format(self.label,
                                               replicatenumber + 1), 'w') as of:
            for ped in self.template:
                for ind1, ind2 in combinations_with_replacement(ped.individuals, 2):
                    identical = []
                    for chrom_idx, chromosome in enumerate(ind1.chromosomes):
                        if ind1 == ind2:
                            genos = izip(*ind1.genotypes[chrom_idx])
                            ibd = [2 * (x == y) for x, y in genos]
                        else:
                            genos1 = izip(*ind1.genotypes[chrom_idx])
                            genos2 = izip(*ind2.genotypes[chrom_idx])
                            ibd = [ibs(g1, g2) for g1, g2 in izip(genos1, genos2)]
                        identical.extend(ibd)
                    outline = [ped.label, ind1.label, ind2.label] + identical
                    outline = ' '.join([str(x) for x in outline])
                    of.write('{}\n'.format(outline))

    def predicted_trait_accuracy(self, ped):
        calls = [(ind.predicted_phenotype(self.trait),
                  ind.phenotypes['affected'])
                 for ind in ped if ind.phenotypes['affected'] is not None]
        # Remember: in python the bools True and False are actually alternate
        # names for the integers 1 and 0, respectively, so you can do
        # arithmetic with them if you so please. Here we sum up all the
        # correct predictions and divide by the number of predictions made.
        return sum(x == y for x, y in calls) / len(calls)

    def read_constraints(self, filename):
        if not self.template:
            raise ValueError()

        with open(filename) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                l = line.split()
                if l[0].lower() == 'genotype':
                    type, ped, id, chr, index, allele, chromatid, method = l
                    locus = (chr, index)
                    ind = self.template[ped][id]
                    self.add_genotype_constraint(ind, locus, allele,
                                                 chromatid, method)
                elif l[0].lower() == 'ibd':
                    type, ped, id, ancestor, chr, index, anc_chromatid = l
                    locus = (chr, index)
                    ind = self.template[ped][id]
                    ancestor = self.template[ped][ancestor]
                    self.add_ibd_constraint(ind, ancestor, locus, anc_chromatid)
                else:
                    raise ValueError('Not a valid constraint (%s)' % l[0])

    def add_genotype_constraint(self, ind, location, allele,
                                chromatid, method='set'):
        if not ind.is_founder():
            raise ValueError('Genotype constraints only for founders')
        if chromatid not in 'PM':
            raise ValueError('Not a valid haplotype. Choose P or M')
        chromatid = 1 if chromatid == 'M' else 0
        location = tuple(int(x) for x in location)
        allele = int(allele)
        if ind not in self.constraints['genotype']:
            self.constraints['genotype'][ind] = []
        c = (location, chromatid, allele, method)
        self.constraints['genotype'][ind].append(c)

    def add_ibd_constraint(self, ind, ancestor, location, anchap):
        if anchap not in 'PM':
            raise ValueError('Not a valid haplotype. Choose P or M')
        anchap = 1 if anchap == 'M' else 0
        location = tuple(int(x) for x in location)
        if ind not in self.constraints['ibd']:
            self.constraints['ibd'][ind] = []
        c = (ancestor, location, anchap)
        self.constraints['ibd'][ind].append(c)

    def add_founder_genotype_hook(self, func):
        # Fixed: appends to the attribute initialized in __init__.
        self.founder_genotype_hooks.append(func)

    def run_founder_genotype_hooks(self):
        # Fixed: iterates the attribute initialized in __init__.
        for hook in self.founder_genotype_hooks:
            for founder in self.template.founders():
                hook(founder)
# ---------------------------------------------------------------------------
# NOTE(review): before/after pair from a commit dump: an old copy of pyemma's
# Discretizer, the commit message line ("[discretizer] reflect module
# movement"), then the revised copy.  The only code change is the import path
# of build_chain (pyemma.coordinates.util.chaining ->
# pyemma.coordinates.util.stat.chaining).  Python 2 code (`long`).
# ---------------------------------------------------------------------------
__author__ = 'noe'

import psutil
import numpy as np

from pyemma.coordinates.clustering.interface import AbstractClustering
from pyemma.coordinates.transform.transformer import Transformer
from pyemma.coordinates.io.reader import ChunkedReader
from pyemma.coordinates.io.feature_reader import FeatureReader
from pyemma.coordinates.util.chaining import build_chain
from pyemma.util.log import getLogger

logger = getLogger('Discretizer')

__all__ = ['Discretizer']


class Discretizer(object):

    """
    A Discretizer gets a FeatureReader, which defines the features (distances,
    angles etc.) of given trajectory data and passes this data in a memory
    efficient way through the given pipeline of a Transformer and a clustering.
    The clustering object is responsible for assigning the data to discrete
    states.

    Currently the constructor will calculate everything instantly.

    Parameters
    ----------
    reader : a FeatureReader object
        reads trajectory data and selects features.
    transform : a Transformer object (optional)
        the Transformer will be used to e.g reduce dimensionality of inputs.
    cluster : a clustering object
        used to assign input data to discrete states/ discrete trajectories.
    """

    def __init__(self, reader, transform=None, cluster=None, chunksize=None):
        # check input
        assert isinstance(reader, ChunkedReader), \
            'reader is not of the correct type'
        if (transform is not None):
            assert isinstance(transform, Transformer), \
                'transform is not of the correct type'
        if cluster is None:
            raise ValueError('Must specify a clustering algorithm!')
        else:
            assert isinstance(cluster, Transformer), \
                'cluster is not of the correct type'

        if hasattr(reader, 'featurizer'):  # reader is a FeatureReader
            if reader.featurizer.dimension == 0:
                logger.warning("no features selected!")

        # Pipeline order: reader -> (optional transform) -> cluster.
        self.transformers = [reader]

        if transform is not None:
            self.transformers.append(transform)

        self.transformers.append(cluster)

        if chunksize is not None:
            # NOTE(review): on this branch self._chunksize is never assigned;
            # the chunksize property would raise AttributeError unless
            # build_chain sets it indirectly — TODO confirm.
            build_chain(self.transformers, chunksize)
        else:
            self._chunksize = None
            build_chain(self.transformers)
            self._estimate_chunksize_from_mem_requirement(reader)

        self._parameterized = False

    def run(self):
        """ reads all data and discretizes it into discrete trajectories """
        for trans in self.transformers:
            trans.parametrize()
        self._parameterized = True

    @property
    def dtrajs(self):
        """ get discrete trajectories """
        if not self._parameterized:
            logger.info("not yet parametrized, running now.")
            self.run()
        return self.transformers[-1].dtrajs

    @property
    def chunksize(self):
        return self._chunksize

    @chunksize.setter
    def chunksize(self, cs):
        self._chunksize = cs
        # update transformers to use new chunksize
        for trans in self.transformers:
            trans.chunksize = cs

    def save_dtrajs(self, prefix='', output_format='ascii', extension='.dtraj'):
        """saves calculated discrete trajectories. Filenames are taken from
        given reader. If data comes from memory dtrajs are written to a default
        filename.

        Parameters
        ----------
        prefix : str
            prepend prefix to filenames.
        output_format : str
            if format is 'ascii' dtrajs will be written as csv files, otherwise
            they will be written as NumPy .npy files.
        extension : str
            file extension to append (eg. '.itraj')
        """
        clustering = self.transformers[-1]
        reader = self.transformers[0]

        assert isinstance(clustering, AbstractClustering)

        trajfiles = None
        if isinstance(reader, FeatureReader):
            trajfiles = reader.trajfiles

        clustering.save_dtrajs(trajfiles, prefix, output_format, extension)

    def _estimate_chunksize_from_mem_requirement(self, reader):
        """
        estimate memory requirement from chain of transformers and sets a
        chunksize accordingly
        """
        if not hasattr(reader, 'get_memory_per_frame'):
            self.chunksize = 0
            return

        M = psutil.virtual_memory()[1]  # available RAM in bytes
        logger.info("available RAM: %i" % M)
        const_mem = long(0)
        mem_per_frame = long(0)

        for trans in self.transformers:
            mem_per_frame += trans.get_memory_per_frame()
            const_mem += trans.get_constant_memory()
        logger.info("per-frame memory requirements: %i" % mem_per_frame)

        # maximum allowed chunk size
        logger.info("const mem: %i" % const_mem)
        chunksize = (M - const_mem) / mem_per_frame
        if chunksize < 0:
            raise MemoryError(
                'Not enough memory for desired transformation chain!')

        # is this chunksize sufficient to store full trajectories?
        chunksize = min(chunksize, np.max(reader.trajectory_lengths()))
        logger.info("resulting chunk size: %i" % chunksize)

        # set chunksize
        self.chunksize = chunksize

        # any memory unused? if yes, we can store results
        Mfree = M - const_mem - chunksize * mem_per_frame
        logger.info("free memory: %i" % Mfree)

        # starting from the back of the pipeline, store outputs if possible
        for trans in reversed(self.transformers):
            mem_req_trans = trans.n_frames_total() * \
                trans.get_memory_per_frame()
            if Mfree > mem_req_trans:
                Mfree -= mem_req_trans
                # TODO: before we are allowed to call this method, we have to ensure all memory requirements are correct!
                # trans.operate_in_memory()
                logger.info("spending %i bytes to operate in main memory: %s "
                            % (mem_req_trans, trans.describe()))
[discretizer] reflect module movement
# --- revised copy (after the "[discretizer] reflect module movement"
# --- commit); identical except for the build_chain import path -------------
__author__ = 'noe'

import psutil
import numpy as np

from pyemma.coordinates.clustering.interface import AbstractClustering
from pyemma.coordinates.transform.transformer import Transformer
from pyemma.coordinates.io.reader import ChunkedReader
from pyemma.coordinates.io.feature_reader import FeatureReader
from pyemma.coordinates.util.stat.chaining import build_chain
from pyemma.util.log import getLogger

logger = getLogger('Discretizer')

__all__ = ['Discretizer']


class Discretizer(object):

    """
    A Discretizer gets a FeatureReader, which defines the features (distances,
    angles etc.) of given trajectory data and passes this data in a memory
    efficient way through the given pipeline of a Transformer and a clustering.
    The clustering object is responsible for assigning the data to discrete
    states.

    Currently the constructor will calculate everything instantly.

    Parameters
    ----------
    reader : a FeatureReader object
        reads trajectory data and selects features.
    transform : a Transformer object (optional)
        the Transformer will be used to e.g reduce dimensionality of inputs.
    cluster : a clustering object
        used to assign input data to discrete states/ discrete trajectories.
    """

    def __init__(self, reader, transform=None, cluster=None, chunksize=None):
        # check input
        assert isinstance(reader, ChunkedReader), \
            'reader is not of the correct type'
        if (transform is not None):
            assert isinstance(transform, Transformer), \
                'transform is not of the correct type'
        if cluster is None:
            raise ValueError('Must specify a clustering algorithm!')
        else:
            assert isinstance(cluster, Transformer), \
                'cluster is not of the correct type'

        if hasattr(reader, 'featurizer'):  # reader is a FeatureReader
            if reader.featurizer.dimension == 0:
                logger.warning("no features selected!")

        self.transformers = [reader]

        if transform is not None:
            self.transformers.append(transform)

        self.transformers.append(cluster)

        if chunksize is not None:
            build_chain(self.transformers, chunksize)
        else:
            self._chunksize = None
            build_chain(self.transformers)
            self._estimate_chunksize_from_mem_requirement(reader)

        self._parameterized = False

    def run(self):
        """ reads all data and discretizes it into discrete trajectories """
        for trans in self.transformers:
            trans.parametrize()
        self._parameterized = True

    @property
    def dtrajs(self):
        """ get discrete trajectories """
        if not self._parameterized:
            logger.info("not yet parametrized, running now.")
            self.run()
        return self.transformers[-1].dtrajs

    @property
    def chunksize(self):
        return self._chunksize

    @chunksize.setter
    def chunksize(self, cs):
        self._chunksize = cs
        # update transformers to use new chunksize
        for trans in self.transformers:
            trans.chunksize = cs

    def save_dtrajs(self, prefix='', output_format='ascii', extension='.dtraj'):
        """saves calculated discrete trajectories. Filenames are taken from
        given reader. If data comes from memory dtrajs are written to a default
        filename.

        Parameters
        ----------
        prefix : str
            prepend prefix to filenames.
        output_format : str
            if format is 'ascii' dtrajs will be written as csv files, otherwise
            they will be written as NumPy .npy files.
        extension : str
            file extension to append (eg. '.itraj')
        """
        clustering = self.transformers[-1]
        reader = self.transformers[0]

        assert isinstance(clustering, AbstractClustering)

        trajfiles = None
        if isinstance(reader, FeatureReader):
            trajfiles = reader.trajfiles

        clustering.save_dtrajs(trajfiles, prefix, output_format, extension)

    def _estimate_chunksize_from_mem_requirement(self, reader):
        """
        estimate memory requirement from chain of transformers and sets a
        chunksize accordingly
        """
        if not hasattr(reader, 'get_memory_per_frame'):
            self.chunksize = 0
            return

        M = psutil.virtual_memory()[1]  # available RAM in bytes
        logger.info("available RAM: %i" % M)
        const_mem = long(0)
        mem_per_frame = long(0)

        for trans in self.transformers:
            mem_per_frame += trans.get_memory_per_frame()
            const_mem += trans.get_constant_memory()
        logger.info("per-frame memory requirements: %i" % mem_per_frame)

        # maximum allowed chunk size
        logger.info("const mem: %i" % const_mem)
        chunksize = (M - const_mem) / mem_per_frame
        if chunksize < 0:
            raise MemoryError(
                'Not enough memory for desired transformation chain!')

        # is this chunksize sufficient to store full trajectories?
        chunksize = min(chunksize, np.max(reader.trajectory_lengths()))
        logger.info("resulting chunk size: %i" % chunksize)

        # set chunksize
        self.chunksize = chunksize

        # any memory unused? if yes, we can store results
        Mfree = M - const_mem - chunksize * mem_per_frame
        logger.info("free memory: %i" % Mfree)

        # starting from the back of the pipeline, store outputs if possible
        for trans in reversed(self.transformers):
            mem_req_trans = trans.n_frames_total() * \
                trans.get_memory_per_frame()
            if Mfree > mem_req_trans:
                Mfree -= mem_req_trans
                # TODO: before we are allowed to call this method, we have to ensure all memory requirements are correct!
                # trans.operate_in_memory()
                logger.info("spending %i bytes to operate in main memory: %s "
                            % (mem_req_trans, trans.describe()))
#!/usr/bin/env python
"""
Created as part of the StratusLab project (http://stratuslab.eu),
co-funded by the European Commission under the Grant Agreement
INSFO-RI-261552.

Copyright (c) 2011, Centre National de la Recherche Scientifique (CNRS)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# NOTE(review): Python 2 script (ConfigParser, StringIO, optparse, commands).
# This chunk is TRUNCATED: IscsiPersistentDisk.detach below is cut off
# mid-statement ("unreg =") — the tail is preserved verbatim.

__version__ = "1.0"
__author__ = "Guillaume PHILIPPON <guillaume.philippon@lal.in2p3.fr>"

import sys
sys.path.append('/var/lib/stratuslab/python')

import os
import re
from optparse import OptionParser, OptionGroup
import ConfigParser
import socket
import httplib2
import json
from StringIO import StringIO
from urllib import urlencode
from subprocess import call
import commands

# Reference configuration for /etc/stratuslab/pdisk-host.conf (documentation
# only; never parsed by this script).
sample_example="""
[main]
pdisk_user=pdisk
pdisk_passwd=xxxxxx
register_filename=pdisk
getTurlCallback=/path/to/script
vm_dir=/var/lib/one

[iscsi]
iscsiadm=/usr/sbin/iscsiadm
"""

config = ConfigParser.RawConfigParser()
config.read('/etc/stratuslab/pdisk-host.conf')

# Module-level settings read from the config file; login/pswd/vm_dir may be
# overridden by command-line options below.
iscsiadm=config.get("iscsi","iscsiadm")
login=config.get("main","pdisk_user")
pswd=config.get("main","pdisk_passwd")
getTurlCallback=config.get("main","getTurlCallback")
vm_dir=config.get("main","vm_dir")

parser=OptionParser()
parser.add_option("--pdisk-id", dest="persistent_disk_id",
                  help="Persistent disk id ( pdisk:enpoint:port:disk_uuid )", metavar="PID" )
parser.add_option("--vm-id", dest="vm_id",
                  help="VM id", metavar="ID" )
parser.add_option("--vm-dir", dest="vm_dir",
                  help="Directory where device will be created", metavar="DIR" )
parser.add_option("--vm-disk-name", dest="disk_name",
                  help="Name of disk on Virtual Machine directory" )
parser.add_option("--target", dest="target",
                  help="Device name on Virtual Machine" )
parser.add_option("--turl", dest="turl", metavar="TURL", default="",
                  help="Transport URL of pdisk (protocol://server/protocol-option-to-access-file)" )
parser.add_option("--username", dest="username",
                  help="Username use to interact with pdisk server" )
parser.add_option("--password", dest="password",
                  help="Password use to interact with pdisk server" )

action=OptionGroup(parser," Action command")
action.add_option("--attach", dest="attach", action="store_true",
                  help="Attach/Detach backend to hypervisor" )
action.add_option("--register", dest="registration", action="store_true",
                  help="Register/Unregister persistent disk as used on service" )
action.add_option("--link", dest="link", action="store_true",
                  help="Link/Unlink attached disk in Virtual Machine directory" )
action.add_option("--mount", dest="mount", action="store_true",
                  help="Mount/Unmount disk into Virtual Machine" )
action.add_option("--status", dest="status", action="store_true",
                  help="Display status of current persistent disk" )
action.add_option("--no-check", dest="no_check", action="store_true",
                  help="Disable check if device is used" )
action.add_option("--op", dest="operation", metavar="OP",
                  help="up : active persistent disk ( register / attach / link / mount ) -- down : desactive persistent disk ( unmount / unlink / detach / unregister )" )
parser.add_option_group(action)

(options, args) = parser.parse_args()

# Option cross-validation.
# NOTE(review): `raise parser.error(...)` is odd — optparse's
# OptionParser.error() already exits the process itself, so the `raise`
# never evaluates; harmless but misleading.
if not options.operation:
    raise parser.error("--op options is mandatory")
if options.attach:
    if not options.persistent_disk_id :
        raise parser.error("--attach option need --pdisk-id options")
if options.registration:
    if not options.persistent_disk_id or not options.vm_id:
        raise parser.error("--register need --pdisk-id and --vm-id options")
if options.link:
    if not options.persistent_disk_id or ( not vm_dir and not options.vm_dir ) or not options.vm_id or not options.disk_name :
        raise parser.error("--link need --pdisk-id, --vm-disk-name, --vm-id options are needed, --vm-dir if not define on configuration file ( /etc/stratuslab/pdisk-host.conf)")
if options.mount:
    if not options.persistent_disk_id or not options.vm_id or not options.target:
        raise parser.error("--mount need --pdisk-id, --target and --vm-id options")
if not options.persistent_disk_id:
    raise parser.error("--pdisk-id is mandatory")

# Command-line overrides for config-file values.
if options.vm_dir:
    vm_dir = options.vm_dir
if options.username:
    login = options.username
if options.password:
    pswd = options.password

"""
Metaclass for general persistent disk client
"""
class PersistentDisk:
    # Base client for one persistent disk; parses the pdisk id / TURL and
    # talks to the pdisk REST service over HTTPS.

    def __registration_uri__(self):
        # Base REST URI for this disk on the pdisk service.
        return "https://"+self.endpoint+":"+self.port+"/pswd/disks/"+self.disk_uuid+"/"

    """
    Register/Unregister mount on pdisk endpoint
    """
    def register(self,login,pswd,vm_id):
        node=socket.gethostbyname(socket.gethostname())
        url = self.__registration_uri__()+"mounts/"
        h = httplib2.Http("/tmp/.cache")
        # NOTE(review): TLS certificate validation is disabled — the service
        # identity is not verified.
        h.disable_ssl_certificate_validation=True
        h.add_credentials(login,pswd)
        data = dict(node=node, vm_id=vm_id,register_only="true")
        try:
            resp, contents = h.request(url,"POST",urlencode(data))
        except httplib2.ServerNotFoundError:
            raise RegisterPersistentDiskException('Register : Server '+self.endpoint+' not found')

    def unregister(self,login,pswd,vm_id):
        node=socket.gethostbyname(socket.gethostname())
        url = self.__registration_uri__()+"mounts/"+self.disk_uuid+"_"+vm_id
        h = httplib2.Http("/tmp/.cache")
        h.add_credentials(login,pswd)
        h.disable_ssl_certificate_validation=True
        try:
            resp, contents = h.request(url,"DELETE")
        except httplib2.ServerNotFoundError:
            raise RegisterPersistentDiskException('Unregister : Server '+self.endpoint+' not found')

    """
    Link/Unlink, create a link between device ( image or physical device)
    and Virtual Machine directory
    """
    def link(self,src,dst):
        try:
            if os.path.exists(dst):
                os.unlink(dst)
            os.symlink(src,dst)
        # NOTE(review): bare except hides the real failure (permissions,
        # missing directory, ...) behind a generic message.
        except:
            raise LinkPersistentDiskException('Link : Error while linking '+src+' to '+dst)

    def unlink(self,link):
        if os.path.exists(link):
            os.unlink(link)

    """
    Mount/Unmount display device to a VM
    """
    def mount(self,vm_id,disk_name,target_device):
        hypervisor_device=vm_dir+"/"+str(vm_id)+"/images/"+disk_name
        domain_name="one-"+str(vm_id)
        cmd="sudo /usr/bin/virsh attach-disk "+domain_name+" "+hypervisor_device+" "+target_device
        # NOTE(review): retcode is ignored here (unlike attach/detach below).
        retcode=call(cmd,shell=True)

    def umount(self,vm_id,target_device):
        domain_name="one-"+str(vm_id)
        cmd="sudo /usr/bin/virsh detach-disk "+domain_name+" "+target_device
        retcode=call(cmd,shell=True)

    """
    _copy used to create a a xxxPersistentDisk object from PersistentDisk
    (xxxPersistentDisk is a inheritated class)
    """
    def __copy__(self,pdisk):
        self.endpoint = pdisk.endpoint
        self.port = pdisk.port
        self.disk_uuid = pdisk.disk_uuid
        self.protocol = pdisk.protocol
        self.server = pdisk.server
        self.image = pdisk.image

    """
    check_mount used to check if pdisk is already used
    return true if pdisk is free
    """
    def check_mount(self,login,pswd):
        url = self.__registration_uri__()
        h = httplib2.Http("/tmp/.cache")
        h.add_credentials(login,pswd)
        h.disable_ssl_certificate_validation=True
        resp, contents = h.request(url)
        #print contents
        if resp.status != 200:
            raise CheckPersistentDiskException('Check_mount : error while check '+self.endpoint+' with url '+url)
        io = StringIO(contents)
        json_output = json.load(io)
        # NOTE(review): the actual in-use check is commented out, so this
        # always reports the disk as free; json_output is parsed but unused.
        # try:
        #     if json_output['count'] != '0':
        #         raise CheckPersistentDiskException('Check_mount : pdisk pdisk:'+ self.endpoint+':'+self.port+':'+self.disk_uuid+' is mounted')
        return False
        # except KeyError:
        #     return False

    """
    __checkTurl__ check and split Transport URL ( proto://server:port/proto_options )
    from pdisk id ( pdisk:endpoint:port:disk_uuid )
    """
    def __checkTurl__(self,turl):
        if turl == "":
            # Default TURL: iSCSI on the pdisk endpoint, well-known IQN layout.
            __url__ = "iscsi://"+self.endpoint+":3260/iqn.2011-01.eu.stratuslab:"+self.disk_uuid+":1"
        else:
            __url__ = turl
        __uri__ = re.match(r"(?P<protocol>.*)://(?P<server>.*)/(?P<image>.*)", __url__)
        try :
            self.protocol = __uri__.group('protocol')
            self.server = __uri__.group('server')
            self.image = __uri__.group('image')
        except AttributeError:
            raise URIPersistentDiskException('TURL '+ turl + ' not match expression protocol://server/protocol-options')

    def __init__(self, pdisk_id, turl):
        try:
            __pdisk__ = re.match(r"pdisk:(?P<server>.*):(?P<port>.*):(?P<disk_uuid>.*)", pdisk_id)
            self.endpoint = __pdisk__.group('server')
            self.port = __pdisk__.group('port')
            self.disk_uuid = __pdisk__.group('disk_uuid')
            self.__checkTurl__(turl)
        except AttributeError:
            raise PersistentDiskException('URI '+pdisk_id+' not match expression pdisk:endpoint:port:disk_uuid')


class IscsiPersistentDisk(PersistentDisk):
    # iSCSI-backed persistent disk: logs the target in/out via iscsiadm and
    # exposes the resulting /dev/disk/by-path device.
    _unix_device_path='/dev/disk/by-path/'

    def image_storage(self):
        # Device path as udev names it: ip-<portal>-iscsi-<iqn>-lun-<lun>.
        # NOTE(review): self.iqn / self.lun are not assigned anywhere visible
        # in this chunk — presumably set in truncated code; verify.
        __portal__ = re.match(r"(?P<server>.*):(?P<port>.*)", self.server)
        __portal_ip__ = socket.gethostbyname( __portal__.group('server') )
        dev = "ip-" + __portal_ip__ + ":" + __portal__.group('port') + "-iscsi-" + self.iqn + "-lun-" + self.lun
        return self._unix_device_path+dev

    def attach(self):
        __portal__ = re.match(r"(?P<server>.*):(?P<port>.*)", self.server)
        __portal_ip__ = socket.gethostbyname(__portal__.group('server'))
        reg = "sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " -o new"
        cmd = "sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " --login"
        retcode = call(reg, shell=True)
        if retcode < 0:
            raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor")
        retcode = call(cmd, shell=True)
        if retcode < 0:
            raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor")

    def detach(self):
        __portal__ = re.match(r"(?P<server>.*):(?P<port>.*)", self.server)
        __portal_ip__ = socket.gethostbyname(__portal__.group('server'))
        cmd = "sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " --logout"
        # NOTE(review): source chunk ends here, mid-assignment.
        unreg =
"sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " -o delete" retcode = call(cmd, shell=True) if retcode < 0: raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor") retcode = call(unreg, shell=True) if retcode < 0: raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor") def __image2iqn__(self, str): __iqn__ = re.match(r"(?P<iqn>.*:.*):(?P<lun>.*)", str) self.iqn = __iqn__.group('iqn') self.lun = __iqn__.group('lun') def __init__(self,pdisk_class,turl): self.__copy__(pdisk_class) self.__image2iqn__(pdisk_class.image) class FilePersistentDisk(PersistentDisk): def __image2file__(self, str): __file__ = re.match(r"(?P<mount_point>.*)/(?P<full_path>.*)", str) self.mount_point = __file__.group('mount_point') self.full_path = __file__.group('full_path') def image_storage(self): return self.server+"/"+self.image def attach(self): pass def detach(self): pass def __init__(self,pdisk_class,turl): self.__copy__(pdisk_class) class PersistentDiskException(Exception): def __init__(self,value): self.value = value class RegisterPersistentDiskException(PersistentDisk): def __init__(self,value): self.value = value class AttachPersistentDiskException(PersistentDisk): def __init__(self,value): self.value = value class URIPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class LinkPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class MountPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class CheckPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class getTurlPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value def __init__(): try: global_pdisk = PersistentDisk(options.persistent_disk_id,options.turl) except getTurlPersistentDiskException: 
print "Error while try to retrive %s" % options.persistent_disk_id return -1 global vm_dir if global_pdisk.protocol == "iscsi" : pdisk = IscsiPersistentDisk(global_pdisk,options.turl) elif global_pdisk.protocol == "file" : pdisk = FilePersistentDisk(global_pdisk,options.turl) else : print "Protocol "+global_pdisk.protocol+" not supported" if options.operation == "up": try: if not options.no_check: pdisk.check_mount(login,pswd) if options.registration: pdisk.register(login,pswd,options.vm_id) if options.attach: pdisk.attach() if options.link: src = pdisk.image_storage() dst = vm_dir+"/"+str(options.vm_id)+"/images/"+options.disk_name pdisk.link(src,dst) if options.mount: pdisk.mount(options.vm_id,options.disk_name,options.target) except CheckPersistentDiskException: print "pdisk is market as used, please check or use --register --op down" except RegisterPersistentDiskException: print "Error while try to register on pdisk" except AttachPersistentDiskException: print "Error while try to attach backend to hypervisor" if options.registration: pdisk.unregister(login,pswd,options.vm_id) except LinkPersistentDiskException: print "Error while try to link %s to %s" % ( src , dst ) if options.attach: pdisk.detach() if options.registration: pdisk.unregister(login,pswd,options.vm_id) except MountPersistentDiskException: print "Error while try to mount %s to %s" % ( options.persistent_disk_id , options.vm_id ) if options.link: pdisk.unlink(dst) if options.attach: pdisk.detach() if options.registration: pdisk.unregister(login,pswd,options.vm_id) elif options.operation == "down": try: if options.mount: pdisk.umount(options.vm_id,options.target) if options.link: dst = vm_dir+"/"+str(options.vm_id)+"/images/"+options.disk_name pdisk.unlink(dst) if options.attach: pdisk.detach() if options.registration: pdisk.unregister(login,pswd,options.vm_id) except MountPersistentDiskException: print "Error while try to umount %s to %s " % ( options.persistent_disk_id , options.vm_id ) except 
LinkPersistentDiskException: print "Error while try to unlink %s" % dst except AttachPersistentDiskException: print "Error while try to detach %s from backend" % options.persistentd_disk_id except RegisterPersistentDiskException: print "Error while try to unregister as unused" else: raise parser.error("--op options only allow up or down") __init__() Minor doc update #!/usr/bin/env python """ Created as part of the StratusLab project (http://stratuslab.eu), co-funded by the European Commission under the Grant Agreement INSFO-RI-261552. Copyright (c) 2011, Centre National de la Recherche Scientifique (CNRS) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" __version__ = "1.0" __author__ = "Guillaume PHILIPPON <guillaume.philippon@lal.in2p3.fr>" import sys sys.path.append('/var/lib/stratuslab/python') import os import re from optparse import OptionParser, OptionGroup import ConfigParser import socket import httplib2 import json from StringIO import StringIO from urllib import urlencode from subprocess import call import commands sample_example=""" [main] pdisk_user=pdisk pdisk_passwd=xxxxxx register_filename=pdisk getTurlCallback=/path/to/script vm_dir=/var/lib/one [iscsi] iscsiadm=/usr/sbin/iscsiadm """ config = ConfigParser.RawConfigParser() config.read('/etc/stratuslab/pdisk-host.conf') iscsiadm=config.get("iscsi","iscsiadm") login=config.get("main","pdisk_user") pswd=config.get("main","pdisk_passwd") getTurlCallback=config.get("main","getTurlCallback") vm_dir=config.get("main","vm_dir") parser=OptionParser() parser.add_option("--pdisk-id", dest="persistent_disk_id", help="Persistent disk id ( pdisk:enpoint:port:disk_uuid )", metavar="PID" ) parser.add_option("--vm-id", dest="vm_id", help="VM id", metavar="ID" ) parser.add_option("--vm-dir", dest="vm_dir", help="Directory where device will be created", metavar="DIR" ) parser.add_option("--vm-disk-name", dest="disk_name", help="Name of disk on Virtual Machine directory" ) parser.add_option("--target", dest="target", help="Device name on Virtual Machine" ) parser.add_option("--turl", dest="turl", metavar="TURL", default="", help="Transport URL of pdisk (protocol://server/protocol-option-to-access-file)" ) parser.add_option("--username", dest="username", help="Username use to interact with pdisk server" ) parser.add_option("--password", dest="password", help="Password use to interact with pdisk server" ) action=OptionGroup(parser," Action command") action.add_option("--attach", dest="attach", action="store_true", help="Attach/Detach backend to hypervisor" ) action.add_option("--register", dest="registration", action="store_true", help="Register/Unregister 
persistent disk as used on service" ) action.add_option("--link", dest="link", action="store_true", help="Link/Unlink attached disk in Virtual Machine directory" ) action.add_option("--mount", dest="mount", action="store_true", help="Mount/Unmount disk into Virtual Machine" ) action.add_option("--status", dest="status", action="store_true", help="Display status of current persistent disk" ) action.add_option("--no-check", dest="no_check", action="store_true", help="Disable check if device is used" ) action.add_option("--op", dest="operation", metavar="OP", help="up : active persistent disk ( register / attach / link / mount ) -- down : desactive persistent disk ( unmount / unlink / detach / unregister )" ) parser.add_option_group(action) (options, args) = parser.parse_args() if not options.operation: raise parser.error("--op options is mandatory") if options.attach: if not options.persistent_disk_id : raise parser.error("--attach option need --pdisk-id options") if options.registration: if not options.persistent_disk_id or not options.vm_id: raise parser.error("--register need --pdisk-id and --vm-id options") if options.link: if not options.persistent_disk_id or ( not vm_dir and not options.vm_dir ) or not options.vm_id or not options.disk_name : raise parser.error("--link need --pdisk-id, --vm-disk-name, --vm-id options are needed, --vm-dir if not define on configuration file ( /etc/stratuslab/pdisk-host.conf)") if options.mount: if not options.persistent_disk_id or not options.vm_id or not options.target: raise parser.error("--mount need --pdisk-id, --target and --vm-id options") if not options.persistent_disk_id: raise parser.error("--pdisk-id is mandatory") if options.vm_dir: vm_dir = options.vm_dir if options.username: login = options.username if options.password: pswd = options.password """ Metaclass for general persistent disk client """ class PersistentDisk: def __registration_uri__(self): return 
"https://"+self.endpoint+":"+self.port+"/pswd/disks/"+self.disk_uuid+"/" """ Register/Unregister mount on pdisk endpoint """ def register(self,login,pswd,vm_id): node=socket.gethostbyname(socket.gethostname()) url = self.__registration_uri__()+"mounts/" h = httplib2.Http("/tmp/.cache") h.disable_ssl_certificate_validation=True h.add_credentials(login,pswd) data = dict(node=node, vm_id=vm_id,register_only="true") try: resp, contents = h.request(url,"POST",urlencode(data)) except httplib2.ServerNotFoundError: raise RegisterPersistentDiskException('Register : Server '+self.endpoint+' not found') def unregister(self,login,pswd,vm_id): node=socket.gethostbyname(socket.gethostname()) url = self.__registration_uri__()+"mounts/"+self.disk_uuid+"_"+vm_id h = httplib2.Http("/tmp/.cache") h.add_credentials(login,pswd) h.disable_ssl_certificate_validation=True try: resp, contents = h.request(url,"DELETE") except httplib2.ServerNotFoundError: raise RegisterPersistentDiskException('Unregister : Server '+self.endpoint+' not found') """ Link/Unlink, create a link between device ( image or physical device) and Virtual Machine directory """ def link(self,src,dst): try: if os.path.exists(dst): os.unlink(dst) os.symlink(src,dst) except: raise LinkPersistentDiskException('Link : Error while linking '+src+' to '+dst) def unlink(self,link): if os.path.exists(link): os.unlink(link) """ Mount/Unmount display device to a VM """ def mount(self,vm_id,disk_name,target_device): hypervisor_device=vm_dir+"/"+str(vm_id)+"/images/"+disk_name domain_name="one-"+str(vm_id) cmd="sudo /usr/bin/virsh attach-disk "+domain_name+" "+hypervisor_device+" "+target_device retcode=call(cmd,shell=True) def umount(self,vm_id,target_device): domain_name="one-"+str(vm_id) cmd="sudo /usr/bin/virsh detach-disk "+domain_name+" "+target_device retcode=call(cmd,shell=True) """ __copy__ used to create a a xxxPersistentDisk object from PersistentDisk (xxxPersistentDisk is a inherited class) """ def __copy__(self,pdisk): 
self.endpoint = pdisk.endpoint self.port = pdisk.port self.disk_uuid = pdisk.disk_uuid self.protocol = pdisk.protocol self.server = pdisk.server self.image = pdisk.image """ check_mount used to check if pdisk is already used return true if pdisk is free """ def check_mount(self,login,pswd): url = self.__registration_uri__() h = httplib2.Http("/tmp/.cache") h.add_credentials(login,pswd) h.disable_ssl_certificate_validation=True resp, contents = h.request(url) #print contents if resp.status != 200: raise CheckPersistentDiskException('Check_mount : error while check '+self.endpoint+' with url '+url) io = StringIO(contents) json_output = json.load(io) # try: # if json_output['count'] != '0': # raise CheckPersistentDiskException('Check_mount : pdisk pdisk:'+ self.endpoint+':'+self.port+':'+self.disk_uuid+' is mounted') return False # except KeyError: # return False """ __checkTurl__ check and split Transport URL ( proto://server:port/proto_options ) from pdisk id ( pdisk:endpoint:port:disk_uuid ) """ def __checkTurl__(self,turl): if turl == "": __url__ = "iscsi://"+self.endpoint+":3260/iqn.2011-01.eu.stratuslab:"+self.disk_uuid+":1" else: __url__ = turl __uri__ = re.match(r"(?P<protocol>.*)://(?P<server>.*)/(?P<image>.*)", __url__) try : self.protocol = __uri__.group('protocol') self.server = __uri__.group('server') self.image = __uri__.group('image') except AttributeError: raise URIPersistentDiskException('TURL '+ turl + ' not match expression protocol://server/protocol-options') def __init__(self, pdisk_id, turl): try: __pdisk__ = re.match(r"pdisk:(?P<server>.*):(?P<port>.*):(?P<disk_uuid>.*)", pdisk_id) self.endpoint = __pdisk__.group('server') self.port = __pdisk__.group('port') self.disk_uuid = __pdisk__.group('disk_uuid') self.__checkTurl__(turl) except AttributeError: raise PersistentDiskException('URI '+pdisk_id+' not match expression pdisk:endpoint:port:disk_uuid') class IscsiPersistentDisk(PersistentDisk): _unix_device_path='/dev/disk/by-path/' def 
image_storage(self): __portal__ = re.match(r"(?P<server>.*):(?P<port>.*)", self.server) __portal_ip__ = socket.gethostbyname( __portal__.group('server') ) dev = "ip-" + __portal_ip__ + ":" + __portal__.group('port') + "-iscsi-" + self.iqn + "-lun-" + self.lun return self._unix_device_path+dev def attach(self): __portal__ = re.match(r"(?P<server>.*):(?P<port>.*)", self.server) __portal_ip__ = socket.gethostbyname(__portal__.group('server')) reg = "sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " -o new" cmd = "sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " --login" retcode = call(reg, shell=True) if retcode < 0: raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor") retcode = call(cmd, shell=True) if retcode < 0: raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor") def detach(self): __portal__ = re.match(r"(?P<server>.*):(?P<port>.*)", self.server) __portal_ip__ = socket.gethostbyname(__portal__.group('server')) cmd = "sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " --logout" unreg = "sudo "+iscsiadm + " --mode node --portal " + __portal_ip__ + ":" + __portal__.group('port') + " --target " + self.iqn + " -o delete" retcode = call(cmd, shell=True) if retcode < 0: raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor") retcode = call(unreg, shell=True) if retcode < 0: raise AttachPersistentDiskException("Error while attach iSCSI disk to hypervisor") def __image2iqn__(self, str): __iqn__ = re.match(r"(?P<iqn>.*:.*):(?P<lun>.*)", str) self.iqn = __iqn__.group('iqn') self.lun = __iqn__.group('lun') def __init__(self,pdisk_class,turl): self.__copy__(pdisk_class) self.__image2iqn__(pdisk_class.image) class FilePersistentDisk(PersistentDisk): def __image2file__(self, str): 
__file__ = re.match(r"(?P<mount_point>.*)/(?P<full_path>.*)", str) self.mount_point = __file__.group('mount_point') self.full_path = __file__.group('full_path') def image_storage(self): return self.server+"/"+self.image def attach(self): pass def detach(self): pass def __init__(self,pdisk_class,turl): self.__copy__(pdisk_class) class PersistentDiskException(Exception): def __init__(self,value): self.value = value class RegisterPersistentDiskException(PersistentDisk): def __init__(self,value): self.value = value class AttachPersistentDiskException(PersistentDisk): def __init__(self,value): self.value = value class URIPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class LinkPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class MountPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class CheckPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value class getTurlPersistentDiskException(PersistentDiskException): def __init__(self,value): self.value = value def __init__(): try: global_pdisk = PersistentDisk(options.persistent_disk_id,options.turl) except getTurlPersistentDiskException: print "Error while try to retrive %s" % options.persistent_disk_id return -1 global vm_dir if global_pdisk.protocol == "iscsi" : pdisk = IscsiPersistentDisk(global_pdisk,options.turl) elif global_pdisk.protocol == "file" : pdisk = FilePersistentDisk(global_pdisk,options.turl) else : print "Protocol "+global_pdisk.protocol+" not supported" if options.operation == "up": try: if not options.no_check: pdisk.check_mount(login,pswd) if options.registration: pdisk.register(login,pswd,options.vm_id) if options.attach: pdisk.attach() if options.link: src = pdisk.image_storage() dst = vm_dir+"/"+str(options.vm_id)+"/images/"+options.disk_name pdisk.link(src,dst) if options.mount: 
pdisk.mount(options.vm_id,options.disk_name,options.target) except CheckPersistentDiskException: print "pdisk is market as used, please check or use --register --op down" except RegisterPersistentDiskException: print "Error while try to register on pdisk" except AttachPersistentDiskException: print "Error while try to attach backend to hypervisor" if options.registration: pdisk.unregister(login,pswd,options.vm_id) except LinkPersistentDiskException: print "Error while try to link %s to %s" % ( src , dst ) if options.attach: pdisk.detach() if options.registration: pdisk.unregister(login,pswd,options.vm_id) except MountPersistentDiskException: print "Error while try to mount %s to %s" % ( options.persistent_disk_id , options.vm_id ) if options.link: pdisk.unlink(dst) if options.attach: pdisk.detach() if options.registration: pdisk.unregister(login,pswd,options.vm_id) elif options.operation == "down": try: if options.mount: pdisk.umount(options.vm_id,options.target) if options.link: dst = vm_dir+"/"+str(options.vm_id)+"/images/"+options.disk_name pdisk.unlink(dst) if options.attach: pdisk.detach() if options.registration: pdisk.unregister(login,pswd,options.vm_id) except MountPersistentDiskException: print "Error while try to umount %s to %s " % ( options.persistent_disk_id , options.vm_id ) except LinkPersistentDiskException: print "Error while try to unlink %s" % dst except AttachPersistentDiskException: print "Error while try to detach %s from backend" % options.persistentd_disk_id except RegisterPersistentDiskException: print "Error while try to unregister as unused" else: raise parser.error("--op options only allow up or down") __init__()
#!/usr/bin/python import sys,os import ConfigParser import mosquitto import json import requests import time class NEEG_DataCollector(mosquitto.Mosquitto): def __init__(self,ip = "localhost", port = 1883, clientId = "NEEG2MQTT", user = "driver", password = "1234", prefix = "ElectricGridData"): mosquitto.Mosquitto.__init__(self,clientId) self.prefix = prefix self.ip = ip self.port = port self.clientId = clientId self.user = user self.password = password if user != None: self.username_pw_set(user,password) self.will_set( topic = "system/" + self.prefix, payload="Offline", qos=1, retain=True) print "Connecting" self.connect(ip,keepalive=10) self.subscribe(self.prefix + "/#", 0) self.on_connect = self.mqtt_on_connect self.on_message = self.mqtt_on_message self.publish(topic = "system/"+ self.prefix, payload="Online", qos=1, retain=True) self.lastupdate = 0 self.lasttimestamp = 0 self.updateperiod = 120 self.oldvalues = {} self.running = True #thread.start_new_thread(self.ControlLoop,()) self.loop_start() def mqtt_on_connect(self, selfX,mosq, result): print "MQTT connected!" #self.subscribe(self.prefix + "/#", 0) def mqtt_on_message(self, selfX,mosq, msg): #print("RECIEVED MQTT MESSAGE: "+msg.topic + " " + str(msg.payload)) return def RunCollection(self): while(self.running): #Mark the time now = time.time() #Get data r = requests.get('http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview') #If we failed retry in a while if r.status_code != 200: print "Failed to get data from Nordic Electric Energy grid" time.sleep(30) continue self.lastupdate = now data = r.json() self.TranslateAndTransmitt(data) nextUpdate = self.lastupdate + self.updateperiod timeToNext = nextUpdate - time.time() if timeToNext > 0: time.sleep(timeToNext) return def TranslateAndTransmitt(self,data): timestamp = data['MeasuredAt'] #If this is old data ignore it. 
if timestamp == self.lasttimestamp: return self.lasttimestamp = timestamp datadic = {} for category in data: if type(data[category]) == type([]): for item in data[category]: #Abort if junk if item[u'value'] == None or item[u'value'] == "" or item[u'titleTranslationId'] == None: continue topic = prefix + "/" + category + "/" +item[u'titleTranslationId'] value = item[u'value'].replace(u"\xa0","") #Has this topic existed before if topic in self.oldvalues: #If yes do we have a new value? if self.oldvalues[topic] == value: #Same value ignore continue #Update update = json.dumps({"time":timestamp,"value":value}) self.publish(topic,update) #Save new value self.oldvalues[topic] = value return if __name__ == '__main__': #Where am I path = os.path.abspath(os.path.dirname(sys.argv[0])) #Load config file... try: ConfigFile = sys.argv[1] except: ConfigFile = path + "/NEEG2MQTT.conf" try: f = open(ConfigFile,"r") f.close() except: try: ConfigFile = path + "/NEEG2MQTT.conf" f = open(ConfigFile,"r") f.close() except: print "Please provide a valid config file! By argument or as default Plugwise2MQTT.cfg file." exit(1) config = ConfigParser.RawConfigParser(allow_no_value=True) config.read(ConfigFile) #Load basic config. 
ip = config.get("MQTTServer","Address") port = config.get("MQTTServer","Port") user = config.get("MQTTServer","User") password = config.get("MQTTServer","Password") prefix = config.get("MQTTServer","Prefix") #Create the data collector the power situation neeg2mqtt = NEEG_DataCollector(ip, port,"NEEG2MQTT", user, password) neeg2mqtt.RunCollection() Some change #!/usr/bin/python import sys,os import ConfigParser import mosquitto import json import requests import time class NEEG_DataCollector(mosquitto.Mosquitto): def __init__(self,ip = "localhost", port = 1883, clientId = "NEEG2MQTT", user = "driver", password = "1234", prefix = "ElectricGridData"): mosquitto.Mosquitto.__init__(self,clientId) self.prefix = prefix self.ip = ip self.port = port self.clientId = clientId self.user = user self.password = password if user != None: self.username_pw_set(user,password) self.will_set( topic = "system/" + self.prefix, payload="Offline", qos=1, retain=True) print "Connecting" self.connect(ip,keepalive=10) #self.subscribe(self.prefix + "/#", 0) self.on_connect = self.mqtt_on_connect self.on_message = self.mqtt_on_message self.publish(topic = "system/"+ self.prefix, payload="Online", qos=1, retain=True) self.lastupdate = 0 self.lasttimestamp = 0 self.updateperiod = 120 self.oldvalues = {} self.running = True #thread.start_new_thread(self.ControlLoop,()) self.loop_start() def mqtt_on_connect(self, selfX,mosq, result): print "MQTT connected!" 
#self.subscribe(self.prefix + "/#", 0) def mqtt_on_message(self, selfX,mosq, msg): #print("RECIEVED MQTT MESSAGE: "+msg.topic + " " + str(msg.payload)) return def RunCollection(self): while(self.running): #Mark the time now = time.time() #Get data r = requests.get('http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview') #If we failed retry in a while if r.status_code != 200: print "Failed to get data from Nordic Electric Energy grid" time.sleep(30) continue self.lastupdate = now data = r.json() self.TranslateAndTransmitt(data) nextUpdate = self.lastupdate + self.updateperiod timeToNext = nextUpdate - time.time() if timeToNext > 0: time.sleep(timeToNext) return def TranslateAndTransmitt(self,data): timestamp = data['MeasuredAt'] #If this is old data ignore it. if timestamp == self.lasttimestamp: return self.lasttimestamp = timestamp datadic = {} for category in data: if type(data[category]) == type([]): for item in data[category]: #Abort if junk if item[u'value'] == None or item[u'value'] == "" or item[u'titleTranslationId'] == None: continue topic = prefix + "/" + category + "/" +item[u'titleTranslationId'] value = item[u'value'].replace(u"\xa0","") #Has this topic existed before if topic in self.oldvalues: #If yes do we have a new value? if self.oldvalues[topic] == value: #Same value ignore continue #Update update = json.dumps({"time":timestamp,"value":value}) self.publish(topic,update) #Save new value self.oldvalues[topic] = value return if __name__ == '__main__': #Where am I path = os.path.abspath(os.path.dirname(sys.argv[0])) #Load config file... try: ConfigFile = sys.argv[1] except: ConfigFile = path + "/NEEG2MQTT.conf" try: f = open(ConfigFile,"r") f.close() except: try: ConfigFile = path + "/NEEG2MQTT.conf" f = open(ConfigFile,"r") f.close() except: print "Please provide a valid config file! By argument or as default Plugwise2MQTT.cfg file." 
exit(1) config = ConfigParser.RawConfigParser(allow_no_value=True) config.read(ConfigFile) #Load basic config. ip = config.get("MQTTServer","Address") port = config.get("MQTTServer","Port") user = config.get("MQTTServer","User") password = config.get("MQTTServer","Password") prefix = config.get("MQTTServer","Prefix") #Create the data collector the power situation neeg2mqtt = NEEG_DataCollector(ip, port,"NEEG2MQTT", user, password) neeg2mqtt.RunCollection()
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * import platform class Vecgeom(CMakePackage): """The vectorized geometry library for particle-detector simulation (toolkits).""" homepage = "https://gitlab.cern.ch/VecGeom/VecGeom" url = "https://gitlab.cern.ch/api/v4/projects/VecGeom%2FVecGeom/repository/archive.tar.gz?sha=v0.3.rc" version('01.01.03', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.01.03', preferred=True) version('01.00.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.00.00') version('00.05.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v00.05.00') version('0.3.rc', sha256='a87a9ea4ab126b59ff9c79182bc0911ead3d76dd197194742e2a35ccd341299d') variant('cxxstd', default='17', values=('11', '14', '17'), multi=False, description='Use the specified C++ standard when building.') variant('vector', default='native', values=('sse3', 'sse4.2', 'native'), multi=False, description='Specify the instruction set for vectorization.') depends_on('cmake@3.5:', type='build') def cmake_args(self): options = [ '-DBACKEND=Scalar', '-DGEANT4=OFF', '-DUSOLIDS=ON', '-DUSOLIDS_VECGEOM=ON', '-DROOT=OFF', '-DNO_SPECIALIZATION=ON', '-DCMAKE_VERBOSE_MAKEFILE=TRUE'] options.append('-DCMAKE_CXX_STANDARD={0}'. format(self.spec.variants['cxxstd'].value)) arch = platform.machine() if arch == 'x86_64': options.append('-DVECGEOM_VECTOR={0}'. format(self.spec.variants['vector'].value)) else: options.append('-DVECGEOM_VECTOR=' + arch) return options Update and exetend VecGeom (#14520) * Add new vecgeom versions, add cuda support, automate target options * Add ROOT, GDML, and external VecCore support to VecGeom * Address reviewer comments * Update vecgeom for CUDA * Update versions # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. 
See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Vecgeom(CMakePackage, CudaPackage): """The vectorized geometry library for particle-detector simulation (toolkits).""" homepage = "https://gitlab.cern.ch/VecGeom/VecGeom" url = "https://gitlab.cern.ch/VecGeom/VecGeom/-/archive/v1.1.6/VecGeom-v1.1.6.tar.gz" git = "https://gitlab.cern.ch/VecGeom/VecGeom.git" maintainers = ['drbenmorgan', 'sethrj'] version('master', branch='master') version('1.1.6', sha256='c4806a6b67d01b40074b8cc6865d78574a6a1c573be51696f2ecdf98b9cb954a') version('1.1.5', sha256='da674f3bbc75c30f56c1a2d251fa8930c899f27fa64b03a36569924030d87b95') version('1.1.3', sha256='ada09e8b6b2fa6c058290302b2cb5a6c2e644192aab1623c31d18c6a2f4c01c8') version('1.0.1', sha256='1eae7ac9014c608e8d8db5568058b8c0fea1a1dc7a8f54157a3a1c997b6fd9eb') version('0.5.2', tag='v00.05.02', commit='a7e0828c915ff936a79e672d1dd84b087a323b51') version('0.3.rc', sha256='a87a9ea4ab126b59ff9c79182bc0911ead3d76dd197194742e2a35ccd341299d') _cxxstd_values = ('11', '14', '17') variant('cxxstd', default='11', values=_cxxstd_values, multi=False, description='Use the specified C++ standard when building') variant('gdml', default=True, description='Support native GDML geometry descriptions') variant('geant4', default=False, description='Support Geant4 geometry construction') variant('root', default=False, description='Support ROOT geometry construction') variant('shared', default=True, description='Build shared libraries') depends_on('veccore@0.5.2:', type=('build', 'link'), when='@1.1.0:') depends_on('veccore@0.4.2', type=('build', 'link'), when='@:1.0') depends_on('veccore+cuda', type=('build', 'link'), when='+cuda') conflicts('+cuda', when='@:1.1.5') for std in _cxxstd_values: depends_on('geant4 cxxstd=' + std, when='+geant4 cxxstd=' + std) depends_on('root cxxstd=' + std, when='+root cxxstd=' + std) depends_on('veccore cxxstd=' + std, when='cxxstd=' + std) 
depends_on('xerces-c cxxstd=' + std, when='+gdml cxxstd=' + std) def cmake_args(self): # Possible target options are from the main CMakeLists.txt, assuming # "best" is last target = self.spec.target vecgeom_arch = "sse2 sse3 ssse3 sse4.1 sse4.2 avx avx2".split() for feature in reversed(vecgeom_arch): if feature.replace('.', '_') in target: target_instructions = feature break else: # No features available (could be 'generic' arch) target_instructions = 'empty' define = CMakePackage.define options = [ define('BACKEND', 'Scalar'), define('BUILTIN_VECCORE', False), define('NO_SPECIALIZATION', True), define('VECGEOM_VECTOR', target_instructions), self.define_from_variant('BUILD_SHARED_LIBS', 'shared'), self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'), self.define_from_variant('CUDA'), self.define_from_variant('GDML'), self.define_from_variant('GEANT4'), self.define_from_variant('ROOT'), ] # Set testing flags build_tests = self.run_tests options.extend([ define('BUILD_TESTING', build_tests), define('CTEST', build_tests), define('GDMLTESTING', build_tests and '+gdml' in self.spec), ]) if '+cuda' in self.spec: arch = self.spec.variants['cuda_arch'].value if len(arch) != 1 or arch[0] == 'none': raise InstallError("Exactly one cuda_arch must be specified") options.append(define('CUDA_ARCH', arch[0])) if self.spec.satisfies("@:0.5.2"): options.extend([ define('USOLIDS', True), define('USOLIDS_VECGEOM', True), ]) return options
from copy import copy
from typing import List, Tuple

from typing_extensions import Literal

from gilgamesh.instruction import Instruction, InstructionID
from gilgamesh.opcodes import AddressMode, Op
from gilgamesh.state import State, StateChange


class CPU:
    # NOTE(review): this appears to model a 65816-style CPU (SEP/REP, PHP/PLP,
    # m/x size flags, 0x7E0000-0x7FFFFF RAM window) used during static
    # disassembly -- confirm against the rest of the gilgamesh project.
    def __init__(self, log, pc: int, p: int, subroutine: int):
        self.log = log
        self.rom = log.rom
        # Current program counter and processor status.
        self.pc = pc
        self.state = State(p)
        # Facts about the CPU state derived from executed instructions.
        self.state_assertion = StateChange()
        # Net state change performed by the subroutine being executed.
        self.state_change = StateChange()
        # Saved (state, state_change) pairs pushed by PHP, popped by PLP.
        self.state_stack: List[Tuple[State, StateChange]] = []
        # Address of the subroutine currently being executed.
        self.subroutine = subroutine

    @property
    def instruction_id(self) -> InstructionID:
        # Identity of the instruction at pc, in the current state/subroutine.
        return InstructionID(self.pc, self.state.p, self.subroutine)

    def copy(self, new_subroutine=False) -> "CPU":
        # Shallow-copy the CPU, deep-copying the mutable state objects.
        cpu = copy(self)
        cpu.state = copy(self.state)
        cpu.state_assertion = copy(self.state_assertion)
        # A freshly entered subroutine starts with an empty state change.
        cpu.state_change = StateChange() if new_subroutine else copy(self.state_change)
        return cpu

    def run(self) -> None:
        # Step until a terminating condition (return/unknown state/visited).
        keep_going = self.step()
        while keep_going:
            keep_going = self.step()

    def step(self) -> bool:
        # Code in RAM cannot be statically analyzed.
        if self.is_ram(self.pc):
            return False
        # Avoid re-visiting the same (pc, p, subroutine) combination.
        if self.log.is_visited(self.instruction_id):
            return False
        opcode = self.rom.read_byte(self.pc)
        argument = self.rom.read_address(self.pc + 1)
        instruction = Instruction(self.log, *self.instruction_id, opcode, argument)
        self.log.add_instruction(instruction)
        return self.execute(instruction)

    def execute(self, instruction: Instruction) -> bool:
        # Returns True to continue executing, False to stop.
        self.pc += instruction.size
        self._derive_state_assertion(instruction)

        if instruction.is_return:
            self.log.add_subroutine_state(self.subroutine, self.state_change)
            return False
        elif instruction.is_interrupt:
            return self._unknown_subroutine_state()
        elif instruction.is_call:
            return self.call(instruction)
        elif instruction.is_jump:
            return self.jump(instruction)
        elif instruction.is_branch:
            self.branch(instruction)
        elif instruction.is_sep_rep:
            self.sep_rep(instruction)
        elif instruction.operation == Op.PHP:
            self.push_state()
        elif instruction.operation == Op.PLP:
            self.pop_state()
        return True

    def branch(self, instruction: Instruction) -> None:
        # Fork a CPU to explore the fall-through path; this CPU takes it.
        cpu = self.copy()
        cpu.run()

        target = instruction.absolute_argument
        assert target is not None
        self.log.add_reference(instruction, target)
        self.pc = target

    def call(self, instruction: Instruction) -> bool:
        target = instruction.absolute_argument
        if target is None:
            # Indirect/unresolvable call target: state becomes unknown.
            return self._unknown_subroutine_state()

        self.log.add_reference(instruction, target)
        self.log.add_subroutine(target)

        # Execute the callee on a forked CPU with a fresh state change.
        cpu = self.copy(new_subroutine=True)
        cpu.subroutine = target
        cpu.pc = target
        cpu.run()

        # Continue only if the callee's return state is unambiguous.
        known = self._propagate_subroutine_state(target)
        if not known:
            return self._unknown_subroutine_state()
        return known

    def jump(self, instruction: Instruction) -> bool:
        target = instruction.absolute_argument
        if target is None:
            return self._unknown_subroutine_state()
        self.log.add_reference(instruction, target)
        self.pc = target
        return True

    def sep_rep(self, instruction: Instruction) -> None:
        # SEP sets status bits, REP resets them; both are tracked in the
        # cumulative state change for the current subroutine.
        arg = instruction.absolute_argument
        assert arg is not None
        if instruction.operation == Op.SEP:
            self.state.set(arg)
            self.state_change.set(arg)
        else:
            self.state.reset(arg)
            self.state_change.reset(arg)
        # Cancel changes already implied by what we know of the state.
        self.state_change.apply_assertion(self.state_assertion)

    def push_state(self) -> None:
        # PHP: save processor status and the current state change.
        self.state_stack.append((copy(self.state), copy(self.state_change)))

    def pop_state(self) -> None:
        # PLP: restore the most recently pushed status/state change.
        self.state, self.state_change = self.state_stack.pop()

    @staticmethod
    def is_ram(address: int) -> bool:
        return (address <= 0x001FFF) or (0x7E0000 <= address <= 0x7FFFFF)

    def _derive_state_assertion(self, instruction: Instruction) -> None:
        # A user-provided assertion for this pc takes precedence.
        saved_assertion = self.log.state_assertions.get(instruction.pc)
        if saved_assertion:
            if saved_assertion.m is not None:
                self.state_assertion.m = saved_assertion.m
            if saved_assertion.x is not None:
                self.state_assertion.x = saved_assertion.x
            return

        # An immediate instruction whose operand size depends on m/x pins
        # that flag's value, provided this subroutine has not changed it.
        if (
            instruction.address_mode == AddressMode.IMMEDIATE_M
            and self.state_change.m is None
        ):
            self.state_assertion.m = self.state.m
        elif (
            instruction.address_mode == AddressMode.IMMEDIATE_X
            and self.state_change.x is None
        ):
            self.state_assertion.x = self.state.x

    def _propagate_subroutine_state(self, subroutine: int) -> bool:
        # Only a single, known return state can be propagated to the caller.
        state_changes = self.log.get_subroutine_states(subroutine)
        if len(state_changes) != 1:
            return False

        change = next(iter(state_changes))
        if change.unknown:
            return False

        if change.m is not None:
            self.state_change.m = self.state.m = change.m
        if change.x is not None:
            self.state_change.x = self.state.x = change.x
        return True

    def _unknown_subroutine_state(self) -> Literal[False]:
        self.log.add_subroutine_state(self.subroutine, StateChange(unknown=True))
        return False


# NOTE(review): the text below is a stray commit message; everything after it
# is a second, commented revision of the same module concatenated into this
# file. The only behavioral-looking diff is that call() ends with
# `return True` instead of `return known` (equivalent: known is True there).
CPU comments

from copy import copy
from typing import List, Tuple

from typing_extensions import Literal

from gilgamesh.instruction import Instruction, InstructionID
from gilgamesh.opcodes import AddressMode, Op
from gilgamesh.state import State, StateChange


class CPU:
    def __init__(self, log, pc: int, p: int, subroutine: int):
        self.log = log
        self.rom = log.rom

        # Processor state.
        self.pc = pc
        self.state = State(p)

        # Change in CPU state caused by the execution of the current subroutine.
        self.state_change = StateChange()
        # What we know about the CPU state based on the
        # sequence of instructions we have executed.
        self.state_assertion = StateChange()
        # Stack formed as a result of sequences of PHP/PLP instructions.
        self.state_stack: List[Tuple[State, StateChange]] = []

        # The subroutine currently being executed.
        self.subroutine = subroutine

    @property
    def instruction_id(self) -> InstructionID:
        # Get the ID of the instruction currently being executed
        # in the context of the current subroutine.
        return InstructionID(self.pc, self.state.p, self.subroutine)

    def copy(self, new_subroutine=False) -> "CPU":
        # Copy the current state of the CPU.
        cpu = copy(self)
        cpu.state = copy(self.state)
        cpu.state_assertion = copy(self.state_assertion)
        # Don't carry over the state change information to new subroutines.
        cpu.state_change = StateChange() if new_subroutine else copy(self.state_change)
        return cpu

    def run(self) -> None:
        keep_going = self.step()
        while keep_going:
            keep_going = self.step()

    def step(self) -> bool:
        # We can't analyze code that lives in RAM.
        if self.is_ram(self.pc):
            return False
        # Don't visit the exact same instruction twice.
        if self.log.is_visited(self.instruction_id):
            return False

        opcode = self.rom.read_byte(self.pc)
        argument = self.rom.read_address(self.pc + 1)
        instruction = Instruction(self.log, *self.instruction_id, opcode, argument)
        self.log.add_instruction(instruction)

        return self.execute(instruction)

    def execute(self, instruction: Instruction) -> bool:
        self.pc += instruction.size
        # See if we can learn something about the *required*
        # state of the CPU based on the current instruction.
        self._derive_state_assertion(instruction)

        if instruction.is_return:
            self.log.add_subroutine_state(self.subroutine, self.state_change)
            return False  # Terminate the execution of this subroutine.
        elif instruction.is_interrupt:
            return self._unknown_subroutine_state()
        elif instruction.is_call:
            return self.call(instruction)
        elif instruction.is_jump:
            return self.jump(instruction)
        elif instruction.is_branch:
            self.branch(instruction)
        elif instruction.is_sep_rep:
            self.sep_rep(instruction)
        elif instruction.operation == Op.PHP:
            self.push_state()
        elif instruction.operation == Op.PLP:
            self.pop_state()
        return True  # Keep executing in the context of this subroutine.

    def branch(self, instruction: Instruction) -> None:
        # Run a parallel instance of the CPU to follow
        # the case in which we don't take the branch.
        cpu = self.copy()
        cpu.run()

        target = instruction.absolute_argument
        assert target is not None
        # Log the fact that the current instruction references the
        # instruction pointed by the branch. Then take the branch.
        self.log.add_reference(instruction, target)
        self.pc = target

    def call(self, instruction: Instruction) -> bool:
        target = instruction.absolute_argument
        if target is None:
            # If we can't reliably derive the address of the subroutine
            # being called, we're left in an unknown state.
            return self._unknown_subroutine_state()

        self.log.add_reference(instruction, target)
        self.log.add_subroutine(target)

        # Run a parallel instance of the CPU to execute
        # the subroutine that is being called.
        cpu = self.copy(new_subroutine=True)
        cpu.subroutine = target
        cpu.pc = target
        cpu.run()

        # If we univocally know what the return state of the
        # called subroutine is, we can propagate it to the
        # current CPU state. Otherwise, to be on the safe
        # side, we need to stop the execution.
        known = self._propagate_subroutine_state(target)
        if not known:
            return self._unknown_subroutine_state()
        return True

    def jump(self, instruction: Instruction) -> bool:
        target = instruction.absolute_argument
        if target is None:
            return self._unknown_subroutine_state()
        self.log.add_reference(instruction, target)
        self.pc = target
        return True

    def sep_rep(self, instruction: Instruction) -> None:
        arg = instruction.absolute_argument
        assert arg is not None

        if instruction.operation == Op.SEP:
            self.state.set(arg)
            self.state_change.set(arg)
        else:
            self.state.reset(arg)
            self.state_change.reset(arg)

        # Simplify the state change by applying our knowledge
        # of the current state. I.e. if we know that the
        # processor is operating in 8-bits accumulator mode
        # and we switch to that same mode, effectively no
        # state change is being performed.
        self.state_change.apply_assertion(self.state_assertion)

    def push_state(self) -> None:
        self.state_stack.append((copy(self.state), copy(self.state_change)))

    def pop_state(self) -> None:
        self.state, self.state_change = self.state_stack.pop()

    @staticmethod
    def is_ram(address: int) -> bool:
        return (address <= 0x001FFF) or (0x7E0000 <= address <= 0x7FFFFF)

    def _derive_state_assertion(self, instruction: Instruction) -> None:
        # If we have defined a custom assertion for the
        # current instruction, retrieve it and apply it.
        saved_assertion = self.log.state_assertions.get(instruction.pc)
        if saved_assertion:
            if saved_assertion.m is not None:
                self.state_assertion.m = saved_assertion.m
            if saved_assertion.x is not None:
                self.state_assertion.x = saved_assertion.x
            return

        # If we're executing an instruction with a certain operand size,
        # and no state change has been performed in the current subroutine,
        # then we can infer that the state of the processor as we enter
        # the subroutine *must* be the same in all cases.
        if (
            instruction.address_mode == AddressMode.IMMEDIATE_M
            and self.state_change.m is None
        ):
            self.state_assertion.m = self.state.m
        elif (
            instruction.address_mode == AddressMode.IMMEDIATE_X
            and self.state_change.x is None
        ):
            self.state_assertion.x = self.state.x

    def _propagate_subroutine_state(self, subroutine: int) -> bool:
        # If the subroutine can return in more than one distinct state,
        # we can't reliably propagate the state to the caller.
        state_changes = self.log.get_subroutine_states(subroutine)
        if len(state_changes) != 1:
            return False

        change = next(iter(state_changes))
        if change.unknown:
            return False

        if change.m is not None:
            self.state_change.m = self.state.m = change.m
        if change.x is not None:
            self.state_change.x = self.state.x = change.x
        return True

    def _unknown_subroutine_state(self) -> Literal[False]:
        self.log.add_subroutine_state(self.subroutine, StateChange(unknown=True))
        return False
#!/usr/bin/python
from __future__ import division
import copy

# Minimal SVG chart generator (Python 2: xrange, old-style division import).
# Example of the output this script emits:
"""
<svg>
<polyline points="0,0 50,0 150,100 250,100 300,150"
    fill="rgb(0,249,249)" stroke-width="0" stroke="rgb(0,0,0)" />
</svg>
"""


# ---- 2-D point helpers: points are [x, y] lists ----

def round_point(point):
    return [int(point[0]), int(point[1])]


def scale_point(point, k):
    return [k[0]*point[0], k[1]*point[1]]


def shift_point(point, offset):
    return [point[0]+offset[0], point[1]+offset[1]]


def round_points(points):
    return [round_point(p) for p in points]


def scale_points(points, k):
    return [scale_point(p, k) for p in points]


def shift_points(points, offset):
    return [shift_point(p, offset) for p in points]


def get_square():
    # Closed unit square (first point repeated to close the polyline).
    return [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]


def get_rectangular(w, h):
    return scale_points(get_square(), [w, h])


def get_color(text):
    # Map a few well-known color names to hex; pass anything else through.
    t = {
        'white': '#fff',
        'black': '#000',
        'red': '#f00',
        'green': '#0f0',
        'blue': '#00f'
    }
    return t.get(text, text)


def close_points(points):
    # Close a polyline down to the x-axis so it can be filled as an area.
    res = points[:]
    first = res[0]
    last = res[-1]
    res.append([last[0], 0])
    res.append([first[0], 0])
    res.append(first)
    return res


def tostr(point):
    return '({0}, {1})'.format(point[0], point[1])


class BoundingBox:
    # Axis-aligned bounding box grown one point at a time.
    def __init__(self, point=None):
        if point is None:
            self.left_bottom = None
            self.right_top = None
        else:
            self.left_bottom = point[:]
            self.right_top = point[:]

    def include(self, point):
        # Expand the box to contain `point`.
        if self.left_bottom is None or self.right_top is None:
            self.left_bottom = point[:]
            self.right_top = point[:]
            return
        self.left_bottom[0] = min(self.left_bottom[0], point[0])
        self.left_bottom[1] = min(self.left_bottom[1], point[1])
        self.right_top[0] = max(self.right_top[0], point[0])
        self.right_top[1] = max(self.right_top[1], point[1])

    def width(self):
        return self.right_top[0] - self.left_bottom[0]

    def height(self):
        return self.right_top[1] - self.left_bottom[1]


class Chart:
    # NOTE(review): mutable default arguments (xlabels=[], ylabels=[]) are
    # shared across instances; also add_labels divides by (n-1), which raises
    # ZeroDivisionError if exactly one label is supplied -- worth confirming
    # and fixing in a behavior-changing commit.
    def __init__(self, width=650, height=75, padding_top=10,
                 padding_bottom=20, padding_left=20, padding_right=40,
                 inverseX=False, xlabels=[], ylabels=[]):
        self.width = width
        self.height = height
        self.padding_top = padding_top
        self.padding_bottom = padding_bottom
        self.padding_left = padding_left
        self.padding_right = padding_right
        self.inverseX = inverseX
        self.traces = []
        self.texts = []
        self.canvas = BoundingBox([0, 0])
        self.xlabels = xlabels
        self.ylabels = ylabels
        self.is_axes_on_top = False
        #self.add_frame() # TODO: remove
        self.add_background()
        self.add_labels()
        self.add_axes()

    def add_frame(self):
        # Debug helper: 1px outline just inside the chart borders.
        points = get_rectangular(self.width-2, self.height-2)
        points = shift_points(points, [1, 1])
        data = {}
        data['points'] = points
        data['atr'] = {}
        data['atr']['fill'] = 'none'
        data['atr']['stroke-width'] = 1
        data['atr']['stroke'] = '#ddd'
        data['atr']['shape-rendering'] = 'crispEdges'
        self.traces.append(data)

    def add_background(self):
        # White rectangle covering the plot area (inside the paddings).
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        points = get_rectangular(w, h)
        padding = [self.padding_left, self.padding_bottom]
        points = shift_points(points, padding)
        data = {}
        data['points'] = points
        data['atr'] = {}
        data['atr']['fill'] = get_color('white')
        self.traces.append(data)

    def add_axes(self):
        # Four axis lines with small `d`-pixel tick overhangs; the overhang
        # side of the x-axes flips when inverseX is set.
        d = 3
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        padding = [self.padding_left, self.padding_bottom]
        data = {}
        data['atr'] = {}
        data['atr']['fill'] = 'none'
        data['atr']['stroke-width'] = 1
        data['atr']['stroke'] = '#999'
        data['atr']['shape-rendering'] = 'crispEdges'
        dx0 = 1 if self.inverseX else d
        dx1 = d if self.inverseX else 0
        # X top
        data = copy.deepcopy(data)
        points = [[-dx0, h], [w+dx1, h]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)
        # X bottom
        data = copy.deepcopy(data)
        points = [[-dx0, 0], [w+dx1, 0]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)
        # Y left
        data = copy.deepcopy(data)
        points = [[0, -d], [0, h]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)
        # Y right
        data = copy.deepcopy(data)
        points = [[w, -d], [w, h]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)

    def add_labels(self):
        # Grid lines plus the x/y label texts; labels are evenly spaced.
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        padding = [self.padding_left, self.padding_bottom]
        data = {}
        data['atr'] = {}
        data['atr']['fill'] = 'none'
        data['atr']['stroke-width'] = 1
        data['atr']['stroke'] = '#ddd'
        data['atr']['shape-rendering'] = 'crispEdges'
        if self.is_axes_on_top:
            # Dotted grid when drawn over a filled series.
            data['atr']['stroke-dasharray'] = '1, 5'
        text = {}
        text['atr'] = {}
        text['atr']['font-family'] = 'Verdana'
        text['atr']['font-size'] = 10
        text['atr']['fill'] = '#777'
        yn = len(self.ylabels)
        step = h / (yn-1)
        for i in xrange(0, yn):
            data = copy.deepcopy(data)
            y = i * step
            points = [[0, y], [w, y]]
            points = shift_points(points, padding)
            data['points'] = points
            self.traces.append(data)
            x = w+5 if self.inverseX else -5
            text = copy.deepcopy(text)
            text['text'] = self.ylabels[i]
            point = shift_point([x, y-3], padding)
            point = self.to_real_coords(point)
            text['atr']['x'] = point[0]
            text['atr']['y'] = point[1]
            text['atr']['text-anchor'] = 'start' if self.inverseX else 'end'
            self.texts.append(text)
        xn = len(self.xlabels)
        step = w / (xn-1)
        for i in xrange(0, xn):
            data = copy.deepcopy(data)
            x = i * step
            points = [[x, 0], [x, h]]
            points = shift_points(points, padding)
            data['points'] = points
            self.traces.append(data)
            text = copy.deepcopy(text)
            # Labels run right-to-left when the x axis is inverted.
            j = xn - i - 1 if self.inverseX else i
            text['text'] = self.xlabels[j]
            point = shift_point([x, -15], padding)
            point = self.to_real_coords(point)
            text['atr']['x'] = point[0]
            text['atr']['y'] = point[1]
            text['atr']['text-anchor'] = 'middle'
            self.texts.append(text)

    def add(self, ys, xs=None, stroke_width=1, stroke='black', fill='none'):
        # Add a data trace in *canvas* (data) coordinates; scaling to pixels
        # is deferred to render time via canvas_to_points.
        ny = len(ys)
        if xs is None:
            xs = range(0, ny)
        n = min(len(xs), ny)
        xs = xs[:n]
        ys = ys[:n]
        points = [[xs[i], ys[i]] for i in xrange(0, n)]
        if fill != 'none':
            # Filled areas cover the grid, so grid/axes get re-drawn on top.
            self.is_axes_on_top = True
            points = close_points(points)
        for p in points:
            self.canvas.include(p)
        data = {}
        data['canvas'] = points
        data['atr'] = {}
        data['atr']['fill'] = fill
        data['atr']['stroke-width'] = stroke_width
        data['atr']['stroke'] = get_color(stroke)
        self.traces.append(data)

    def canvas_to_points(self, canvas):
        # Map data coordinates onto the plot area, honoring inverseX.
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        xk = w / self.canvas.width()
        yk = h / self.canvas.height()
        if self.inverseX:
            canvas = shift_points(canvas, [-self.canvas.width(), 0])
            canvas = scale_points(canvas, [-1, 1])
        points = scale_points(canvas, [xk, yk])
        points = round_points(points)
        padding = [self.padding_left, self.padding_bottom]
        points = shift_points(points, padding)
        return points

    def to_real_coords(self, point):
        # Flip the y axis: internal coords are bottom-up, SVG is top-down.
        point = shift_point(point, [0, -self.height])
        point = scale_point(point, [1, -1])
        return point

    def render_points(self, points):
        points = [self.to_real_coords(p) for p in points]
        coords = ['{0},{1}'.format(p[0], p[1]) for p in points]
        pts = ' '.join(coords)
        return '\tpoints="{0}"'.format(pts)

    def render_trace(self, trace):
        # Emit one <polyline>; 'canvas' traces are converted lazily here.
        res = []
        res.append('<polyline')
        canvas = trace.get('canvas')
        if canvas:
            trace['points'] = self.canvas_to_points(canvas)
        points = trace.get('points')
        if points:
            res.append(self.render_points(points))
        atr = trace.get('atr')
        if atr:
            for a in atr:
                res.append('\t{0}=\"{1}\"'.format(a, atr[a]))
        res.append('/>')
        return res

    def render_text(self, text):
        res = []
        res.append('<text')
        atr = text.get('atr')
        if atr:
            for a in atr:
                res.append('\t{0}=\"{1}\"'.format(a, atr[a]))
        res.append('>')
        res.append(str(text['text']))
        res.append('</text>')
        return res

    def render(self):
        if self.is_axes_on_top:
            # Re-add grid/axes so they sit above any filled trace.
            self.add_labels()
            self.add_axes()
        svg = []
        svg.append('<svg>')
        for t in self.traces:
            svg.extend(self.render_trace(t))
        for t in self.texts:
            svg.extend(self.render_text(t))
        svg.append('</svg>')
        return svg

    def render_to_svg(self, filepath):
        with open(filepath, 'w') as f:
            lines = self.render()
            for line in lines:
                f.write(line + '\n')


def main():
    xlabels = [0, 2, 4, 6, 8, 10, '12 hours']
    ylabels = ['0 %', '50 %', '100 %']
    chart = Chart(inverseX=True, xlabels=xlabels, ylabels=ylabels, height=400)
    #import random
    #ys = [random.randrange(0, 100) for i in xrange(200)]
    # c90c28 dark red
    # 2e7eb3 blue
    # fa730c orange
    # 4aa635 green
    color = '#2e7eb3'
    ys = [10, 60, 60]
    xs = [10, 20, 30]
    chart.add(xs=xs, ys=ys, stroke=color, fill=color)
    color = '#4aa635'
    ys = [40, 70, 100, 98]
    xs = [30, 40, 50, 60]
    chart.add(xs=xs, ys=ys, stroke=color, fill=color)
    chart.render_to_svg('test.svg')


if __name__ == '__main__':
    main()


# NOTE(review): the text below is a stray commit message; everything after it
# is a second, reformatted revision of the same script concatenated into this
# file (the only visible difference is the removed example-SVG docstring).
refactoring, formatting

#!/usr/bin/python
from __future__ import division
import copy


def round_point(point):
    return [int(point[0]), int(point[1])]


def scale_point(point, k):
    return [k[0]*point[0], k[1]*point[1]]


def shift_point(point, offset):
    return [point[0]+offset[0], point[1]+offset[1]]


def round_points(points):
    return [round_point(p) for p in points]


def scale_points(points, k):
    return [scale_point(p, k) for p in points]


def shift_points(points, offset):
    return [shift_point(p, offset) for p in points]


def get_square():
    return [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]


def get_rectangular(w, h):
    return scale_points(get_square(), [w, h])


def get_color(text):
    t = {
        'white': '#fff',
        'black': '#000',
        'red': '#f00',
        'green': '#0f0',
        'blue': '#00f'
    }
    return t.get(text, text)


def close_points(points):
    res = points[:]
    first = res[0]
    last = res[-1]
    res.append([last[0], 0])
    res.append([first[0], 0])
    res.append(first)
    return res


def tostr(point):
    return '({0}, {1})'.format(point[0], point[1])


class BoundingBox:
    def __init__(self, point=None):
        if point is None:
            self.left_bottom = None
            self.right_top = None
        else:
            self.left_bottom = point[:]
            self.right_top = point[:]

    def include(self, point):
        if self.left_bottom is None or self.right_top is None:
            self.left_bottom = point[:]
            self.right_top = point[:]
            return
        self.left_bottom[0] = min(self.left_bottom[0], point[0])
        self.left_bottom[1] = min(self.left_bottom[1], point[1])
        self.right_top[0] = max(self.right_top[0], point[0])
        self.right_top[1] = max(self.right_top[1], point[1])

    def width(self):
        return self.right_top[0] - self.left_bottom[0]

    def height(self):
        return self.right_top[1] - self.left_bottom[1]


class Chart:
    def __init__(self, width=650, height=75, padding_top=10,
                 padding_bottom=20, padding_left=20, padding_right=40,
                 inverseX=False, xlabels=[], ylabels=[]):
        self.width = width
        self.height = height
        self.padding_top = padding_top
        self.padding_bottom = padding_bottom
        self.padding_left = padding_left
        self.padding_right = padding_right
        self.inverseX = inverseX
        self.traces = []
        self.texts = []
        self.canvas = BoundingBox([0, 0])
        self.xlabels = xlabels
        self.ylabels = ylabels
        self.is_axes_on_top = False
        #self.add_frame() # TODO: remove
        self.add_background()
        self.add_labels()
        self.add_axes()

    def add_frame(self):
        points = get_rectangular(self.width-2, self.height-2)
        points = shift_points(points, [1, 1])
        data = {}
        data['points'] = points
        data['atr'] = {}
        data['atr']['fill'] = 'none'
        data['atr']['stroke-width'] = 1
        data['atr']['stroke'] = '#ddd'
        data['atr']['shape-rendering'] = 'crispEdges'
        self.traces.append(data)

    def add_background(self):
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        points = get_rectangular(w, h)
        padding = [self.padding_left, self.padding_bottom]
        points = shift_points(points, padding)
        data = {}
        data['points'] = points
        data['atr'] = {}
        data['atr']['fill'] = get_color('white')
        self.traces.append(data)

    def add_axes(self):
        d = 3
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        padding = [self.padding_left, self.padding_bottom]
        data = {}
        data['atr'] = {}
        data['atr']['fill'] = 'none'
        data['atr']['stroke-width'] = 1
        data['atr']['stroke'] = '#999'
        data['atr']['shape-rendering'] = 'crispEdges'
        dx0 = 1 if self.inverseX else d
        dx1 = d if self.inverseX else 0
        # X top
        data = copy.deepcopy(data)
        points = [[-dx0, h], [w+dx1, h]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)
        # X bottom
        data = copy.deepcopy(data)
        points = [[-dx0, 0], [w+dx1, 0]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)
        # Y left
        data = copy.deepcopy(data)
        points = [[0, -d], [0, h]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)
        # Y right
        data = copy.deepcopy(data)
        points = [[w, -d], [w, h]]
        points = shift_points(points, padding)
        data['points'] = points
        self.traces.append(data)

    def add_labels(self):
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        padding = [self.padding_left, self.padding_bottom]
        data = {}
        data['atr'] = {}
        data['atr']['fill'] = 'none'
        data['atr']['stroke-width'] = 1
        data['atr']['stroke'] = '#ddd'
        data['atr']['shape-rendering'] = 'crispEdges'
        if self.is_axes_on_top:
            data['atr']['stroke-dasharray'] = '1, 5'
        text = {}
        text['atr'] = {}
        text['atr']['font-family'] = 'Verdana'
        text['atr']['font-size'] = 10
        text['atr']['fill'] = '#777'
        yn = len(self.ylabels)
        step = h / (yn-1)
        for i in xrange(0, yn):
            data = copy.deepcopy(data)
            y = i * step
            points = [[0, y], [w, y]]
            points = shift_points(points, padding)
            data['points'] = points
            self.traces.append(data)
            x = w+5 if self.inverseX else -5
            text = copy.deepcopy(text)
            text['text'] = self.ylabels[i]
            point = shift_point([x, y-3], padding)
            point = self.to_real_coords(point)
            text['atr']['x'] = point[0]
            text['atr']['y'] = point[1]
            text['atr']['text-anchor'] = 'start' if self.inverseX else 'end'
            self.texts.append(text)
        xn = len(self.xlabels)
        step = w / (xn-1)
        for i in xrange(0, xn):
            data = copy.deepcopy(data)
            x = i * step
            points = [[x, 0], [x, h]]
            points = shift_points(points, padding)
            data['points'] = points
            self.traces.append(data)
            text = copy.deepcopy(text)
            j = xn - i - 1 if self.inverseX else i
            text['text'] = self.xlabels[j]
            point = shift_point([x, -15], padding)
            point = self.to_real_coords(point)
            text['atr']['x'] = point[0]
            text['atr']['y'] = point[1]
            text['atr']['text-anchor'] = 'middle'
            self.texts.append(text)

    def add(self, ys, xs=None, stroke_width=1, stroke='black', fill='none'):
        ny = len(ys)
        if xs is None:
            xs = range(0, ny)
        n = min(len(xs), ny)
        xs = xs[:n]
        ys = ys[:n]
        points = [[xs[i], ys[i]] for i in xrange(0, n)]
        if fill != 'none':
            self.is_axes_on_top = True
            points = close_points(points)
        for p in points:
            self.canvas.include(p)
        data = {}
        data['canvas'] = points
        data['atr'] = {}
        data['atr']['fill'] = fill
        data['atr']['stroke-width'] = stroke_width
        data['atr']['stroke'] = get_color(stroke)
        self.traces.append(data)

    def canvas_to_points(self, canvas):
        w = self.width - self.padding_left - self.padding_right
        h = self.height - self.padding_top - self.padding_bottom
        xk = w / self.canvas.width()
        yk = h / self.canvas.height()
        if self.inverseX:
            canvas = shift_points(canvas, [-self.canvas.width(), 0])
            canvas = scale_points(canvas, [-1, 1])
        points = scale_points(canvas, [xk, yk])
        points = round_points(points)
        padding = [self.padding_left, self.padding_bottom]
        points = shift_points(points, padding)
        return points

    def to_real_coords(self, point):
        point = shift_point(point, [0, -self.height])
        point = scale_point(point, [1, -1])
        return point

    def render_points(self, points):
        points = [self.to_real_coords(p) for p in points]
        coords = ['{0},{1}'.format(p[0], p[1]) for p in points]
        pts = ' '.join(coords)
        return '\tpoints="{0}"'.format(pts)

    def render_trace(self, trace):
        res = []
        res.append('<polyline')
        canvas = trace.get('canvas')
        if canvas:
            trace['points'] = self.canvas_to_points(canvas)
        points = trace.get('points')
        if points:
            res.append(self.render_points(points))
        atr = trace.get('atr')
        if atr:
            for a in atr:
                res.append('\t{0}=\"{1}\"'.format(a, atr[a]))
        res.append('/>')
        return res

    def render_text(self, text):
        res = []
        res.append('<text')
        atr = text.get('atr')
        if atr:
            for a in atr:
                res.append('\t{0}=\"{1}\"'.format(a, atr[a]))
        res.append('>')
        res.append(str(text['text']))
        res.append('</text>')
        return res

    def render(self):
        if self.is_axes_on_top:
            self.add_labels()
            self.add_axes()
        svg = []
        svg.append('<svg>')
        for t in self.traces:
            svg.extend(self.render_trace(t))
        for t in self.texts:
            svg.extend(self.render_text(t))
        svg.append('</svg>')
        return svg

    def render_to_svg(self, filepath):
        with open(filepath, 'w') as f:
            lines = self.render()
            for line in lines:
                f.write(line + '\n')


def main():
    xlabels = [0, 2, 4, 6, 8, 10, '12 hours']
    ylabels = ['0 %', '50 %', '100 %']
    chart = Chart(inverseX=True, xlabels=xlabels, ylabels=ylabels, height=400)
    #import random
    #ys = [random.randrange(0, 100) for i in xrange(200)]
    # c90c28 dark red
    # 2e7eb3 blue
    # fa730c orange
    # 4aa635 green
    color = '#2e7eb3'
    ys = [10, 60, 60]
    xs = [10, 20, 30]
    chart.add(xs=xs, ys=ys, stroke=color, fill=color)
    color = '#4aa635'
    ys = [40, 70, 100, 98]
    xs = [30, 40, 50, 60]
    chart.add(xs=xs, ys=ys, stroke=color, fill=color)
    chart.render_to_svg('test.svg')


if __name__ == '__main__':
    main()
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an AWS Virtual Machine object.

Images:
aws ec2 describe-images --owners self amazon

All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""

import base64
import collections
import json
import logging
import posixpath
import re
import threading
import time
import uuid

from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import placement_group
from perfkitbenchmarker import providers
from perfkitbenchmarker import resource
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_virtual_machine
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
from six.moves import range

FLAGS = flags.FLAGS

# EC2 virtualization types.
HVM = 'hvm'
PV = 'paravirtual'
# Instance families that predate HVM virtualization.
NON_HVM_PREFIXES = ['m1', 'c1', 't1', 'm2']
# Instance families that cannot join a placement group.
NON_PLACEMENT_GROUP_PREFIXES = frozenset(
    ['t2', 'm3', 't3', 't3a', 't4g', 'vt1'])

DRIVE_START_LETTER = 'b'
TERMINATED = 'terminated'
SHUTTING_DOWN = 'shutting-down'
# EC2 instance lifecycle states, grouped by how PKB interprets them.
INSTANCE_EXISTS_STATUSES = frozenset(['running', 'stopping', 'stopped'])
INSTANCE_DELETED_STATUSES = frozenset([SHUTTING_DOWN, TERMINATED])
INSTANCE_TRANSITIONAL_STATUSES = frozenset(['pending'])
INSTANCE_KNOWN_STATUSES = (INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
                           | INSTANCE_TRANSITIONAL_STATUSES)
# Dedicated-host lifecycle states.
HOST_EXISTS_STATES = frozenset(
    ['available', 'under-assessment', 'permanent-failure'])
HOST_RELEASED_STATES = frozenset(['released', 'released-permanent-failure'])
KNOWN_HOST_STATES = HOST_EXISTS_STATES | HOST_RELEASED_STATES

# Spot-instance termination states, split by who initiated the termination.
AWS_INITIATED_SPOT_TERMINATING_TRANSITION_STATUSES = frozenset(
    ['marked-for-termination', 'marked-for-stop'])

AWS_INITIATED_SPOT_TERMINAL_STATUSES = frozenset(
    ['instance-terminated-by-price', 'instance-terminated-by-service',
     'instance-terminated-no-capacity',
     'instance-terminated-capacity-oversubscribed',
     'instance-terminated-launch-group-constraint'])

USER_INITIATED_SPOT_TERMINAL_STATUSES = frozenset(
    ['request-canceled-and-instance-running', 'instance-terminated-by-user'])

# These are the project numbers of projects owning common images.
# Some numbers have corresponding owner aliases, but they are not used here.
AMAZON_LINUX_IMAGE_PROJECT = [
    '137112412989',  # alias amazon most regions
    '210953353124',  # alias amazon for af-south-1
    '910595266909',  # alias amazon for ap-east-1
    '071630900071',  # alias amazon for eu-south-1
]

# From https://wiki.debian.org/Cloud/AmazonEC2Image/Stretch
# Marketplace AMI exists, but not in all regions
DEBIAN_9_IMAGE_PROJECT = ['379101102735']

# From https://wiki.debian.org/Cloud/AmazonEC2Image/Buster
# From https://wiki.debian.org/Cloud/AmazonEC2Image/Bullseye
DEBIAN_IMAGE_PROJECT = ['136693071363']

# Owns AMIs lists here:
# https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images
# Also owns the AMIS listed in
# https://builds.coreos.fedoraproject.org/streams/stable.json
CENTOS_IMAGE_PROJECT = ['125523088429']

MARKETPLACE_IMAGE_PROJECT = ['679593333241']  # alias aws-marketplace

# https://access.redhat.com/articles/2962171
RHEL_IMAGE_PROJECT = ['309956199498']

# https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29
UBUNTU_IMAGE_PROJECT = ['099720109477']  # Owned by canonical

# Some Windows images are also available in marketplace project, but this is
# the one selected by the AWS console.
WINDOWS_IMAGE_PROJECT = ['801119661308']  # alias amazon

UBUNTU_EFA_IMAGE_PROJECT = ['898082745236']

# Processor architectures
ARM = 'arm64'
X86 = 'x86_64'

# Machine type to ARM architecture.
# Maps an EC2 machine-type family prefix (the part before the '.') to the
# marketing name of its ARM processor; families absent from this map are
# treated as x86_64 by GetProcessorArchitecture().
_MACHINE_TYPE_PREFIX_TO_ARM_ARCH = {
    'a1': 'cortex-a72',
    'c6g': 'graviton2',
    'g5g': 'graviton2',
    'm6g': 'graviton2',
    'r6g': 'graviton2',
    't4g': 'graviton2',
    'im4g': 'graviton2',
    'is4ge': 'graviton2',
    'x2g': 'graviton2',
}

# Parameters for use with Elastic Fabric Adapter
# (template for one --network-interfaces entry of `ec2 run-instances`;
# Groups and SubnetId are filled in per-VM at create time).
_EFA_PARAMS = {
    'InterfaceType': 'efa',
    'DeviceIndex': 0,
    'NetworkCardIndex': 0,
    'Groups': '',
    'SubnetId': ''
}

# Location of EFA installer; {version} is substituted from the
# aws_efa_version flag before download.
_EFA_URL = ('https://s3-us-west-2.amazonaws.com/aws-efa-installer/'
            'aws-efa-installer-{version}.tar.gz')


class AwsTransitionalVmRetryableError(Exception):
  """Error for retrying _Exists when an AWS VM is in a transitional state."""


class AwsDriverDoesntSupportFeatureError(Exception):
  """Raised if there is an attempt to set a feature not supported."""


class AwsUnexpectedWindowsAdapterOutputError(Exception):
  """Raised when querying the status of a windows adapter failed."""


class AwsUnknownStatusError(Exception):
  """Error indicating an unknown status was encountered."""


class AwsImageNotFoundError(Exception):
  """Error indicating no appropriate AMI could be found."""


def GetRootBlockDeviceSpecForImage(image_id, region):
  """Queries the CLI and returns the root block device specification as a dict.

  Args:
    image_id: The EC2 image id to query
    region: The EC2 region in which the image resides

  Returns:
    The root block device specification as returned by the AWS cli,
    as a Python dict. If the image is not found, or if the response
    is malformed, an exception will be raised.
  """
  command = util.AWS_PREFIX + [
      'ec2',
      'describe-images',
      '--region=%s' % region,
      '--image-ids=%s' % image_id,
      '--query', 'Images[]']
  stdout, _ = util.IssueRetryableCommand(command)
  images = json.loads(stdout)
  # Exactly one image description is expected for a concrete image id; an
  # empty or multi-element response indicates a bad id or malformed output.
  assert images
  assert len(images) == 1, (
      'Expected to receive only one image description for %s' % image_id)
  image_spec = images[0]
  root_device_name = image_spec['RootDeviceName']
  block_device_mappings = image_spec['BlockDeviceMappings']
  # next() raises StopIteration if no mapping matches the root device name,
  # which surfaces a malformed response as an exception per the contract.
  root_block_device_dict = next((x for x in block_device_mappings
                                 if x['DeviceName'] == root_device_name))
  return root_block_device_dict


def GetBlockDeviceMap(machine_type, root_volume_size_gb=None,
                      image_id=None, region=None):
  """Returns the block device map to expose all devices for a given machine.

  Args:
    machine_type: The machine type to create a block device map for.
    root_volume_size_gb: The desired size of the root volume, in GiB,
      or None to the default provided by AWS.
    image_id: The image id (AMI) to use in order to lookup the default root
      device specs. This is only required if root_volume_size is specified.
    region: The region which contains the specified image. This is only
      required if image_id is specified.

  Returns:
    The json representation of the block device map for a machine compatible
    with the AWS CLI, or if the machine type has no local disks, it will
    return None. If root_volume_size_gb and image_id are provided, the block
    device map will include the specification for the root volume.

  Raises:
    ValueError: If required parameters are not passed.
""" mappings = [] if root_volume_size_gb is not None: if image_id is None: raise ValueError( 'image_id must be provided if root_volume_size_gb is specified') if region is None: raise ValueError( 'region must be provided if image_id is specified') root_block_device = GetRootBlockDeviceSpecForImage(image_id, region) root_block_device['Ebs']['VolumeSize'] = root_volume_size_gb # The 'Encrypted' key must be removed or the CLI will complain if not FLAGS.aws_vm_hibernate: root_block_device['Ebs'].pop('Encrypted') else: root_block_device['Ebs']['Encrypted'] = True mappings.append(root_block_device) if (machine_type in aws_disk.NUM_LOCAL_VOLUMES and not aws_disk.LocalDriveIsNvme(machine_type)): for i in range(aws_disk.NUM_LOCAL_VOLUMES[machine_type]): od = collections.OrderedDict() od['VirtualName'] = 'ephemeral%s' % i od['DeviceName'] = '/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i) mappings.append(od) if mappings: return json.dumps(mappings) return None def IsPlacementGroupCompatible(machine_type): """Returns True if VMs of 'machine_type' can be put in a placement group.""" prefix = machine_type.split('.')[0] return prefix not in NON_PLACEMENT_GROUP_PREFIXES def GetArmArchitecture(machine_type): """Returns the specific ARM processor architecture of the VM.""" # c6g.medium -> c6g, m6gd.large -> m6g, c5n.18xlarge -> c5 prefix = re.split(r'[dn]?\.', machine_type)[0] return _MACHINE_TYPE_PREFIX_TO_ARM_ARCH.get(prefix) def GetProcessorArchitecture(machine_type): """Returns the processor architecture of the VM.""" if GetArmArchitecture(machine_type): return ARM else: return X86 class AwsDedicatedHost(resource.BaseResource): """Object representing an AWS host. Attributes: region: The AWS region of the host. zone: The AWS availability zone of the host. machine_type: The machine type of VMs that may be created on the host. client_token: A uuid that makes the creation request idempotent. id: The host_id of the host. 
""" def __init__(self, machine_type, zone): super(AwsDedicatedHost, self).__init__() self.machine_type = machine_type self.zone = zone self.region = util.GetRegionFromZone(self.zone) self.client_token = str(uuid.uuid4()) self.id = None self.fill_fraction = 0.0 def _Create(self): create_cmd = util.AWS_PREFIX + [ 'ec2', 'allocate-hosts', '--region=%s' % self.region, '--client-token=%s' % self.client_token, '--instance-type=%s' % self.machine_type, '--availability-zone=%s' % self.zone, '--auto-placement=off', '--quantity=1'] vm_util.IssueCommand(create_cmd) def _Delete(self): if self.id: delete_cmd = util.AWS_PREFIX + [ 'ec2', 'release-hosts', '--region=%s' % self.region, '--host-ids=%s' % self.id] vm_util.IssueCommand(delete_cmd, raise_on_failure=False) @vm_util.Retry() def _Exists(self): describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-hosts', '--region=%s' % self.region, '--filter=Name=client-token,Values=%s' % self.client_token] stdout, _, _ = vm_util.IssueCommand(describe_cmd) response = json.loads(stdout) hosts = response['Hosts'] assert len(hosts) < 2, 'Too many hosts.' if not hosts: return False host = hosts[0] self.id = host['HostId'] state = host['State'] assert state in KNOWN_HOST_STATES, state return state in HOST_EXISTS_STATES class AwsVmSpec(virtual_machine.BaseVmSpec): """Object containing the information needed to create an AwsVirtualMachine. Attributes: use_dedicated_host: bool. Whether to create this VM on a dedicated host. """ CLOUD = providers.AWS @classmethod def _ApplyFlags(cls, config_values, flag_values): """Modifies config options based on runtime flag values. Can be overridden by derived classes to add support for specific flags. Args: config_values: dict mapping config option names to provided values. May be modified by this function. flag_values: flags.FlagValues. Runtime flags that may override the provided config values. 
""" super(AwsVmSpec, cls)._ApplyFlags(config_values, flag_values) if flag_values['aws_boot_disk_size'].present: config_values['boot_disk_size'] = flag_values.aws_boot_disk_size if flag_values['aws_spot_instances'].present: config_values['use_spot_instance'] = flag_values.aws_spot_instances if flag_values['aws_spot_price'].present: config_values['spot_price'] = flag_values.aws_spot_price if flag_values['aws_spot_block_duration_minutes'].present: config_values['spot_block_duration_minutes'] = int( flag_values.aws_spot_block_duration_minutes) @classmethod def _GetOptionDecoderConstructions(cls): """Gets decoder classes and constructor args for each configurable option. Returns: dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair. The pair specifies a decoder class and its __init__() keyword arguments to construct in order to decode the named option. """ result = super(AwsVmSpec, cls)._GetOptionDecoderConstructions() result.update({ 'use_spot_instance': (option_decoders.BooleanDecoder, { 'default': False }), 'spot_price': (option_decoders.FloatDecoder, { 'default': None }), 'spot_block_duration_minutes': (option_decoders.IntDecoder, { 'default': None }), 'boot_disk_size': (option_decoders.IntDecoder, { 'default': None }) }) return result def _GetKeyfileSetKey(region): """Returns a key to use for the keyfile set. This prevents other runs in the same process from reusing the key. Args: region: The region the keyfile is in. 
""" return (region, FLAGS.run_uri) class AwsKeyFileManager(object): """Object for managing AWS Keyfiles.""" _lock = threading.Lock() imported_keyfile_set = set() deleted_keyfile_set = set() @classmethod def ImportKeyfile(cls, region): """Imports the public keyfile to AWS.""" with cls._lock: if _GetKeyfileSetKey(region) in cls.imported_keyfile_set: return cat_cmd = ['cat', vm_util.GetPublicKeyPath()] keyfile, _ = vm_util.IssueRetryableCommand(cat_cmd) formatted_tags = util.FormatTagSpecifications('key-pair', util.MakeDefaultTags()) import_cmd = util.AWS_PREFIX + [ 'ec2', '--region=%s' % region, 'import-key-pair', '--key-name=%s' % cls.GetKeyNameForRun(), '--public-key-material=%s' % keyfile, '--tag-specifications=%s' % formatted_tags, ] _, stderr, retcode = vm_util.IssueCommand( import_cmd, raise_on_failure=False) if retcode: if 'KeyPairLimitExceeded' in stderr: raise errors.Benchmarks.QuotaFailure( 'KeyPairLimitExceeded in %s: %s' % (region, stderr)) else: raise errors.Benchmarks.PrepareException(stderr) cls.imported_keyfile_set.add(_GetKeyfileSetKey(region)) if _GetKeyfileSetKey(region) in cls.deleted_keyfile_set: cls.deleted_keyfile_set.remove(_GetKeyfileSetKey(region)) @classmethod def DeleteKeyfile(cls, region): """Deletes the imported keyfile for a region.""" with cls._lock: if _GetKeyfileSetKey(region) in cls.deleted_keyfile_set: return delete_cmd = util.AWS_PREFIX + [ 'ec2', '--region=%s' % region, 'delete-key-pair', '--key-name=%s' % cls.GetKeyNameForRun()] util.IssueRetryableCommand(delete_cmd) cls.deleted_keyfile_set.add(_GetKeyfileSetKey(region)) if _GetKeyfileSetKey(region) in cls.imported_keyfile_set: cls.imported_keyfile_set.remove(_GetKeyfileSetKey(region)) @classmethod def GetKeyNameForRun(cls): return 'perfkit-key-{0}'.format(FLAGS.run_uri) class AwsVirtualMachine(virtual_machine.BaseVirtualMachine): """Object representing an AWS Virtual Machine.""" CLOUD = providers.AWS # The IMAGE_NAME_FILTER is passed to the AWS CLI describe-images command to # 
filter images by name. This must be set by subclasses, but may be overridden # by the aws_image_name_filter flag. IMAGE_NAME_FILTER = None # The IMAGE_NAME_REGEX can be used to further filter images by name. It # applies after the IMAGE_NAME_FILTER above. Note that before this regex is # applied, Python's string formatting is used to replace {virt_type} and # {disk_type} by the respective virtualization type and root disk type of the # VM, allowing the regex to contain these strings. This regex supports # arbitrary Python regular expressions to further narrow down the set of # images considered. IMAGE_NAME_REGEX = None # List of projects that own the AMIs of this OS type. Default to # AWS Marketplace official image project. Note that opt-in regions may have a # different image owner than default regions. IMAGE_OWNER = MARKETPLACE_IMAGE_PROJECT # Some AMIs use a project code to find the latest (in addition to owner, and # filter) IMAGE_PRODUCT_CODE_FILTER = None # CoreOS only distinguishes between stable and testing images in the # description IMAGE_DESCRIPTION_FILTER = None DEFAULT_ROOT_DISK_TYPE = 'gp2' DEFAULT_USER_NAME = 'ec2-user' _lock = threading.Lock() deleted_hosts = set() host_map = collections.defaultdict(list) def __init__(self, vm_spec): """Initialize a AWS virtual machine. Args: vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm. Raises: ValueError: If an incompatible vm_spec is passed. 
""" super(AwsVirtualMachine, self).__init__(vm_spec) self.region = util.GetRegionFromZone(self.zone) self.user_name = FLAGS.aws_user_name or self.DEFAULT_USER_NAME if self.machine_type in aws_disk.NUM_LOCAL_VOLUMES: self.max_local_disks = aws_disk.NUM_LOCAL_VOLUMES[self.machine_type] self.user_data = None self.network = aws_network.AwsNetwork.GetNetwork(self) self.placement_group = getattr(vm_spec, 'placement_group', self.network.placement_group) self.firewall = aws_network.AwsFirewall.GetFirewall() self.use_dedicated_host = vm_spec.use_dedicated_host self.num_vms_per_host = vm_spec.num_vms_per_host self.use_spot_instance = vm_spec.use_spot_instance self.spot_price = vm_spec.spot_price self.spot_block_duration_minutes = vm_spec.spot_block_duration_minutes self.boot_disk_size = vm_spec.boot_disk_size self.client_token = str(uuid.uuid4()) self.host = None self.id = None self.metadata.update({ 'spot_instance': self.use_spot_instance, 'spot_price': self.spot_price, 'spot_block_duration_minutes': self.spot_block_duration_minutes, 'placement_group_strategy': self.placement_group.strategy if self.placement_group else placement_group.PLACEMENT_GROUP_NONE, 'aws_credit_specification': FLAGS.aws_credit_specification if FLAGS.aws_credit_specification else 'none' }) self.spot_early_termination = False self.spot_status_code = None # See: # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-os.html self._smp_affinity_script = 'smp_affinity.sh' if self.use_dedicated_host and util.IsRegion(self.zone): raise ValueError( 'In order to use dedicated hosts, you must specify an availability ' 'zone, not a region ("zone" was %s).' 
% self.zone) if self.use_dedicated_host and self.use_spot_instance: raise ValueError( 'Tenancy=host is not supported for Spot Instances') self.allocation_id = None self.association_id = None self.aws_tags = {} @property def host_list(self): """Returns the list of hosts that are compatible with this VM.""" return self.host_map[(self.machine_type, self.zone)] @property def group_id(self): """Returns the security group ID of this VM.""" return self.network.regional_network.vpc.default_security_group_id @classmethod def GetDefaultImage(cls, machine_type, region): """Returns the default image given the machine type and region. If specified, the aws_image_name_filter and aws_image_name_regex flags will override os_type defaults. Args: machine_type: The machine_type of the VM, used to determine virtualization type. region: The region of the VM, as images are region specific. Raises: AwsImageNotFoundError: If a default image cannot be found. Returns: The ID of the latest image, or None if no default image is configured or none can be found. """ # These cannot be REQUIRED_ATTRS, because nesting REQUIRED_ATTRS breaks. 
if not cls.IMAGE_OWNER: raise NotImplementedError('AWS OSMixins require IMAGE_OWNER') if not cls.IMAGE_NAME_FILTER: raise NotImplementedError('AWS OSMixins require IMAGE_NAME_FILTER') if FLAGS.aws_image_name_filter: cls.IMAGE_NAME_FILTER = FLAGS.aws_image_name_filter if FLAGS.aws_image_name_regex: cls.IMAGE_NAME_REGEX = FLAGS.aws_image_name_regex prefix = machine_type.split('.')[0] virt_type = PV if prefix in NON_HVM_PREFIXES else HVM processor_architecture = GetProcessorArchitecture(machine_type) describe_cmd = util.AWS_PREFIX + [ '--region=%s' % region, 'ec2', 'describe-images', '--query', ('Images[*].{Name:Name,ImageId:ImageId,' 'CreationDate:CreationDate}'), '--filters', 'Name=name,Values=%s' % cls.IMAGE_NAME_FILTER, 'Name=block-device-mapping.volume-type,Values=%s' % cls.DEFAULT_ROOT_DISK_TYPE, 'Name=virtualization-type,Values=%s' % virt_type, 'Name=architecture,Values=%s' % processor_architecture] if cls.IMAGE_PRODUCT_CODE_FILTER: describe_cmd.extend(['Name=product-code,Values=%s' % cls.IMAGE_PRODUCT_CODE_FILTER]) if cls.IMAGE_DESCRIPTION_FILTER: describe_cmd.extend(['Name=description,Values=%s' % cls.IMAGE_DESCRIPTION_FILTER]) describe_cmd.extend(['--owners'] + cls.IMAGE_OWNER) stdout, _ = util.IssueRetryableCommand(describe_cmd) if not stdout: raise AwsImageNotFoundError('aws describe-images did not produce valid ' 'output.') if cls.IMAGE_NAME_REGEX: # Further filter images by the IMAGE_NAME_REGEX filter. 
image_name_regex = cls.IMAGE_NAME_REGEX.format( virt_type=virt_type, disk_type=cls.DEFAULT_ROOT_DISK_TYPE, architecture=processor_architecture) images = [] excluded_images = [] for image in json.loads(stdout): if re.search(image_name_regex, image['Name']): images.append(image) else: excluded_images.append(image) if excluded_images: logging.debug('Excluded the following images with regex "%s": %s', image_name_regex, sorted(image['Name'] for image in excluded_images)) else: images = json.loads(stdout) if not images: raise AwsImageNotFoundError('No AMIs with given filters found.') return max(images, key=lambda image: image['CreationDate'])['ImageId'] @vm_util.Retry(max_retries=2) def _PostCreate(self): """Get the instance's data and tag it.""" describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id] logging.info('Getting instance %s public IP. This will fail until ' 'a public IP is available, but will be retried.', self.id) stdout, _ = util.IssueRetryableCommand(describe_cmd) response = json.loads(stdout) instance = response['Reservations'][0]['Instances'][0] self.internal_ip = instance['PrivateIpAddress'] if util.IsRegion(self.zone): self.zone = str(instance['Placement']['AvailabilityZone']) assert self.group_id == instance['SecurityGroups'][0]['GroupId'], ( self.group_id, instance['SecurityGroups'][0]['GroupId']) if FLAGS.aws_efa: self._ConfigureEfa(instance) elif 'PublicIpAddress' in instance: self.ip_address = instance['PublicIpAddress'] else: raise errors.Resource.RetryableCreationError('Public IP not ready.') def _ConfigureEfa(self, instance): """Configuare EFA and associate Elastic IP. Args: instance: dict which contains instance info. 
""" if FLAGS.aws_efa_count > 1: self._ConfigureElasticIp(instance) else: self.ip_address = instance['PublicIpAddress'] if FLAGS.aws_efa_version: # Download EFA then call InstallEfa method so that subclass can override self.InstallPackages('curl') url = _EFA_URL.format(version=FLAGS.aws_efa_version) tarfile = posixpath.basename(url) self.RemoteCommand(f'curl -O {url}; tar -xzf {tarfile}') self._InstallEfa() # Run test program to confirm EFA working self.RemoteCommand('cd aws-efa-installer; ' 'PATH=${PATH}:/opt/amazon/efa/bin ./efa_test.sh') def _ConfigureElasticIp(self, instance): """Create and associate Elastic IP. Args: instance: dict which contains instance info. """ network_interface_id = None for network_interface in instance['NetworkInterfaces']: # The primary network interface (eth0) for the instance. if network_interface['Attachment']['DeviceIndex'] == 0: network_interface_id = network_interface['NetworkInterfaceId'] break assert network_interface_id is not None stdout, _, _ = vm_util.IssueCommand(util.AWS_PREFIX + ['ec2', 'allocate-address', f'--region={self.region}', '--domain=vpc']) response = json.loads(stdout) self.ip_address = response['PublicIp'] self.allocation_id = response['AllocationId'] util.AddDefaultTags(self.allocation_id, self.region) stdout, _, _ = vm_util.IssueCommand( util.AWS_PREFIX + ['ec2', 'associate-address', f'--region={self.region}', f'--allocation-id={self.allocation_id}', f'--network-interface-id={network_interface_id}']) response = json.loads(stdout) self.association_id = response['AssociationId'] def _InstallEfa(self): """Installs AWS EFA packages. 
See https://aws.amazon.com/hpc/efa/ """ if not self.TryRemoteCommand('ulimit -l | grep unlimited'): self.RemoteCommand(f'echo "{self.user_name} - memlock unlimited" | ' 'sudo tee -a /etc/security/limits.conf') self.RemoteCommand('cd aws-efa-installer; sudo ./efa_installer.sh -y') if not self.TryRemoteCommand('ulimit -l | grep unlimited'): # efa_installer.sh should reboot enabling this change, reboot if necessary self.Reboot() def _CreateDependencies(self): """Create VM dependencies.""" AwsKeyFileManager.ImportKeyfile(self.region) # GetDefaultImage calls the AWS CLI. self.image = self.image or self.GetDefaultImage(self.machine_type, self.region) self.AllowRemoteAccessPorts() if self.use_dedicated_host: with self._lock: if (not self.host_list or (self.num_vms_per_host and self.host_list[-1].fill_fraction + 1.0 / self.num_vms_per_host > 1.0)): host = AwsDedicatedHost(self.machine_type, self.zone) self.host_list.append(host) host.Create() self.host = self.host_list[-1] if self.num_vms_per_host: self.host.fill_fraction += 1.0 / self.num_vms_per_host def _DeleteDependencies(self): """Delete VM dependencies.""" AwsKeyFileManager.DeleteKeyfile(self.region) if self.host: with self._lock: if self.host in self.host_list: self.host_list.remove(self.host) if self.host not in self.deleted_hosts: self.host.Delete() self.deleted_hosts.add(self.host) def _Create(self): """Create a VM instance.""" placement = [] if not util.IsRegion(self.zone): placement.append('AvailabilityZone=%s' % self.zone) if self.use_dedicated_host: placement.append('Tenancy=host,HostId=%s' % self.host.id) num_hosts = len(self.host_list) elif self.placement_group: if IsPlacementGroupCompatible(self.machine_type): placement.append('GroupName=%s' % self.placement_group.name) else: logging.warning( 'VM not placed in Placement Group. 
VM Type %s not supported', self.machine_type) placement = ','.join(placement) block_device_map = GetBlockDeviceMap(self.machine_type, self.boot_disk_size, self.image, self.region) if not self.aws_tags: # Set tags for the AWS VM. If we are retrying the create, we have to use # the same tags from the previous call. self.aws_tags.update(self.vm_metadata) self.aws_tags.update(util.MakeDefaultTags()) create_cmd = util.AWS_PREFIX + [ 'ec2', 'run-instances', '--region=%s' % self.region, '--client-token=%s' % self.client_token, '--image-id=%s' % self.image, '--instance-type=%s' % self.machine_type, '--key-name=%s' % AwsKeyFileManager.GetKeyNameForRun(), '--tag-specifications=%s' % util.FormatTagSpecifications('instance', self.aws_tags)] if FLAGS.aws_vm_hibernate: create_cmd.extend([ '--hibernation-options=Configured=true', ]) if FLAGS.disable_smt: query_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instance-types', '--instance-types', self.machine_type, '--query', 'InstanceTypes[0].VCpuInfo.DefaultCores' ] stdout, _, retcode = vm_util.IssueCommand(query_cmd) cores = int(json.loads(stdout)) create_cmd.append(f'--cpu-options=CoreCount={cores},ThreadsPerCore=1') if FLAGS.aws_efa: efas = ['--network-interfaces'] for device_index in range(FLAGS.aws_efa_count): efa_params = _EFA_PARAMS.copy() efa_params.update({ 'NetworkCardIndex': device_index, 'DeviceIndex': device_index, 'Groups': self.group_id, 'SubnetId': self.network.subnet.id }) if FLAGS.aws_efa_count == 1: efa_params['AssociatePublicIpAddress'] = True efas.append(','.join(f'{key}={value}' for key, value in sorted(efa_params.items()))) create_cmd.extend(efas) else: create_cmd.append('--associate-public-ip-address') create_cmd.append(f'--subnet-id={self.network.subnet.id}') if block_device_map: create_cmd.append('--block-device-mappings=%s' % block_device_map) if placement: create_cmd.append('--placement=%s' % placement) if FLAGS.aws_credit_specification: create_cmd.append('--credit-specification=%s' % 
FLAGS.aws_credit_specification) if self.user_data: create_cmd.append('--user-data=%s' % self.user_data) if self.capacity_reservation_id: create_cmd.append( '--capacity-reservation-specification=CapacityReservationTarget=' '{CapacityReservationId=%s}' % self.capacity_reservation_id) if self.use_spot_instance: instance_market_options = collections.OrderedDict() spot_options = collections.OrderedDict() spot_options['SpotInstanceType'] = 'one-time' spot_options['InstanceInterruptionBehavior'] = 'terminate' if self.spot_price: spot_options['MaxPrice'] = str(self.spot_price) if self.spot_block_duration_minutes: spot_options['BlockDurationMinutes'] = self.spot_block_duration_minutes instance_market_options['MarketType'] = 'spot' instance_market_options['SpotOptions'] = spot_options create_cmd.append( '--instance-market-options=%s' % json.dumps(instance_market_options)) _, stderr, retcode = vm_util.IssueCommand(create_cmd, raise_on_failure=False) arm_arch = GetArmArchitecture(self.machine_type) if arm_arch: self.host_arch = arm_arch if self.use_dedicated_host and 'InsufficientCapacityOnHost' in stderr: if self.num_vms_per_host: raise errors.Resource.CreationError( 'Failed to create host: %d vms of type %s per host exceeds ' 'memory capacity limits of the host' % (self.num_vms_per_host, self.machine_type)) else: logging.warning( 'Creation failed due to insufficient host capacity. 
A new host will ' 'be created and instance creation will be retried.') with self._lock: if num_hosts == len(self.host_list): host = AwsDedicatedHost(self.machine_type, self.zone) self.host_list.append(host) host.Create() self.host = self.host_list[-1] self.client_token = str(uuid.uuid4()) raise errors.Resource.RetryableCreationError() if 'InsufficientInstanceCapacity' in stderr: if self.use_spot_instance: self.spot_status_code = 'InsufficientSpotInstanceCapacity' self.spot_early_termination = True raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr) if 'SpotMaxPriceTooLow' in stderr: self.spot_status_code = 'SpotMaxPriceTooLow' self.spot_early_termination = True raise errors.Resource.CreationError(stderr) if 'InstanceLimitExceeded' in stderr or 'VcpuLimitExceeded' in stderr: raise errors.Benchmarks.QuotaFailure(stderr) if 'RequestLimitExceeded' in stderr: if FLAGS.retry_on_rate_limited: raise errors.Resource.RetryableCreationError(stderr) else: raise errors.Benchmarks.QuotaFailure(stderr) # When launching more than 1 VM into the same placement group, there is an # occasional error that the placement group has already been used in a # separate zone. Retrying fixes this error. if 'InvalidPlacementGroup.InUse' in stderr: raise errors.Resource.RetryableCreationError(stderr) if 'Unsupported' in stderr: raise errors.Benchmarks.UnsupportedConfigError(stderr) if retcode: raise errors.Resource.CreationError( 'Failed to create VM: %s return code: %s' % (retcode, stderr)) @vm_util.Retry( poll_interval=0.5, log_errors=True, retryable_exceptions=(AwsTransitionalVmRetryableError,)) def _WaitForStoppedStatus(self): """Returns the status of the VM. Returns: Whether the VM is suspended i.e. in a stopped status. If not, raises an error Raises: AwsUnknownStatusError: If an unknown status is returned from AWS. AwsTransitionalVmRetryableError: If the VM is pending. This is retried. 
""" describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instance-status', '--region=%s' % self.region, '--instance-ids=%s' % self.id, '--include-all-instances', ] stdout, _ = util.IssueRetryableCommand(describe_cmd) response = json.loads(stdout) status = response['InstanceStatuses'][0]['InstanceState']['Name'] if status.lower() != 'stopped': logging.info('VM has status %s.', status) raise AwsTransitionalVmRetryableError() def _BeforeSuspend(self): """Prepares the instance for suspend by having the VM sleep for a given duration. This ensures the VM is ready for hibernation """ # Add a timer that waits for a given duration after vm instance is # created before calling suspend on the vm to ensure that the vm is # ready for hibernation in aws. time.sleep(600) def _PostSuspend(self): self._WaitForStoppedStatus() def _Suspend(self): """Suspends a VM instance.""" suspend_cmd = util.AWS_PREFIX + [ 'ec2', 'stop-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id, '--hibernate', ] try: vm_util.IssueCommand(suspend_cmd) except: raise errors.Benchmarks.KnownIntermittentError( 'Instance is still not ready to hibernate') self._PostSuspend() @vm_util.Retry( poll_interval=0.5, retryable_exceptions=(AwsTransitionalVmRetryableError,)) def _WaitForNewIP(self): """Checks for a new IP address, waiting if the VM is still pending. Raises: AwsTransitionalVmRetryableError: If VM is pending. This is retried. 
""" status_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instances', f'--region={self.region}', f'--instance-ids={self.id}' ] stdout, _, _ = vm_util.IssueCommand(status_cmd) response = json.loads(stdout) instance = response['Reservations'][0]['Instances'][0] if 'PublicIpAddress' in instance: self.ip_address = instance['PublicIpAddress'] else: logging.info('VM is pending.') raise AwsTransitionalVmRetryableError() def _PostResume(self): self._WaitForNewIP() def _Resume(self): """Resumes a VM instance.""" resume_cmd = util.AWS_PREFIX + [ 'ec2', 'start-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id, ] vm_util.IssueCommand(resume_cmd) self._PostResume() def _Delete(self): """Delete a VM instance.""" if self.id: delete_cmd = util.AWS_PREFIX + [ 'ec2', 'terminate-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id] vm_util.IssueCommand(delete_cmd, raise_on_failure=False) if hasattr(self, 'spot_instance_request_id'): cancel_cmd = util.AWS_PREFIX + [ '--region=%s' % self.region, 'ec2', 'cancel-spot-instance-requests', '--spot-instance-request-ids=%s' % self.spot_instance_request_id] vm_util.IssueCommand(cancel_cmd, raise_on_failure=False) if FLAGS.aws_efa: if self.association_id: vm_util.IssueCommand(util.AWS_PREFIX + ['ec2', 'disassociate-address', f'--region={self.region}', f'--association-id={self.association_id}']) if self.allocation_id: vm_util.IssueCommand(util.AWS_PREFIX + ['ec2', 'release-address', f'--region={self.region}', f'--allocation-id={self.allocation_id}']) # _Start or _Stop not yet implemented for AWS def _Start(self): """Starts the VM.""" if not self.id: raise errors.Benchmarks.RunError( 'Expected VM id to be non-null. 
Please make sure the VM exists.')
    start_cmd = util.AWS_PREFIX + [
        'ec2', 'start-instances',
        f'--region={self.region}',
        f'--instance-ids={self.id}'
    ]
    vm_util.IssueCommand(start_cmd)

  def _PostStart(self):
    """Waits for a new IP address after the VM is started."""
    self._WaitForNewIP()

  def _Stop(self):
    """Stops the VM."""
    if not self.id:
      raise errors.Benchmarks.RunError(
          'Expected VM id to be non-null. Please make sure the VM exists.')
    stop_cmd = util.AWS_PREFIX + [
        'ec2', 'stop-instances',
        f'--region={self.region}',
        f'--instance-ids={self.id}'
    ]
    vm_util.IssueCommand(stop_cmd)

  def _PostStop(self):
    """Waits until the instance reaches the stopped state."""
    self._WaitForStoppedStatus()

  def _UpdateInterruptibleVmStatusThroughApi(self):
    """Refreshes spot status code / early-termination flag from the EC2 API.

    No-op unless this VM actually has a spot instance request
    (spot_instance_request_id is only set for spot VMs).
    """
    if hasattr(self, 'spot_instance_request_id'):
      describe_cmd = util.AWS_PREFIX + [
          '--region=%s' % self.region,
          'ec2',
          'describe-spot-instance-requests',
          '--spot-instance-request-ids=%s' % self.spot_instance_request_id]
      stdout, _, _ = vm_util.IssueCommand(describe_cmd)
      sir_response = json.loads(stdout)['SpotInstanceRequests']
      self.spot_status_code = sir_response[0]['Status']['Code']
      # Early termination means AWS (not the user) ended the instance.
      self.spot_early_termination = (
          self.spot_status_code in AWS_INITIATED_SPOT_TERMINAL_STATUSES)

  @vm_util.Retry(
      poll_interval=1,
      log_errors=False,
      retryable_exceptions=(AwsTransitionalVmRetryableError,))
  def _Exists(self):
    """Returns whether the VM exists.

    This method waits until the VM is no longer pending.

    Returns:
      Whether the VM exists.

    Raises:
      AwsUnknownStatusError: If an unknown status is returned from AWS.
      AwsTransitionalVmRetryableError: If the VM is pending. This is retried.
    """
    describe_cmd = util.AWS_PREFIX + [
        'ec2',
        'describe-instances',
        '--region=%s' % self.region,
        # The client token was passed to run-instances, so filtering on it
        # finds exactly the instance created by this object.
        '--filter=Name=client-token,Values=%s' % self.client_token]
    stdout, _ = util.IssueRetryableCommand(describe_cmd)
    response = json.loads(stdout)
    reservations = response['Reservations']
    assert len(reservations) < 2, 'Too many reservations.'
    if not reservations:
      if not self.create_start_time:
        return False
      logging.info('No reservation returned by describe-instances. This '
                   'sometimes shows up immediately after a successful '
                   'run-instances command. Retrying describe-instances '
                   'command.')
      raise AwsTransitionalVmRetryableError()
    instances = reservations[0]['Instances']
    assert len(instances) == 1, 'Wrong number of instances.'
    status = instances[0]['State']['Name']
    self.id = instances[0]['InstanceId']
    if self.use_spot_instance:
      self.spot_instance_request_id = instances[0]['SpotInstanceRequestId']
    if status not in INSTANCE_KNOWN_STATUSES:
      raise AwsUnknownStatusError('Unknown status %s' % status)
    if status in INSTANCE_TRANSITIONAL_STATUSES:
      logging.info('VM has status %s; retrying describe-instances command.',
                   status)
      raise AwsTransitionalVmRetryableError()
    # In this path run-instances succeeded, a pending instance was created, but
    # not fulfilled so it moved to terminated.
    if (status == TERMINATED and
        instances[0]['StateReason']['Code'] ==
        'Server.InsufficientInstanceCapacity'):
      raise errors.Benchmarks.InsufficientCapacityCloudFailure(
          instances[0]['StateReason']['Message'])
    # In this path run-instances succeeded, a pending instance was created, but
    # instance is shutting down due to internal server error. This is a
    # retryable command for run-instance.
    # Client token needs to be refreshed for idempotency.
    if (status == SHUTTING_DOWN and
        instances[0]['StateReason']['Code'] == 'Server.InternalError'):
      self.client_token = str(uuid.uuid4())
    return status in INSTANCE_EXISTS_STATUSES

  def _GetNvmeBootIndex(self):
    """Returns the NVMe index of the boot drive, or 0 when undetermined.

    Only probes the guest when both local and EBS drives appear as NVMe
    devices; otherwise returns None implicitly (callers pass the value to
    AssignDeviceLetter).
    """
    if (aws_disk.LocalDriveIsNvme(self.machine_type) and
        aws_disk.EbsDriveIsNvme(self.machine_type)):
      # identify boot drive
      # If this command ever fails consider 'findmnt -nM / -o source'
      cmd = ('realpath /dev/disk/by-label/cloudimg-rootfs '
             '| grep --only-matching "nvme[0-9]*"')
      boot_drive = self.RemoteCommand(cmd, ignore_failure=True)[0].strip()
      if boot_drive:
        # get the boot drive index by dropping the nvme prefix
        boot_idx = int(boot_drive[4:])
        logging.info('found boot drive at nvme index %d', boot_idx)
        return boot_idx
      else:
        logging.warning('Failed to identify NVME boot drive index. Assuming 0.')
        return 0

  def CreateScratchDisk(self, disk_spec):
    """Create a VM's scratch disk.

    Args:
      disk_spec: virtual_machine.BaseDiskSpec object of the disk.

    Raises:
      CreationError: If an NFS disk is listed but the NFS service not created.
    """
    # Instantiate the disk(s) that we want to create.
    disks = []
    nvme_boot_drive_index = self._GetNvmeBootIndex()
    for _ in range(disk_spec.num_striped_disks):
      if disk_spec.disk_type == disk.NFS:
        data_disk = self._GetNfsService().CreateNfsDisk()
      else:
        data_disk = aws_disk.AwsDisk(disk_spec, self.zone, self.machine_type)
      if disk_spec.disk_type == disk.LOCAL:
        device_letter = chr(ord(DRIVE_START_LETTER) + self.local_disk_counter)
        data_disk.AssignDeviceLetter(device_letter, nvme_boot_drive_index)
        # Local disk numbers start at 1 (0 is the system disk).
        data_disk.disk_number = self.local_disk_counter + 1
        self.local_disk_counter += 1
        if self.local_disk_counter > self.max_local_disks:
          raise errors.Error('Not enough local disks.')
      elif disk_spec.disk_type == disk.NFS:
        pass
      else:
        # Remote disk numbers start at 1 + max_local disks (0 is the system
        # disk and local disks occupy [1, max_local_disks]).
        data_disk.disk_number = (self.remote_disk_counter +
                                 1 + self.max_local_disks)
        self.remote_disk_counter += 1
      disks.append(data_disk)
    self._CreateScratchDiskFromDisks(disk_spec, disks)

  def AddMetadata(self, **kwargs):
    """Adds metadata to the VM."""
    util.AddTags(self.id, self.region, **kwargs)
    if self.use_spot_instance:
      util.AddDefaultTags(self.spot_instance_request_id, self.region)

  def InstallCli(self):
    """Installs the AWS cli and credentials on this AWS vm."""
    self.Install('awscli')
    self.Install('aws_credentials')

  def DownloadPreprovisionedData(self, install_path, module_name, filename):
    """Downloads a data file from an AWS S3 bucket with pre-provisioned data.

    Use --aws_preprovisioned_data_bucket to specify the name of the bucket.

    Args:
      install_path: The install path on this VM.
      module_name: Name of the module associated with this data file.
      filename: The name of the file that was downloaded.
    """
    self.InstallCli()
    # TODO(deitz): Add retry logic.
    self.RemoteCommand(GenerateDownloadPreprovisionedDataCommand(
        install_path, module_name, filename))

  def ShouldDownloadPreprovisionedData(self, module_name, filename):
    """Returns whether or not preprovisioned data is available."""
    self.Install('aws_credentials')
    self.Install('awscli')
    return FLAGS.aws_preprovisioned_data_bucket and self.TryRemoteCommand(
        GenerateStatPreprovisionedDataCommand(module_name, filename))

  def IsInterruptible(self):
    """Returns whether this vm is an interruptible vm (spot vm).

    Returns:
      True if this vm is an interruptible vm (spot vm).
    """
    return self.use_spot_instance

  def WasInterrupted(self):
    """Returns whether this spot vm was terminated early by AWS.

    Returns:
      True if this vm was terminated early by AWS.
    """
    return self.spot_early_termination

  def GetVmStatusCode(self):
    """Returns the early termination code if any.

    Returns:
      Early termination code.
    """
    return self.spot_status_code

  def GetResourceMetadata(self):
    """Returns a dict containing metadata about the VM.
    Returns:
      dict mapping string property key to value.
    """
    result = super(AwsVirtualMachine, self).GetResourceMetadata()
    result['boot_disk_type'] = self.DEFAULT_ROOT_DISK_TYPE
    result['boot_disk_size'] = self.boot_disk_size
    if self.use_dedicated_host:
      result['num_vms_per_host'] = self.num_vms_per_host
    result['efa'] = FLAGS.aws_efa
    if FLAGS.aws_efa:
      result['efa_version'] = FLAGS.aws_efa_version
      result['efa_count'] = FLAGS.aws_efa_count
    result['preemptible'] = self.use_spot_instance
    return result


class ClearBasedAwsVirtualMachine(AwsVirtualMachine,
                                  linux_virtual_machine.ClearMixin):
  """Class with configuration for AWS Clear Linux virtual machines."""
  IMAGE_NAME_FILTER = 'clear/images/*/clear-*'
  DEFAULT_USER_NAME = 'clear'


class CoreOsBasedAwsVirtualMachine(AwsVirtualMachine,
                                   linux_virtual_machine.CoreOsMixin):
  """Class with configuration for AWS Fedora CoreOS virtual machines."""
  IMAGE_NAME_FILTER = 'fedora-coreos-*-hvm'
  # CoreOS only distinguishes between stable and testing in the description
  IMAGE_DESCRIPTION_FILTER = 'Fedora CoreOS stable *'
  IMAGE_OWNER = CENTOS_IMAGE_PROJECT
  DEFAULT_USER_NAME = 'core'


class Debian9BasedAwsVirtualMachine(AwsVirtualMachine,
                                    linux_virtual_machine.Debian9Mixin):
  """Class with configuration for AWS Debian 9 (stretch) virtual machines."""
  # From https://wiki.debian.org/Cloud/AmazonEC2Image/Stretch
  IMAGE_NAME_FILTER = 'debian-stretch-*64-*'
  IMAGE_OWNER = DEBIAN_9_IMAGE_PROJECT
  DEFAULT_USER_NAME = 'admin'

  def _BeforeSuspend(self):
    """Prepares the aws vm for hibernation."""
    raise NotImplementedError()


class Debian10BasedAwsVirtualMachine(AwsVirtualMachine,
                                     linux_virtual_machine.Debian10Mixin):
  """Class with configuration for AWS Debian 10 (buster) virtual machines."""
  # From https://wiki.debian.org/Cloud/AmazonEC2Image/Buster
  IMAGE_NAME_FILTER = 'debian-10-*64*'
  IMAGE_OWNER = DEBIAN_IMAGE_PROJECT
  DEFAULT_USER_NAME = 'admin'


class Debian11BasedAwsVirtualMachine(AwsVirtualMachine,
                                     linux_virtual_machine.Debian11Mixin):
  """Class with configuration for AWS Debian 11 (bullseye) virtual machines."""
  # From https://wiki.debian.org/Cloud/AmazonEC2Image/Buster
  IMAGE_NAME_FILTER = 'debian-11-*64*'
  IMAGE_OWNER = DEBIAN_IMAGE_PROJECT
  DEFAULT_USER_NAME = 'admin'


class UbuntuBasedAwsVirtualMachine(AwsVirtualMachine):
  """Common configuration shared by all AWS Ubuntu virtual machines."""
  IMAGE_OWNER = UBUNTU_IMAGE_PROJECT
  DEFAULT_USER_NAME = 'ubuntu'


class Ubuntu1604BasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine,
                                       linux_virtual_machine.Ubuntu1604Mixin):
  """Class with configuration for AWS Ubuntu 16.04 virtual machines."""
  IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-xenial-16.04-*64-server-20*'

  def _InstallEfa(self):
    """Installs EFA, then reboots so the new driver takes effect."""
    super(Ubuntu1604BasedAwsVirtualMachine, self)._InstallEfa()
    self.Reboot()
    self.WaitForBootCompletion()


class Ubuntu1804BasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine,
                                       linux_virtual_machine.Ubuntu1804Mixin):
  """Class with configuration for AWS Ubuntu 18.04 virtual machines."""
  IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-bionic-18.04-*64-server-20*'


class Ubuntu1804EfaBasedAwsVirtualMachine(
    UbuntuBasedAwsVirtualMachine, linux_virtual_machine.Ubuntu1804EfaMixin):
  """Class with configuration for AWS Ubuntu 18.04 EFA virtual machines."""
  IMAGE_OWNER = UBUNTU_EFA_IMAGE_PROJECT
  IMAGE_NAME_FILTER = 'Deep Learning AMI GPU CUDA * (Ubuntu 18.04) *'


class Ubuntu2004BasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine,
                                       linux_virtual_machine.Ubuntu2004Mixin):
  """Class with configuration for AWS Ubuntu 20.04 virtual machines."""
  IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-focal-20.04-*64-server-20*'


class JujuBasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine,
                                 linux_virtual_machine.JujuMixin):
  """Class with configuration for AWS Juju virtual machines."""
  IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-trusty-14.04-*64-server-20*'


class AmazonLinux2BasedAwsVirtualMachine(
    AwsVirtualMachine, linux_virtual_machine.AmazonLinux2Mixin):
  """Class with configuration for AWS Amazon Linux 2 virtual machines."""
  IMAGE_NAME_FILTER = 'amzn2-ami-*-*-*'
  IMAGE_OWNER = AMAZON_LINUX_IMAGE_PROJECT


class Rhel7BasedAwsVirtualMachine(AwsVirtualMachine,
                                  linux_virtual_machine.Rhel7Mixin):
  """Class with configuration for AWS RHEL 7 virtual machines."""
  # Documentation on finding RHEL images:
  # https://access.redhat.com/articles/2962171
  IMAGE_NAME_FILTER = 'RHEL-7*_GA*'
  IMAGE_OWNER = RHEL_IMAGE_PROJECT


class Rhel8BasedAwsVirtualMachine(AwsVirtualMachine,
                                  linux_virtual_machine.Rhel8Mixin):
  """Class with configuration for AWS RHEL 8 virtual machines."""
  # Documentation on finding RHEL images:
  # https://access.redhat.com/articles/2962181
  # All RHEL AMIs are HVM. HVM- blocks HVM_BETA.
  IMAGE_NAME_FILTER = 'RHEL-8*_HVM-*'
  IMAGE_OWNER = RHEL_IMAGE_PROJECT


class CentOs7BasedAwsVirtualMachine(AwsVirtualMachine,
                                    linux_virtual_machine.CentOs7Mixin):
  """Class with configuration for AWS CentOS 7 virtual machines."""
  # Documentation on finding the CentOS 7 image:
  # https://wiki.centos.org/Cloud/AWS#x86_64
  IMAGE_NAME_FILTER = 'CentOS 7*'
  IMAGE_OWNER = CENTOS_IMAGE_PROJECT
  DEFAULT_USER_NAME = 'centos'

  def _InstallEfa(self):
    """Upgrades the kernel and reboots before delegating to the base install."""
    logging.info('Upgrading Centos7 kernel, installing kernel headers and '
                 'rebooting before installing EFA.')
    self.RemoteCommand('sudo yum upgrade -y kernel')
    self.InstallPackages('kernel-devel')
    self.Reboot()
    self.WaitForBootCompletion()
    super(CentOs7BasedAwsVirtualMachine, self)._InstallEfa()


class CentOs8BasedAwsVirtualMachine(AwsVirtualMachine,
                                    linux_virtual_machine.CentOs8Mixin):
  """Class with configuration for AWS CentOS 8 virtual machines."""
  # This describes the official AMIs listed here:
  # https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images
  IMAGE_OWNER = CENTOS_IMAGE_PROJECT
  IMAGE_NAME_FILTER = 'CentOS 8*'
  DEFAULT_USER_NAME = 'centos'


class CentOsStream8BasedAwsVirtualMachine(
    AwsVirtualMachine, linux_virtual_machine.CentOsStream8Mixin):
  """Class with configuration for AWS CentOS Stream 8 virtual machines."""
  # This describes the official AMIs listed here:
  # https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images
  IMAGE_OWNER = CENTOS_IMAGE_PROJECT
  IMAGE_NAME_FILTER = 'CentOS Stream 8*'
  DEFAULT_USER_NAME = 'centos'


class RockyLinux8BasedAwsVirtualMachine(AwsVirtualMachine,
                                        linux_virtual_machine.RockyLinux8Mixin):
  """Class with configuration for AWS Rocky Linux 8 virtual machines."""
  IMAGE_OWNER = MARKETPLACE_IMAGE_PROJECT
  IMAGE_PRODUCT_CODE_FILTER = 'cotnnspjrsi38lfn8qo4ibnnm'
  IMAGE_NAME_FILTER = 'Rocky-8-*'
  DEFAULT_USER_NAME = 'rocky'


class CentOsStream9BasedAwsVirtualMachine(
    AwsVirtualMachine, linux_virtual_machine.CentOsStream9Mixin):
  """Class with configuration for AWS CentOS Stream 9 virtual machines."""
  # This describes the official AMIs listed here:
  # https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images
  IMAGE_OWNER = CENTOS_IMAGE_PROJECT
  IMAGE_NAME_FILTER = 'CentOS Stream 9*'


class BaseWindowsAwsVirtualMachine(AwsVirtualMachine,
                                   windows_virtual_machine.BaseWindowsMixin):
  """Support for Windows machines on AWS."""
  DEFAULT_USER_NAME = 'Administrator'
  IMAGE_OWNER = WINDOWS_IMAGE_PROJECT

  def __init__(self, vm_spec):
    super(BaseWindowsAwsVirtualMachine, self).__init__(vm_spec)
    # EC2 runs user data wrapped in <powershell> tags at first boot; this
    # bootstraps WinRM so PKB can reach the machine.
    self.user_data = ('<powershell>%s</powershell>' %
                      windows_virtual_machine.STARTUP_SCRIPT)

  @vm_util.Retry()
  def _GetDecodedPasswordData(self):
    """Returns the VM's base64-decoded (still encrypted) password data."""
    # Retrieve a base64 encoded, encrypted password for the VM.
    get_password_cmd = util.AWS_PREFIX + [
        'ec2',
        'get-password-data',
        '--region=%s' % self.region,
        '--instance-id=%s' % self.id]
    stdout, _ = util.IssueRetryableCommand(get_password_cmd)
    response = json.loads(stdout)
    password_data = response['PasswordData']
    # AWS may not populate the password data until some time after
    # the VM shows as running. Simply retry until the data shows up.
    if not password_data:
      raise ValueError('No PasswordData in response.')
    # Decode the password data.
    return base64.b64decode(password_data)

  def _PostCreate(self):
    """Retrieve generic VM info and then retrieve the VM's password."""
    super(BaseWindowsAwsVirtualMachine, self)._PostCreate()

    # Get the decoded password data.
    decoded_password_data = self._GetDecodedPasswordData()

    # Write the encrypted data to a file, and use openssl to
    # decrypt the password with the run's private SSH key.
    with vm_util.NamedTemporaryFile() as tf:
      tf.write(decoded_password_data)
      tf.close()
      decrypt_cmd = ['openssl',
                     'rsautl',
                     '-decrypt',
                     '-in',
                     tf.name,
                     '-inkey',
                     vm_util.GetPrivateKeyPath()]
      password, _ = vm_util.IssueRetryableCommand(decrypt_cmd)
      self.password = password

  def GetResourceMetadata(self):
    """Returns a dict containing metadata about the VM.

    Returns:
      dict mapping metadata key to value.
""" result = super(BaseWindowsAwsVirtualMachine, self).GetResourceMetadata() result['disable_interrupt_moderation'] = self.disable_interrupt_moderation return result @vm_util.Retry( max_retries=10, retryable_exceptions=(AwsUnexpectedWindowsAdapterOutputError, errors.VirtualMachine.RemoteCommandError)) def DisableInterruptModeration(self): """Disable the networking feature 'Interrupt Moderation'.""" # First ensure that the driver supports interrupt moderation net_adapters, _ = self.RemoteCommand('Get-NetAdapter') if 'Intel(R) 82599 Virtual Function' not in net_adapters: raise AwsDriverDoesntSupportFeatureError( 'Driver not tested with Interrupt Moderation in PKB.') aws_int_dis_path = ('HKLM\\SYSTEM\\ControlSet001\\Control\\Class\\' '{4d36e972-e325-11ce-bfc1-08002be10318}\\0011') command = 'reg add "%s" /v *InterruptModeration /d 0 /f' % aws_int_dis_path self.RemoteCommand(command) try: self.RemoteCommand('Restart-NetAdapter -Name "Ethernet 2"') except IOError: # Restarting the network adapter will always fail because # the winrm connection used to issue the command will be # broken. 
pass int_dis_value, _ = self.RemoteCommand( 'reg query "%s" /v *InterruptModeration' % aws_int_dis_path) # The second line should look like: # *InterruptModeration REG_SZ 0 registry_query_lines = int_dis_value.splitlines() if len(registry_query_lines) < 3: raise AwsUnexpectedWindowsAdapterOutputError( 'registry query failed: %s ' % int_dis_value) registry_query_result = registry_query_lines[2].split() if len(registry_query_result) < 3: raise AwsUnexpectedWindowsAdapterOutputError( 'unexpected registry query response: %s' % int_dis_value) if registry_query_result[2] != '0': raise AwsUnexpectedWindowsAdapterOutputError( 'InterruptModeration failed to disable') class Windows2012CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2012CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2012-R2_RTM-English-64Bit-Core-*' class Windows2016CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2016CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2016-English-Core-Base-*' class Windows2019CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Core-Base-*' class Windows2022CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Core-Base-*' class Windows2012DesktopAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2012DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2012-R2_RTM-English-64Bit-Base-*' class Windows2016DesktopAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2016DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2016-English-Full-Base-*' class Windows2019DesktopAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Full-Base-*' class Windows2022DesktopAwsVirtualMachine( 
BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Full-Base-*' class Windows2019DesktopSQLServer2019StandardAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019SQLServer2019Standard): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Full-SQL_2019_Standard-*' class Windows2019DesktopSQLServer2019EnterpriseAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019SQLServer2019Enterprise): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Full-SQL_2019_Enterprise-*' class Windows2022DesktopSQLServer2019StandardAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022SQLServer2019Standard): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Full-SQL_2019_Standard-*' class Windows2022DesktopSQLServer2019EnterpriseAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022SQLServer2019Enterprise): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Full-SQL_2019_Enterprise-*' def GenerateDownloadPreprovisionedDataCommand(install_path, module_name, filename): """Returns a string used to download preprovisioned data.""" return 'aws s3 cp --only-show-errors s3://%s/%s/%s %s' % ( FLAGS.aws_preprovisioned_data_bucket, module_name, filename, posixpath.join(install_path, filename)) def GenerateStatPreprovisionedDataCommand(module_name, filename): """Returns a string used to download preprovisioned data.""" return 'aws s3api head-object --bucket %s --key %s/%s' % ( FLAGS.aws_preprovisioned_data_bucket, module_name, filename) Add AWS c7g support. PiperOrigin-RevId: 450560510 # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an AWS Virtual Machine object.

Images: aws ec2 describe-images --owners self amazon
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""

import base64
import collections
import json
import logging
import posixpath
import re
import threading
import time
import uuid

from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import placement_group
from perfkitbenchmarker import providers
from perfkitbenchmarker import resource
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_virtual_machine
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
from six.moves import range

FLAGS = flags.FLAGS

# EC2 virtualization types.
HVM = 'hvm'
PV = 'paravirtual'
NON_HVM_PREFIXES = ['m1', 'c1', 't1', 'm2']
# Machine-type family prefixes that cannot join an EC2 placement group.
NON_PLACEMENT_GROUP_PREFIXES = frozenset(
    ['t2', 'm3', 't3', 't3a', 't4g', 'vt1'])
DRIVE_START_LETTER = 'b'
TERMINATED = 'terminated'
SHUTTING_DOWN = 'shutting-down'
INSTANCE_EXISTS_STATUSES = frozenset(['running', 'stopping', 'stopped'])
INSTANCE_DELETED_STATUSES = frozenset([SHUTTING_DOWN, TERMINATED])
INSTANCE_TRANSITIONAL_STATUSES = frozenset(['pending'])
INSTANCE_KNOWN_STATUSES = (INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
                           | INSTANCE_TRANSITIONAL_STATUSES)
HOST_EXISTS_STATES = frozenset(
    ['available', 'under-assessment', 'permanent-failure'])
HOST_RELEASED_STATES = frozenset(['released', 'released-permanent-failure'])
KNOWN_HOST_STATES = HOST_EXISTS_STATES | HOST_RELEASED_STATES

AWS_INITIATED_SPOT_TERMINATING_TRANSITION_STATUSES = frozenset(
    ['marked-for-termination', 'marked-for-stop'])

AWS_INITIATED_SPOT_TERMINAL_STATUSES = frozenset(
    ['instance-terminated-by-price', 'instance-terminated-by-service',
     'instance-terminated-no-capacity',
     'instance-terminated-capacity-oversubscribed',
     'instance-terminated-launch-group-constraint'])

USER_INITIATED_SPOT_TERMINAL_STATUSES = frozenset(
    ['request-canceled-and-instance-running', 'instance-terminated-by-user'])

# These are the project numbers of projects owning common images.
# Some numbers have corresponding owner aliases, but they are not used here.
AMAZON_LINUX_IMAGE_PROJECT = [
    '137112412989',  # alias amazon most regions
    '210953353124',  # alias amazon for af-south-1
    '071630900071',  # alias amazon for eu-south-1
]

# From https://wiki.debian.org/Cloud/AmazonEC2Image/Stretch
# Marketplace AMI exists, but not in all regions
DEBIAN_9_IMAGE_PROJECT = ['379101102735']

# From https://wiki.debian.org/Cloud/AmazonEC2Image/Buster
# From https://wiki.debian.org/Cloud/AmazonEC2Image/Bullseye
DEBIAN_IMAGE_PROJECT = ['136693071363']

# Owns AMIs lists here:
# https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images
# Also owns the AMIS listed in
# https://builds.coreos.fedoraproject.org/streams/stable.json
CENTOS_IMAGE_PROJECT = ['125523088429']

MARKETPLACE_IMAGE_PROJECT = ['679593333241']  # alias aws-marketplace

# https://access.redhat.com/articles/2962171
RHEL_IMAGE_PROJECT = ['309956199498']

# https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29
UBUNTU_IMAGE_PROJECT = ['099720109477']  # Owned by canonical

# Some Windows images are also available in marketplace project, but this is
# the one selected by the AWS console.
WINDOWS_IMAGE_PROJECT = ['801119661308']  # alias amazon

UBUNTU_EFA_IMAGE_PROJECT = ['898082745236']

# Processor architectures
ARM = 'arm64'
X86 = 'x86_64'

# Machine type to ARM architecture.
# NOTE(review): c7g instances are AWS Graviton3 — confirm that mapping 'c7g'
# to 'graviton2' here is intentional.
_MACHINE_TYPE_PREFIX_TO_ARM_ARCH = {
    'a1': 'cortex-a72',
    'c6g': 'graviton2',
    'c7g': 'graviton2',
    'g5g': 'graviton2',
    'm6g': 'graviton2',
    'r6g': 'graviton2',
    't4g': 'graviton2',
    'im4g': 'graviton2',
    'is4ge': 'graviton2',
    'x2g': 'graviton2',
}

# Parameters for use with Elastic Fabric Adapter (EFA)
_EFA_PARAMS = {
    'InterfaceType': 'efa',
    'DeviceIndex': 0,
    'NetworkCardIndex': 0,
    'Groups': '',
    'SubnetId': ''
}
# Location of EFA installer
_EFA_URL = ('https://s3-us-west-2.amazonaws.com/aws-efa-installer/'
            'aws-efa-installer-{version}.tar.gz')


class AwsTransitionalVmRetryableError(Exception):
  """Error for retrying _Exists when an AWS VM is in a transitional state."""


class AwsDriverDoesntSupportFeatureError(Exception):
  """Raised if there is an attempt to set a feature not supported."""


class AwsUnexpectedWindowsAdapterOutputError(Exception):
  """Raised when querying the status of a windows adapter failed."""


class AwsUnknownStatusError(Exception):
  """Error indicating an unknown status was encountered."""


class AwsImageNotFoundError(Exception):
  """Error indicating no appropriate AMI could be found."""


def GetRootBlockDeviceSpecForImage(image_id, region):
  """Queries the CLI and returns the root block device specification as a dict.

  Args:
    image_id: The EC2 image id to query
    region: The EC2 region in which the image resides

  Returns:
    The root block device specification as returned by the AWS cli,
    as a Python dict. If the image is not found, or if the response
    is malformed, an exception will be raised.
  """
  command = util.AWS_PREFIX + [
      'ec2',
      'describe-images',
      '--region=%s' % region,
      '--image-ids=%s' % image_id,
      '--query', 'Images[]']
  stdout, _ = util.IssueRetryableCommand(command)
  images = json.loads(stdout)
  assert images
  assert len(images) == 1, (
      'Expected to receive only one image description for %s' % image_id)
  image_spec = images[0]
  root_device_name = image_spec['RootDeviceName']
  block_device_mappings = image_spec['BlockDeviceMappings']
  # The root device is the mapping whose DeviceName matches RootDeviceName;
  # next() raises StopIteration if no mapping matches (malformed response).
  root_block_device_dict = next((x for x in block_device_mappings
                                 if x['DeviceName'] == root_device_name))
  return root_block_device_dict


def GetBlockDeviceMap(machine_type, root_volume_size_gb=None,
                      image_id=None, region=None):
  """Returns the block device map to expose all devices for a given machine.

  Args:
    machine_type: The machine type to create a block device map for.
    root_volume_size_gb: The desired size of the root volume, in GiB,
      or None to the default provided by AWS.
    image_id: The image id (AMI) to use in order to lookup the default root
      device specs. This is only required if root_volume_size is specified.
    region: The region which contains the specified image. This is only
      required if image_id is specified.

  Returns:
    The json representation of the block device map for a machine compatible
    with the AWS CLI, or if the machine type has no local disks, it will
    return None. If root_volume_size_gb and image_id are provided, the block
    device map will include the specification for the root volume.

  Raises:
    ValueError: If required parameters are not passed.
  """
  mappings = []
  if root_volume_size_gb is not None:
    if image_id is None:
      raise ValueError(
          'image_id must be provided if root_volume_size_gb is specified')
    if region is None:
      raise ValueError('region must be provided if image_id is specified')
    root_block_device = GetRootBlockDeviceSpecForImage(image_id, region)
    root_block_device['Ebs']['VolumeSize'] = root_volume_size_gb
    # The 'Encrypted' key must be removed or the CLI will complain
    if not FLAGS.aws_vm_hibernate:
      root_block_device['Ebs'].pop('Encrypted')
    else:
      root_block_device['Ebs']['Encrypted'] = True
    mappings.append(root_block_device)

  # Non-NVMe local (instance store) volumes must be mapped explicitly; NVMe
  # local drives are exposed by the platform without a block device mapping.
  if (machine_type in aws_disk.NUM_LOCAL_VOLUMES and
      not aws_disk.LocalDriveIsNvme(machine_type)):
    for i in range(aws_disk.NUM_LOCAL_VOLUMES[machine_type]):
      od = collections.OrderedDict()
      od['VirtualName'] = 'ephemeral%s' % i
      od['DeviceName'] = '/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i)
      mappings.append(od)

  if mappings:
    return json.dumps(mappings)
  return None


def IsPlacementGroupCompatible(machine_type):
  """Returns True if VMs of 'machine_type' can be put in a placement group."""
  prefix = machine_type.split('.')[0]
  return prefix not in NON_PLACEMENT_GROUP_PREFIXES


def GetArmArchitecture(machine_type):
  """Returns the specific ARM processor architecture of the VM."""
  # c6g.medium -> c6g, m6gd.large -> m6g, c5n.18xlarge -> c5
  prefix = re.split(r'[dn]?\.', machine_type)[0]
  return _MACHINE_TYPE_PREFIX_TO_ARM_ARCH.get(prefix)


def GetProcessorArchitecture(machine_type):
  """Returns the processor architecture of the VM."""
  if GetArmArchitecture(machine_type):
    return ARM
  else:
    return X86


class AwsDedicatedHost(resource.BaseResource):
  """Object representing an AWS host.

  Attributes:
    region: The AWS region of the host.
    zone: The AWS availability zone of the host.
    machine_type: The machine type of VMs that may be created on the host.
    client_token: A uuid that makes the creation request idempotent.
    id: The host_id of the host.
""" def __init__(self, machine_type, zone): super(AwsDedicatedHost, self).__init__() self.machine_type = machine_type self.zone = zone self.region = util.GetRegionFromZone(self.zone) self.client_token = str(uuid.uuid4()) self.id = None self.fill_fraction = 0.0 def _Create(self): create_cmd = util.AWS_PREFIX + [ 'ec2', 'allocate-hosts', '--region=%s' % self.region, '--client-token=%s' % self.client_token, '--instance-type=%s' % self.machine_type, '--availability-zone=%s' % self.zone, '--auto-placement=off', '--quantity=1'] vm_util.IssueCommand(create_cmd) def _Delete(self): if self.id: delete_cmd = util.AWS_PREFIX + [ 'ec2', 'release-hosts', '--region=%s' % self.region, '--host-ids=%s' % self.id] vm_util.IssueCommand(delete_cmd, raise_on_failure=False) @vm_util.Retry() def _Exists(self): describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-hosts', '--region=%s' % self.region, '--filter=Name=client-token,Values=%s' % self.client_token] stdout, _, _ = vm_util.IssueCommand(describe_cmd) response = json.loads(stdout) hosts = response['Hosts'] assert len(hosts) < 2, 'Too many hosts.' if not hosts: return False host = hosts[0] self.id = host['HostId'] state = host['State'] assert state in KNOWN_HOST_STATES, state return state in HOST_EXISTS_STATES class AwsVmSpec(virtual_machine.BaseVmSpec): """Object containing the information needed to create an AwsVirtualMachine. Attributes: use_dedicated_host: bool. Whether to create this VM on a dedicated host. """ CLOUD = providers.AWS @classmethod def _ApplyFlags(cls, config_values, flag_values): """Modifies config options based on runtime flag values. Can be overridden by derived classes to add support for specific flags. Args: config_values: dict mapping config option names to provided values. May be modified by this function. flag_values: flags.FlagValues. Runtime flags that may override the provided config values. 
""" super(AwsVmSpec, cls)._ApplyFlags(config_values, flag_values) if flag_values['aws_boot_disk_size'].present: config_values['boot_disk_size'] = flag_values.aws_boot_disk_size if flag_values['aws_spot_instances'].present: config_values['use_spot_instance'] = flag_values.aws_spot_instances if flag_values['aws_spot_price'].present: config_values['spot_price'] = flag_values.aws_spot_price if flag_values['aws_spot_block_duration_minutes'].present: config_values['spot_block_duration_minutes'] = int( flag_values.aws_spot_block_duration_minutes) @classmethod def _GetOptionDecoderConstructions(cls): """Gets decoder classes and constructor args for each configurable option. Returns: dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair. The pair specifies a decoder class and its __init__() keyword arguments to construct in order to decode the named option. """ result = super(AwsVmSpec, cls)._GetOptionDecoderConstructions() result.update({ 'use_spot_instance': (option_decoders.BooleanDecoder, { 'default': False }), 'spot_price': (option_decoders.FloatDecoder, { 'default': None }), 'spot_block_duration_minutes': (option_decoders.IntDecoder, { 'default': None }), 'boot_disk_size': (option_decoders.IntDecoder, { 'default': None }) }) return result def _GetKeyfileSetKey(region): """Returns a key to use for the keyfile set. This prevents other runs in the same process from reusing the key. Args: region: The region the keyfile is in. 
""" return (region, FLAGS.run_uri) class AwsKeyFileManager(object): """Object for managing AWS Keyfiles.""" _lock = threading.Lock() imported_keyfile_set = set() deleted_keyfile_set = set() @classmethod def ImportKeyfile(cls, region): """Imports the public keyfile to AWS.""" with cls._lock: if _GetKeyfileSetKey(region) in cls.imported_keyfile_set: return cat_cmd = ['cat', vm_util.GetPublicKeyPath()] keyfile, _ = vm_util.IssueRetryableCommand(cat_cmd) formatted_tags = util.FormatTagSpecifications('key-pair', util.MakeDefaultTags()) import_cmd = util.AWS_PREFIX + [ 'ec2', '--region=%s' % region, 'import-key-pair', '--key-name=%s' % cls.GetKeyNameForRun(), '--public-key-material=%s' % keyfile, '--tag-specifications=%s' % formatted_tags, ] _, stderr, retcode = vm_util.IssueCommand( import_cmd, raise_on_failure=False) if retcode: if 'KeyPairLimitExceeded' in stderr: raise errors.Benchmarks.QuotaFailure( 'KeyPairLimitExceeded in %s: %s' % (region, stderr)) else: raise errors.Benchmarks.PrepareException(stderr) cls.imported_keyfile_set.add(_GetKeyfileSetKey(region)) if _GetKeyfileSetKey(region) in cls.deleted_keyfile_set: cls.deleted_keyfile_set.remove(_GetKeyfileSetKey(region)) @classmethod def DeleteKeyfile(cls, region): """Deletes the imported keyfile for a region.""" with cls._lock: if _GetKeyfileSetKey(region) in cls.deleted_keyfile_set: return delete_cmd = util.AWS_PREFIX + [ 'ec2', '--region=%s' % region, 'delete-key-pair', '--key-name=%s' % cls.GetKeyNameForRun()] util.IssueRetryableCommand(delete_cmd) cls.deleted_keyfile_set.add(_GetKeyfileSetKey(region)) if _GetKeyfileSetKey(region) in cls.imported_keyfile_set: cls.imported_keyfile_set.remove(_GetKeyfileSetKey(region)) @classmethod def GetKeyNameForRun(cls): return 'perfkit-key-{0}'.format(FLAGS.run_uri) class AwsVirtualMachine(virtual_machine.BaseVirtualMachine): """Object representing an AWS Virtual Machine.""" CLOUD = providers.AWS # The IMAGE_NAME_FILTER is passed to the AWS CLI describe-images command to # 
filter images by name. This must be set by subclasses, but may be overridden
  # by the aws_image_name_filter flag.
  IMAGE_NAME_FILTER = None

  # The IMAGE_NAME_REGEX can be used to further filter images by name. It
  # applies after the IMAGE_NAME_FILTER above. Note that before this regex is
  # applied, Python's string formatting is used to replace {virt_type} and
  # {disk_type} by the respective virtualization type and root disk type of
  # the VM, allowing the regex to contain these strings. This regex supports
  # arbitrary Python regular expressions to further narrow down the set of
  # images considered.
  IMAGE_NAME_REGEX = None

  # List of projects that own the AMIs of this OS type. Default to
  # AWS Marketplace official image project. Note that opt-in regions may have
  # a different image owner than default regions.
  IMAGE_OWNER = MARKETPLACE_IMAGE_PROJECT

  # Some AMIs use a project code to find the latest (in addition to owner,
  # and filter)
  IMAGE_PRODUCT_CODE_FILTER = None

  # CoreOS only distinguishes between stable and testing images in the
  # description
  IMAGE_DESCRIPTION_FILTER = None

  DEFAULT_ROOT_DISK_TYPE = 'gp2'
  DEFAULT_USER_NAME = 'ec2-user'

  # Class-level bookkeeping of dedicated hosts, shared across instances and
  # guarded by _lock.
  _lock = threading.Lock()
  deleted_hosts = set()
  host_map = collections.defaultdict(list)

  def __init__(self, vm_spec):
    """Initialize a AWS virtual machine.

    Args:
      vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.

    Raises:
      ValueError: If an incompatible vm_spec is passed.
    """
    super(AwsVirtualMachine, self).__init__(vm_spec)
    self.region = util.GetRegionFromZone(self.zone)
    self.user_name = FLAGS.aws_user_name or self.DEFAULT_USER_NAME
    if self.machine_type in aws_disk.NUM_LOCAL_VOLUMES:
      self.max_local_disks = aws_disk.NUM_LOCAL_VOLUMES[self.machine_type]
    self.user_data = None
    self.network = aws_network.AwsNetwork.GetNetwork(self)
    self.placement_group = getattr(vm_spec, 'placement_group',
                                   self.network.placement_group)
    self.firewall = aws_network.AwsFirewall.GetFirewall()
    self.use_dedicated_host = vm_spec.use_dedicated_host
    self.num_vms_per_host = vm_spec.num_vms_per_host
    self.use_spot_instance = vm_spec.use_spot_instance
    self.spot_price = vm_spec.spot_price
    self.spot_block_duration_minutes = vm_spec.spot_block_duration_minutes
    self.boot_disk_size = vm_spec.boot_disk_size
    # Client token makes run-instances idempotent; refreshed by _Exists on
    # server-side internal errors.
    self.client_token = str(uuid.uuid4())
    self.host = None
    self.id = None
    self.metadata.update({
        'spot_instance':
            self.use_spot_instance,
        'spot_price':
            self.spot_price,
        'spot_block_duration_minutes':
            self.spot_block_duration_minutes,
        'placement_group_strategy':
            self.placement_group.strategy
            if self.placement_group else placement_group.PLACEMENT_GROUP_NONE,
        'aws_credit_specification':
            FLAGS.aws_credit_specification
            if FLAGS.aws_credit_specification else 'none'
    })
    self.spot_early_termination = False
    self.spot_status_code = None
    # See:
    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-os.html
    self._smp_affinity_script = 'smp_affinity.sh'

    if self.use_dedicated_host and util.IsRegion(self.zone):
      raise ValueError(
          'In order to use dedicated hosts, you must specify an availability '
          'zone, not a region ("zone" was %s).' % self.zone)
    if self.use_dedicated_host and self.use_spot_instance:
      raise ValueError(
          'Tenancy=host is not supported for Spot Instances')
    self.allocation_id = None
    self.association_id = None
    self.aws_tags = {}

  @property
  def host_list(self):
    """Returns the list of hosts that are compatible with this VM."""
    return self.host_map[(self.machine_type, self.zone)]

  @property
  def group_id(self):
    """Returns the security group ID of this VM."""
    return self.network.regional_network.vpc.default_security_group_id

  @classmethod
  def GetDefaultImage(cls, machine_type, region):
    """Returns the default image given the machine type and region.

    If specified, the aws_image_name_filter and aws_image_name_regex flags
    will override os_type defaults.

    Args:
      machine_type: The machine_type of the VM, used to determine
        virtualization type.
      region: The region of the VM, as images are region specific.

    Raises:
      AwsImageNotFoundError: If a default image cannot be found.

    Returns:
      The ID of the latest image, or None if no default image is configured
      or none can be found.
    """
    # These cannot be REQUIRED_ATTRS, because nesting REQUIRED_ATTRS breaks.
if not cls.IMAGE_OWNER: raise NotImplementedError('AWS OSMixins require IMAGE_OWNER') if not cls.IMAGE_NAME_FILTER: raise NotImplementedError('AWS OSMixins require IMAGE_NAME_FILTER') if FLAGS.aws_image_name_filter: cls.IMAGE_NAME_FILTER = FLAGS.aws_image_name_filter if FLAGS.aws_image_name_regex: cls.IMAGE_NAME_REGEX = FLAGS.aws_image_name_regex prefix = machine_type.split('.')[0] virt_type = PV if prefix in NON_HVM_PREFIXES else HVM processor_architecture = GetProcessorArchitecture(machine_type) describe_cmd = util.AWS_PREFIX + [ '--region=%s' % region, 'ec2', 'describe-images', '--query', ('Images[*].{Name:Name,ImageId:ImageId,' 'CreationDate:CreationDate}'), '--filters', 'Name=name,Values=%s' % cls.IMAGE_NAME_FILTER, 'Name=block-device-mapping.volume-type,Values=%s' % cls.DEFAULT_ROOT_DISK_TYPE, 'Name=virtualization-type,Values=%s' % virt_type, 'Name=architecture,Values=%s' % processor_architecture] if cls.IMAGE_PRODUCT_CODE_FILTER: describe_cmd.extend(['Name=product-code,Values=%s' % cls.IMAGE_PRODUCT_CODE_FILTER]) if cls.IMAGE_DESCRIPTION_FILTER: describe_cmd.extend(['Name=description,Values=%s' % cls.IMAGE_DESCRIPTION_FILTER]) describe_cmd.extend(['--owners'] + cls.IMAGE_OWNER) stdout, _ = util.IssueRetryableCommand(describe_cmd) if not stdout: raise AwsImageNotFoundError('aws describe-images did not produce valid ' 'output.') if cls.IMAGE_NAME_REGEX: # Further filter images by the IMAGE_NAME_REGEX filter. 
image_name_regex = cls.IMAGE_NAME_REGEX.format( virt_type=virt_type, disk_type=cls.DEFAULT_ROOT_DISK_TYPE, architecture=processor_architecture) images = [] excluded_images = [] for image in json.loads(stdout): if re.search(image_name_regex, image['Name']): images.append(image) else: excluded_images.append(image) if excluded_images: logging.debug('Excluded the following images with regex "%s": %s', image_name_regex, sorted(image['Name'] for image in excluded_images)) else: images = json.loads(stdout) if not images: raise AwsImageNotFoundError('No AMIs with given filters found.') return max(images, key=lambda image: image['CreationDate'])['ImageId'] @vm_util.Retry(max_retries=2) def _PostCreate(self): """Get the instance's data and tag it.""" describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id] logging.info('Getting instance %s public IP. This will fail until ' 'a public IP is available, but will be retried.', self.id) stdout, _ = util.IssueRetryableCommand(describe_cmd) response = json.loads(stdout) instance = response['Reservations'][0]['Instances'][0] self.internal_ip = instance['PrivateIpAddress'] if util.IsRegion(self.zone): self.zone = str(instance['Placement']['AvailabilityZone']) assert self.group_id == instance['SecurityGroups'][0]['GroupId'], ( self.group_id, instance['SecurityGroups'][0]['GroupId']) if FLAGS.aws_efa: self._ConfigureEfa(instance) elif 'PublicIpAddress' in instance: self.ip_address = instance['PublicIpAddress'] else: raise errors.Resource.RetryableCreationError('Public IP not ready.') def _ConfigureEfa(self, instance): """Configuare EFA and associate Elastic IP. Args: instance: dict which contains instance info. 
""" if FLAGS.aws_efa_count > 1: self._ConfigureElasticIp(instance) else: self.ip_address = instance['PublicIpAddress'] if FLAGS.aws_efa_version: # Download EFA then call InstallEfa method so that subclass can override self.InstallPackages('curl') url = _EFA_URL.format(version=FLAGS.aws_efa_version) tarfile = posixpath.basename(url) self.RemoteCommand(f'curl -O {url}; tar -xzf {tarfile}') self._InstallEfa() # Run test program to confirm EFA working self.RemoteCommand('cd aws-efa-installer; ' 'PATH=${PATH}:/opt/amazon/efa/bin ./efa_test.sh') def _ConfigureElasticIp(self, instance): """Create and associate Elastic IP. Args: instance: dict which contains instance info. """ network_interface_id = None for network_interface in instance['NetworkInterfaces']: # The primary network interface (eth0) for the instance. if network_interface['Attachment']['DeviceIndex'] == 0: network_interface_id = network_interface['NetworkInterfaceId'] break assert network_interface_id is not None stdout, _, _ = vm_util.IssueCommand(util.AWS_PREFIX + ['ec2', 'allocate-address', f'--region={self.region}', '--domain=vpc']) response = json.loads(stdout) self.ip_address = response['PublicIp'] self.allocation_id = response['AllocationId'] util.AddDefaultTags(self.allocation_id, self.region) stdout, _, _ = vm_util.IssueCommand( util.AWS_PREFIX + ['ec2', 'associate-address', f'--region={self.region}', f'--allocation-id={self.allocation_id}', f'--network-interface-id={network_interface_id}']) response = json.loads(stdout) self.association_id = response['AssociationId'] def _InstallEfa(self): """Installs AWS EFA packages. 
See https://aws.amazon.com/hpc/efa/ """ if not self.TryRemoteCommand('ulimit -l | grep unlimited'): self.RemoteCommand(f'echo "{self.user_name} - memlock unlimited" | ' 'sudo tee -a /etc/security/limits.conf') self.RemoteCommand('cd aws-efa-installer; sudo ./efa_installer.sh -y') if not self.TryRemoteCommand('ulimit -l | grep unlimited'): # efa_installer.sh should reboot enabling this change, reboot if necessary self.Reboot() def _CreateDependencies(self): """Create VM dependencies.""" AwsKeyFileManager.ImportKeyfile(self.region) # GetDefaultImage calls the AWS CLI. self.image = self.image or self.GetDefaultImage(self.machine_type, self.region) self.AllowRemoteAccessPorts() if self.use_dedicated_host: with self._lock: if (not self.host_list or (self.num_vms_per_host and self.host_list[-1].fill_fraction + 1.0 / self.num_vms_per_host > 1.0)): host = AwsDedicatedHost(self.machine_type, self.zone) self.host_list.append(host) host.Create() self.host = self.host_list[-1] if self.num_vms_per_host: self.host.fill_fraction += 1.0 / self.num_vms_per_host def _DeleteDependencies(self): """Delete VM dependencies.""" AwsKeyFileManager.DeleteKeyfile(self.region) if self.host: with self._lock: if self.host in self.host_list: self.host_list.remove(self.host) if self.host not in self.deleted_hosts: self.host.Delete() self.deleted_hosts.add(self.host) def _Create(self): """Create a VM instance.""" placement = [] if not util.IsRegion(self.zone): placement.append('AvailabilityZone=%s' % self.zone) if self.use_dedicated_host: placement.append('Tenancy=host,HostId=%s' % self.host.id) num_hosts = len(self.host_list) elif self.placement_group: if IsPlacementGroupCompatible(self.machine_type): placement.append('GroupName=%s' % self.placement_group.name) else: logging.warning( 'VM not placed in Placement Group. 
VM Type %s not supported', self.machine_type) placement = ','.join(placement) block_device_map = GetBlockDeviceMap(self.machine_type, self.boot_disk_size, self.image, self.region) if not self.aws_tags: # Set tags for the AWS VM. If we are retrying the create, we have to use # the same tags from the previous call. self.aws_tags.update(self.vm_metadata) self.aws_tags.update(util.MakeDefaultTags()) create_cmd = util.AWS_PREFIX + [ 'ec2', 'run-instances', '--region=%s' % self.region, '--client-token=%s' % self.client_token, '--image-id=%s' % self.image, '--instance-type=%s' % self.machine_type, '--key-name=%s' % AwsKeyFileManager.GetKeyNameForRun(), '--tag-specifications=%s' % util.FormatTagSpecifications('instance', self.aws_tags)] if FLAGS.aws_vm_hibernate: create_cmd.extend([ '--hibernation-options=Configured=true', ]) if FLAGS.disable_smt: query_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instance-types', '--instance-types', self.machine_type, '--query', 'InstanceTypes[0].VCpuInfo.DefaultCores' ] stdout, _, retcode = vm_util.IssueCommand(query_cmd) cores = int(json.loads(stdout)) create_cmd.append(f'--cpu-options=CoreCount={cores},ThreadsPerCore=1') if FLAGS.aws_efa: efas = ['--network-interfaces'] for device_index in range(FLAGS.aws_efa_count): efa_params = _EFA_PARAMS.copy() efa_params.update({ 'NetworkCardIndex': device_index, 'DeviceIndex': device_index, 'Groups': self.group_id, 'SubnetId': self.network.subnet.id }) if FLAGS.aws_efa_count == 1: efa_params['AssociatePublicIpAddress'] = True efas.append(','.join(f'{key}={value}' for key, value in sorted(efa_params.items()))) create_cmd.extend(efas) else: create_cmd.append('--associate-public-ip-address') create_cmd.append(f'--subnet-id={self.network.subnet.id}') if block_device_map: create_cmd.append('--block-device-mappings=%s' % block_device_map) if placement: create_cmd.append('--placement=%s' % placement) if FLAGS.aws_credit_specification: create_cmd.append('--credit-specification=%s' % 
FLAGS.aws_credit_specification) if self.user_data: create_cmd.append('--user-data=%s' % self.user_data) if self.capacity_reservation_id: create_cmd.append( '--capacity-reservation-specification=CapacityReservationTarget=' '{CapacityReservationId=%s}' % self.capacity_reservation_id) if self.use_spot_instance: instance_market_options = collections.OrderedDict() spot_options = collections.OrderedDict() spot_options['SpotInstanceType'] = 'one-time' spot_options['InstanceInterruptionBehavior'] = 'terminate' if self.spot_price: spot_options['MaxPrice'] = str(self.spot_price) if self.spot_block_duration_minutes: spot_options['BlockDurationMinutes'] = self.spot_block_duration_minutes instance_market_options['MarketType'] = 'spot' instance_market_options['SpotOptions'] = spot_options create_cmd.append( '--instance-market-options=%s' % json.dumps(instance_market_options)) _, stderr, retcode = vm_util.IssueCommand(create_cmd, raise_on_failure=False) arm_arch = GetArmArchitecture(self.machine_type) if arm_arch: self.host_arch = arm_arch if self.use_dedicated_host and 'InsufficientCapacityOnHost' in stderr: if self.num_vms_per_host: raise errors.Resource.CreationError( 'Failed to create host: %d vms of type %s per host exceeds ' 'memory capacity limits of the host' % (self.num_vms_per_host, self.machine_type)) else: logging.warning( 'Creation failed due to insufficient host capacity. 
A new host will ' 'be created and instance creation will be retried.') with self._lock: if num_hosts == len(self.host_list): host = AwsDedicatedHost(self.machine_type, self.zone) self.host_list.append(host) host.Create() self.host = self.host_list[-1] self.client_token = str(uuid.uuid4()) raise errors.Resource.RetryableCreationError() if 'InsufficientInstanceCapacity' in stderr: if self.use_spot_instance: self.spot_status_code = 'InsufficientSpotInstanceCapacity' self.spot_early_termination = True raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr) if 'SpotMaxPriceTooLow' in stderr: self.spot_status_code = 'SpotMaxPriceTooLow' self.spot_early_termination = True raise errors.Resource.CreationError(stderr) if 'InstanceLimitExceeded' in stderr or 'VcpuLimitExceeded' in stderr: raise errors.Benchmarks.QuotaFailure(stderr) if 'RequestLimitExceeded' in stderr: if FLAGS.retry_on_rate_limited: raise errors.Resource.RetryableCreationError(stderr) else: raise errors.Benchmarks.QuotaFailure(stderr) # When launching more than 1 VM into the same placement group, there is an # occasional error that the placement group has already been used in a # separate zone. Retrying fixes this error. if 'InvalidPlacementGroup.InUse' in stderr: raise errors.Resource.RetryableCreationError(stderr) if 'Unsupported' in stderr: raise errors.Benchmarks.UnsupportedConfigError(stderr) if retcode: raise errors.Resource.CreationError( 'Failed to create VM: %s return code: %s' % (retcode, stderr)) @vm_util.Retry( poll_interval=0.5, log_errors=True, retryable_exceptions=(AwsTransitionalVmRetryableError,)) def _WaitForStoppedStatus(self): """Returns the status of the VM. Returns: Whether the VM is suspended i.e. in a stopped status. If not, raises an error Raises: AwsUnknownStatusError: If an unknown status is returned from AWS. AwsTransitionalVmRetryableError: If the VM is pending. This is retried. 
""" describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instance-status', '--region=%s' % self.region, '--instance-ids=%s' % self.id, '--include-all-instances', ] stdout, _ = util.IssueRetryableCommand(describe_cmd) response = json.loads(stdout) status = response['InstanceStatuses'][0]['InstanceState']['Name'] if status.lower() != 'stopped': logging.info('VM has status %s.', status) raise AwsTransitionalVmRetryableError() def _BeforeSuspend(self): """Prepares the instance for suspend by having the VM sleep for a given duration. This ensures the VM is ready for hibernation """ # Add a timer that waits for a given duration after vm instance is # created before calling suspend on the vm to ensure that the vm is # ready for hibernation in aws. time.sleep(600) def _PostSuspend(self): self._WaitForStoppedStatus() def _Suspend(self): """Suspends a VM instance.""" suspend_cmd = util.AWS_PREFIX + [ 'ec2', 'stop-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id, '--hibernate', ] try: vm_util.IssueCommand(suspend_cmd) except: raise errors.Benchmarks.KnownIntermittentError( 'Instance is still not ready to hibernate') self._PostSuspend() @vm_util.Retry( poll_interval=0.5, retryable_exceptions=(AwsTransitionalVmRetryableError,)) def _WaitForNewIP(self): """Checks for a new IP address, waiting if the VM is still pending. Raises: AwsTransitionalVmRetryableError: If VM is pending. This is retried. 
""" status_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instances', f'--region={self.region}', f'--instance-ids={self.id}' ] stdout, _, _ = vm_util.IssueCommand(status_cmd) response = json.loads(stdout) instance = response['Reservations'][0]['Instances'][0] if 'PublicIpAddress' in instance: self.ip_address = instance['PublicIpAddress'] else: logging.info('VM is pending.') raise AwsTransitionalVmRetryableError() def _PostResume(self): self._WaitForNewIP() def _Resume(self): """Resumes a VM instance.""" resume_cmd = util.AWS_PREFIX + [ 'ec2', 'start-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id, ] vm_util.IssueCommand(resume_cmd) self._PostResume() def _Delete(self): """Delete a VM instance.""" if self.id: delete_cmd = util.AWS_PREFIX + [ 'ec2', 'terminate-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id] vm_util.IssueCommand(delete_cmd, raise_on_failure=False) if hasattr(self, 'spot_instance_request_id'): cancel_cmd = util.AWS_PREFIX + [ '--region=%s' % self.region, 'ec2', 'cancel-spot-instance-requests', '--spot-instance-request-ids=%s' % self.spot_instance_request_id] vm_util.IssueCommand(cancel_cmd, raise_on_failure=False) if FLAGS.aws_efa: if self.association_id: vm_util.IssueCommand(util.AWS_PREFIX + ['ec2', 'disassociate-address', f'--region={self.region}', f'--association-id={self.association_id}']) if self.allocation_id: vm_util.IssueCommand(util.AWS_PREFIX + ['ec2', 'release-address', f'--region={self.region}', f'--allocation-id={self.allocation_id}']) # _Start or _Stop not yet implemented for AWS def _Start(self): """Starts the VM.""" if not self.id: raise errors.Benchmarks.RunError( 'Expected VM id to be non-null. 
Please make sure the VM exists.') start_cmd = util.AWS_PREFIX + [ 'ec2', 'start-instances', f'--region={self.region}', f'--instance-ids={self.id}' ] vm_util.IssueCommand(start_cmd) def _PostStart(self): self._WaitForNewIP() def _Stop(self): """Stops the VM.""" if not self.id: raise errors.Benchmarks.RunError( 'Expected VM id to be non-null. Please make sure the VM exists.') stop_cmd = util.AWS_PREFIX + [ 'ec2', 'stop-instances', f'--region={self.region}', f'--instance-ids={self.id}' ] vm_util.IssueCommand(stop_cmd) def _PostStop(self): self._WaitForStoppedStatus() def _UpdateInterruptibleVmStatusThroughApi(self): if hasattr(self, 'spot_instance_request_id'): describe_cmd = util.AWS_PREFIX + [ '--region=%s' % self.region, 'ec2', 'describe-spot-instance-requests', '--spot-instance-request-ids=%s' % self.spot_instance_request_id] stdout, _, _ = vm_util.IssueCommand(describe_cmd) sir_response = json.loads(stdout)['SpotInstanceRequests'] self.spot_status_code = sir_response[0]['Status']['Code'] self.spot_early_termination = ( self.spot_status_code in AWS_INITIATED_SPOT_TERMINAL_STATUSES) @vm_util.Retry( poll_interval=1, log_errors=False, retryable_exceptions=(AwsTransitionalVmRetryableError,)) def _Exists(self): """Returns whether the VM exists. This method waits until the VM is no longer pending. Returns: Whether the VM exists. Raises: AwsUnknownStatusError: If an unknown status is returned from AWS. AwsTransitionalVmRetryableError: If the VM is pending. This is retried. """ describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instances', '--region=%s' % self.region, '--filter=Name=client-token,Values=%s' % self.client_token] stdout, _ = util.IssueRetryableCommand(describe_cmd) response = json.loads(stdout) reservations = response['Reservations'] assert len(reservations) < 2, 'Too many reservations.' if not reservations: if not self.create_start_time: return False logging.info('No reservation returned by describe-instances. 
This ' 'sometimes shows up immediately after a successful ' 'run-instances command. Retrying describe-instances ' 'command.') raise AwsTransitionalVmRetryableError() instances = reservations[0]['Instances'] assert len(instances) == 1, 'Wrong number of instances.' status = instances[0]['State']['Name'] self.id = instances[0]['InstanceId'] if self.use_spot_instance: self.spot_instance_request_id = instances[0]['SpotInstanceRequestId'] if status not in INSTANCE_KNOWN_STATUSES: raise AwsUnknownStatusError('Unknown status %s' % status) if status in INSTANCE_TRANSITIONAL_STATUSES: logging.info('VM has status %s; retrying describe-instances command.', status) raise AwsTransitionalVmRetryableError() # In this path run-instances succeeded, a pending instance was created, but # not fulfilled so it moved to terminated. if (status == TERMINATED and instances[0]['StateReason']['Code'] == 'Server.InsufficientInstanceCapacity'): raise errors.Benchmarks.InsufficientCapacityCloudFailure( instances[0]['StateReason']['Message']) # In this path run-instances succeeded, a pending instance was created, but # instance is shutting down due to internal server error. This is a # retryable command for run-instance. # Client token needs to be refreshed for idempotency. 
if (status == SHUTTING_DOWN and instances[0]['StateReason']['Code'] == 'Server.InternalError'): self.client_token = str(uuid.uuid4()) return status in INSTANCE_EXISTS_STATUSES def _GetNvmeBootIndex(self): if (aws_disk.LocalDriveIsNvme(self.machine_type) and aws_disk.EbsDriveIsNvme(self.machine_type)): # identify boot drive # If this command ever fails consider 'findmnt -nM / -o source' cmd = ('realpath /dev/disk/by-label/cloudimg-rootfs ' '| grep --only-matching "nvme[0-9]*"') boot_drive = self.RemoteCommand(cmd, ignore_failure=True)[0].strip() if boot_drive: # get the boot drive index by dropping the nvme prefix boot_idx = int(boot_drive[4:]) logging.info('found boot drive at nvme index %d', boot_idx) return boot_idx else: logging.warning('Failed to identify NVME boot drive index. Assuming 0.') return 0 def CreateScratchDisk(self, disk_spec): """Create a VM's scratch disk. Args: disk_spec: virtual_machine.BaseDiskSpec object of the disk. Raises: CreationError: If an NFS disk is listed but the NFS service not created. """ # Instantiate the disk(s) that we want to create. disks = [] nvme_boot_drive_index = self._GetNvmeBootIndex() for _ in range(disk_spec.num_striped_disks): if disk_spec.disk_type == disk.NFS: data_disk = self._GetNfsService().CreateNfsDisk() else: data_disk = aws_disk.AwsDisk(disk_spec, self.zone, self.machine_type) if disk_spec.disk_type == disk.LOCAL: device_letter = chr(ord(DRIVE_START_LETTER) + self.local_disk_counter) data_disk.AssignDeviceLetter(device_letter, nvme_boot_drive_index) # Local disk numbers start at 1 (0 is the system disk). data_disk.disk_number = self.local_disk_counter + 1 self.local_disk_counter += 1 if self.local_disk_counter > self.max_local_disks: raise errors.Error('Not enough local disks.') elif disk_spec.disk_type == disk.NFS: pass else: # Remote disk numbers start at 1 + max_local disks (0 is the system disk # and local disks occupy [1, max_local_disks]). 
data_disk.disk_number = (self.remote_disk_counter + 1 + self.max_local_disks) self.remote_disk_counter += 1 disks.append(data_disk) self._CreateScratchDiskFromDisks(disk_spec, disks) def AddMetadata(self, **kwargs): """Adds metadata to the VM.""" util.AddTags(self.id, self.region, **kwargs) if self.use_spot_instance: util.AddDefaultTags(self.spot_instance_request_id, self.region) def InstallCli(self): """Installs the AWS cli and credentials on this AWS vm.""" self.Install('awscli') self.Install('aws_credentials') def DownloadPreprovisionedData(self, install_path, module_name, filename): """Downloads a data file from an AWS S3 bucket with pre-provisioned data. Use --aws_preprovisioned_data_bucket to specify the name of the bucket. Args: install_path: The install path on this VM. module_name: Name of the module associated with this data file. filename: The name of the file that was downloaded. """ self.InstallCli() # TODO(deitz): Add retry logic. self.RemoteCommand(GenerateDownloadPreprovisionedDataCommand( install_path, module_name, filename)) def ShouldDownloadPreprovisionedData(self, module_name, filename): """Returns whether or not preprovisioned data is available.""" self.Install('aws_credentials') self.Install('awscli') return FLAGS.aws_preprovisioned_data_bucket and self.TryRemoteCommand( GenerateStatPreprovisionedDataCommand(module_name, filename)) def IsInterruptible(self): """Returns whether this vm is an interruptible vm (spot vm). Returns: True if this vm is an interruptible vm (spot vm). """ return self.use_spot_instance def WasInterrupted(self): """Returns whether this spot vm was terminated early by AWS. Returns: True if this vm was terminated early by AWS. """ return self.spot_early_termination def GetVmStatusCode(self): """Returns the early termination code if any. Returns: Early termination code. """ return self.spot_status_code def GetResourceMetadata(self): """Returns a dict containing metadata about the VM. 
Returns: dict mapping string property key to value. """ result = super(AwsVirtualMachine, self).GetResourceMetadata() result['boot_disk_type'] = self.DEFAULT_ROOT_DISK_TYPE result['boot_disk_size'] = self.boot_disk_size if self.use_dedicated_host: result['num_vms_per_host'] = self.num_vms_per_host result['efa'] = FLAGS.aws_efa if FLAGS.aws_efa: result['efa_version'] = FLAGS.aws_efa_version result['efa_count'] = FLAGS.aws_efa_count result['preemptible'] = self.use_spot_instance return result class ClearBasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.ClearMixin): IMAGE_NAME_FILTER = 'clear/images/*/clear-*' DEFAULT_USER_NAME = 'clear' class CoreOsBasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.CoreOsMixin): IMAGE_NAME_FILTER = 'fedora-coreos-*-hvm' # CoreOS only distinguishes between stable and testing in the description IMAGE_DESCRIPTION_FILTER = 'Fedora CoreOS stable *' IMAGE_OWNER = CENTOS_IMAGE_PROJECT DEFAULT_USER_NAME = 'core' class Debian9BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.Debian9Mixin): # From https://wiki.debian.org/Cloud/AmazonEC2Image/Stretch IMAGE_NAME_FILTER = 'debian-stretch-*64-*' IMAGE_OWNER = DEBIAN_9_IMAGE_PROJECT DEFAULT_USER_NAME = 'admin' def _BeforeSuspend(self): """Prepares the aws vm for hibernation.""" raise NotImplementedError() class Debian10BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.Debian10Mixin): # From https://wiki.debian.org/Cloud/AmazonEC2Image/Buster IMAGE_NAME_FILTER = 'debian-10-*64*' IMAGE_OWNER = DEBIAN_IMAGE_PROJECT DEFAULT_USER_NAME = 'admin' class Debian11BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.Debian11Mixin): # From https://wiki.debian.org/Cloud/AmazonEC2Image/Buster IMAGE_NAME_FILTER = 'debian-11-*64*' IMAGE_OWNER = DEBIAN_IMAGE_PROJECT DEFAULT_USER_NAME = 'admin' class UbuntuBasedAwsVirtualMachine(AwsVirtualMachine): IMAGE_OWNER = UBUNTU_IMAGE_PROJECT DEFAULT_USER_NAME = 'ubuntu' class 
Ubuntu1604BasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine, linux_virtual_machine.Ubuntu1604Mixin): IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-xenial-16.04-*64-server-20*' def _InstallEfa(self): super(Ubuntu1604BasedAwsVirtualMachine, self)._InstallEfa() self.Reboot() self.WaitForBootCompletion() class Ubuntu1804BasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine, linux_virtual_machine.Ubuntu1804Mixin): IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-bionic-18.04-*64-server-20*' class Ubuntu1804EfaBasedAwsVirtualMachine( UbuntuBasedAwsVirtualMachine, linux_virtual_machine.Ubuntu1804EfaMixin): IMAGE_OWNER = UBUNTU_EFA_IMAGE_PROJECT IMAGE_NAME_FILTER = 'Deep Learning AMI GPU CUDA * (Ubuntu 18.04) *' class Ubuntu2004BasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine, linux_virtual_machine.Ubuntu2004Mixin): IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-focal-20.04-*64-server-20*' class JujuBasedAwsVirtualMachine(UbuntuBasedAwsVirtualMachine, linux_virtual_machine.JujuMixin): """Class with configuration for AWS Juju virtual machines.""" IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-trusty-14.04-*64-server-20*' class AmazonLinux2BasedAwsVirtualMachine( AwsVirtualMachine, linux_virtual_machine.AmazonLinux2Mixin): """Class with configuration for AWS Amazon Linux 2 virtual machines.""" IMAGE_NAME_FILTER = 'amzn2-ami-*-*-*' IMAGE_OWNER = AMAZON_LINUX_IMAGE_PROJECT class Rhel7BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.Rhel7Mixin): """Class with configuration for AWS RHEL 7 virtual machines.""" # Documentation on finding RHEL images: # https://access.redhat.com/articles/2962171 IMAGE_NAME_FILTER = 'RHEL-7*_GA*' IMAGE_OWNER = RHEL_IMAGE_PROJECT class Rhel8BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.Rhel8Mixin): """Class with configuration for AWS RHEL 8 virtual machines.""" # Documentation on finding RHEL images: # https://access.redhat.com/articles/2962181 # All RHEL AMIs are HVM. HVM- blocks HVM_BETA. 
IMAGE_NAME_FILTER = 'RHEL-8*_HVM-*' IMAGE_OWNER = RHEL_IMAGE_PROJECT class CentOs7BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.CentOs7Mixin): """Class with configuration for AWS CentOS 7 virtual machines.""" # Documentation on finding the CentOS 7 image: # https://wiki.centos.org/Cloud/AWS#x86_64 IMAGE_NAME_FILTER = 'CentOS 7*' IMAGE_OWNER = CENTOS_IMAGE_PROJECT DEFAULT_USER_NAME = 'centos' def _InstallEfa(self): logging.info('Upgrading Centos7 kernel, installing kernel headers and ' 'rebooting before installing EFA.') self.RemoteCommand('sudo yum upgrade -y kernel') self.InstallPackages('kernel-devel') self.Reboot() self.WaitForBootCompletion() super(CentOs7BasedAwsVirtualMachine, self)._InstallEfa() class CentOs8BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.CentOs8Mixin): """Class with configuration for AWS CentOS 8 virtual machines.""" # This describes the official AMIs listed here: # https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images IMAGE_OWNER = CENTOS_IMAGE_PROJECT IMAGE_NAME_FILTER = 'CentOS 8*' DEFAULT_USER_NAME = 'centos' class CentOsStream8BasedAwsVirtualMachine( AwsVirtualMachine, linux_virtual_machine.CentOsStream8Mixin): """Class with configuration for AWS CentOS Stream 8 virtual machines.""" # This describes the official AMIs listed here: # https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images IMAGE_OWNER = CENTOS_IMAGE_PROJECT IMAGE_NAME_FILTER = 'CentOS Stream 8*' DEFAULT_USER_NAME = 'centos' class RockyLinux8BasedAwsVirtualMachine(AwsVirtualMachine, linux_virtual_machine.RockyLinux8Mixin): """Class with configuration for AWS Rocky Linux 8 virtual machines.""" IMAGE_OWNER = MARKETPLACE_IMAGE_PROJECT IMAGE_PRODUCT_CODE_FILTER = 'cotnnspjrsi38lfn8qo4ibnnm' IMAGE_NAME_FILTER = 'Rocky-8-*' DEFAULT_USER_NAME = 'rocky' class CentOsStream9BasedAwsVirtualMachine( AwsVirtualMachine, linux_virtual_machine.CentOsStream9Mixin): """Class with configuration for AWS CentOS Stream 9 
virtual machines.""" # This describes the official AMIs listed here: # https://wiki.centos.org/Cloud/AWS#Official_CentOS_Linux_:_Public_Images IMAGE_OWNER = CENTOS_IMAGE_PROJECT IMAGE_NAME_FILTER = 'CentOS Stream 9*' class BaseWindowsAwsVirtualMachine(AwsVirtualMachine, windows_virtual_machine.BaseWindowsMixin): """Support for Windows machines on AWS.""" DEFAULT_USER_NAME = 'Administrator' IMAGE_OWNER = WINDOWS_IMAGE_PROJECT def __init__(self, vm_spec): super(BaseWindowsAwsVirtualMachine, self).__init__(vm_spec) self.user_data = ('<powershell>%s</powershell>' % windows_virtual_machine.STARTUP_SCRIPT) @vm_util.Retry() def _GetDecodedPasswordData(self): # Retrieve a base64 encoded, encrypted password for the VM. get_password_cmd = util.AWS_PREFIX + [ 'ec2', 'get-password-data', '--region=%s' % self.region, '--instance-id=%s' % self.id] stdout, _ = util.IssueRetryableCommand(get_password_cmd) response = json.loads(stdout) password_data = response['PasswordData'] # AWS may not populate the password data until some time after # the VM shows as running. Simply retry until the data shows up. if not password_data: raise ValueError('No PasswordData in response.') # Decode the password data. return base64.b64decode(password_data) def _PostCreate(self): """Retrieve generic VM info and then retrieve the VM's password.""" super(BaseWindowsAwsVirtualMachine, self)._PostCreate() # Get the decoded password data. decoded_password_data = self._GetDecodedPasswordData() # Write the encrypted data to a file, and use openssl to # decrypt the password. with vm_util.NamedTemporaryFile() as tf: tf.write(decoded_password_data) tf.close() decrypt_cmd = ['openssl', 'rsautl', '-decrypt', '-in', tf.name, '-inkey', vm_util.GetPrivateKeyPath()] password, _ = vm_util.IssueRetryableCommand(decrypt_cmd) self.password = password def GetResourceMetadata(self): """Returns a dict containing metadata about the VM. Returns: dict mapping metadata key to value. 
""" result = super(BaseWindowsAwsVirtualMachine, self).GetResourceMetadata() result['disable_interrupt_moderation'] = self.disable_interrupt_moderation return result @vm_util.Retry( max_retries=10, retryable_exceptions=(AwsUnexpectedWindowsAdapterOutputError, errors.VirtualMachine.RemoteCommandError)) def DisableInterruptModeration(self): """Disable the networking feature 'Interrupt Moderation'.""" # First ensure that the driver supports interrupt moderation net_adapters, _ = self.RemoteCommand('Get-NetAdapter') if 'Intel(R) 82599 Virtual Function' not in net_adapters: raise AwsDriverDoesntSupportFeatureError( 'Driver not tested with Interrupt Moderation in PKB.') aws_int_dis_path = ('HKLM\\SYSTEM\\ControlSet001\\Control\\Class\\' '{4d36e972-e325-11ce-bfc1-08002be10318}\\0011') command = 'reg add "%s" /v *InterruptModeration /d 0 /f' % aws_int_dis_path self.RemoteCommand(command) try: self.RemoteCommand('Restart-NetAdapter -Name "Ethernet 2"') except IOError: # Restarting the network adapter will always fail because # the winrm connection used to issue the command will be # broken. 
pass int_dis_value, _ = self.RemoteCommand( 'reg query "%s" /v *InterruptModeration' % aws_int_dis_path) # The second line should look like: # *InterruptModeration REG_SZ 0 registry_query_lines = int_dis_value.splitlines() if len(registry_query_lines) < 3: raise AwsUnexpectedWindowsAdapterOutputError( 'registry query failed: %s ' % int_dis_value) registry_query_result = registry_query_lines[2].split() if len(registry_query_result) < 3: raise AwsUnexpectedWindowsAdapterOutputError( 'unexpected registry query response: %s' % int_dis_value) if registry_query_result[2] != '0': raise AwsUnexpectedWindowsAdapterOutputError( 'InterruptModeration failed to disable') class Windows2012CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2012CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2012-R2_RTM-English-64Bit-Core-*' class Windows2016CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2016CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2016-English-Core-Base-*' class Windows2019CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Core-Base-*' class Windows2022CoreAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022CoreMixin): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Core-Base-*' class Windows2012DesktopAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2012DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2012-R2_RTM-English-64Bit-Base-*' class Windows2016DesktopAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2016DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2016-English-Full-Base-*' class Windows2019DesktopAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Full-Base-*' class Windows2022DesktopAwsVirtualMachine( 
BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022DesktopMixin): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Full-Base-*' class Windows2019DesktopSQLServer2019StandardAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019SQLServer2019Standard): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Full-SQL_2019_Standard-*' class Windows2019DesktopSQLServer2019EnterpriseAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2019SQLServer2019Enterprise): IMAGE_NAME_FILTER = 'Windows_Server-2019-English-Full-SQL_2019_Enterprise-*' class Windows2022DesktopSQLServer2019StandardAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022SQLServer2019Standard): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Full-SQL_2019_Standard-*' class Windows2022DesktopSQLServer2019EnterpriseAwsVirtualMachine( BaseWindowsAwsVirtualMachine, windows_virtual_machine.Windows2022SQLServer2019Enterprise): IMAGE_NAME_FILTER = 'Windows_Server-2022-English-Full-SQL_2019_Enterprise-*' def GenerateDownloadPreprovisionedDataCommand(install_path, module_name, filename): """Returns a string used to download preprovisioned data.""" return 'aws s3 cp --only-show-errors s3://%s/%s/%s %s' % ( FLAGS.aws_preprovisioned_data_bucket, module_name, filename, posixpath.join(install_path, filename)) def GenerateStatPreprovisionedDataCommand(module_name, filename): """Returns a string used to download preprovisioned data.""" return 'aws s3api head-object --bucket %s --key %s/%s' % ( FLAGS.aws_preprovisioned_data_bucket, module_name, filename)
#!/usr/bin/python from __future__ import unicode_literals, print_function, division from datetime import datetime, timedelta api_ok = True try: import tvdb_api import tvdb_exceptions except: print("tvdb api not available") api_ok = False from operator import itemgetter def command_ep(bot, user, channel, args): """Usage: ep <series name>""" if not api_ok: return t = tvdb_api.Tvdb() now = datetime.now() # one day resolution maximum now = now.replace(hour=0, minute=0, second=0, microsecond=0) # prevent "Series '' not found" if not args: return try: series = t[args] except tvdb_exceptions.tvdb_shownotfound: bot.say(channel, "Series '%s' not found" % args) return future_episodes = [] all_episodes = [] # find all episodes with airdate > now for season_no, season in series.items(): for episode_no, episode in season.items(): firstaired = episode['firstaired'] if not firstaired: continue airdate = datetime.strptime(firstaired, "%Y-%m-%d") td = airdate - now all_episodes.append(episode) # list all unaired episodes if td >= timedelta(0, 0, 0): future_episodes.append(episode) # if any future episodes were found, find out the one with airdate closest to now if future_episodes: # sort the list just in case it's out of order (specials are season 0) future_episodes = sorted(future_episodes, key=itemgetter('firstaired')) episode = future_episodes[0] td = datetime.strptime(episode['firstaired'], "%Y-%m-%d") - now if td.days == 1: airdate = "tomorrow" elif td.days > 1: airdate = "%s (%d days)" % (episode['firstaired'], td.days) else: airdate = "today" season_ep = "%dx%02d" % (int(episode['combined_season']),int(episode['combined_episodenumber'])) msg = "Next episode of %s %s '%s' airs %s" % (series.data['seriesname'], season_ep, episode['episodename'], airdate) # no future episodes found, show the latest one elif all_episodes: # find latst episode of the show all_episodes = sorted(all_episodes, key=itemgetter('firstaired')) episode = all_episodes[-1] td = now - 
datetime.strptime(episode['firstaired'], "%Y-%m-%d") airdate = "%s (%d days ago)" % (episode['firstaired'], td.days) season_ep = "%dx%02d" % (int(episode['combined_season']),int(episode['combined_episodenumber'])) msg = "Latest episode of %s %s '%s' aired %s" % (series.data['seriesname'], season_ep, episode['episodename'], airdate) else: msg = "No new or past episode airdates found for %s" % series.data['seriesname'] bot.say(channel, msg.encode("UTF-8")) Handle episode numbers returned as floats Display episode age in years and days #!/usr/bin/python from __future__ import unicode_literals, print_function, division from datetime import datetime, timedelta api_ok = True try: import tvdb_api import tvdb_exceptions except: print("tvdb api not available") api_ok = False from operator import itemgetter def command_ep(bot, user, channel, args): """Usage: ep <series name>""" if not api_ok: return t = tvdb_api.Tvdb() now = datetime.now() # one day resolution maximum now = now.replace(hour=0, minute=0, second=0, microsecond=0) # prevent "Series '' not found" if not args: return try: series = t[args] except tvdb_exceptions.tvdb_shownotfound: bot.say(channel, "Series '%s' not found" % args) return future_episodes = [] all_episodes = [] # find all episodes with airdate > now for season_no, season in series.items(): for episode_no, episode in season.items(): firstaired = episode['firstaired'] if not firstaired: continue airdate = datetime.strptime(firstaired, "%Y-%m-%d") td = airdate - now all_episodes.append(episode) # list all unaired episodes if td >= timedelta(0, 0, 0): future_episodes.append(episode) # if any future episodes were found, find out the one with airdate closest to now if future_episodes: # sort the list just in case it's out of order (specials are season 0) future_episodes = sorted(future_episodes, key=itemgetter('firstaired')) episode = future_episodes[0] td = datetime.strptime(episode['firstaired'], "%Y-%m-%d") - now if td.days == 1: airdate = "tomorrow" 
elif td.days > 1: airdate = "%s (%d days)" % (episode['firstaired'], td.days) else: airdate = "today" season_ep = "%dx%02d" % (int(episode['combined_season']),int(episode['combined_episodenumber'])) msg = "Next episode of %s %s '%s' airs %s" % (series.data['seriesname'], season_ep, episode['episodename'], airdate) # no future episodes found, show the latest one elif all_episodes: # find latest episode of the show all_episodes = sorted(all_episodes, key=itemgetter('firstaired')) episode = all_episodes[-1] ## episode age in years and days td = now - datetime.strptime(episode['firstaired'], "%Y-%m-%d") years, days = td.days // 365, td.days % 365 agestr = [] if years > 1: agestr.append("%d years" % years) if days > 0: agestr.append("%d days" % days) airdate = "%s (%s ago)" % (episode['firstaired'], " ".join(agestr)) season_no = int(episode['combined_season']) # the episode number is sometimes returned as a float, hack it. episode_no = int(float(episode['combined_episodenumber'])) season_ep = "%dx%02d" % (season_no, episode_no) msg = "Latest episode of %s %s '%s' aired %s" % (series.data['seriesname'], season_ep, episode['episodename'], airdate) else: msg = "No new or past episode airdates found for %s" % series.data['seriesname'] bot.say(channel, msg.encode("UTF-8"))
import json import os.path as path import urllib import urllib2 from datetime import datetime from time import strptime from models import * # sub and dub is 0=both 1=sub only 2=dub only if path.exists('settings'): try: config = open('settings', 'r') sub_dub = int(config.read()) except: config = open('settings', 'w') config.write('0') config.close() sub_dub = 0 else: config = open('settings', 'w') config.write('0') config.close() sub_dub = 0 base_url = 'http://wpc.8c48.edgecastcdn.net' bitrate = [2000, 3500, 4000] funimation_url = 'https://www.funimation.com/' order_types = ['asc', 'desc'] rating_type = ['tvpg', 'tv14', 'tvma', 'nr', 'pg', 'pg13', 'r', 'all'] sort_types = ['alpha', 'date', 'dvd', 'now', 'soon', 'votes', 'episode', 'title', 'sequence'] genre_types = ['all', 'action', 'adventure', 'bishonen', 'bishoujo', 'comedy', 'cyberpunk', 'drama', 'fan service', 'fantasy', 'harem', 'historical', 'horror', 'live action', 'magical girl', 'martial arts', 'mecha', 'moe', 'mystery', 'reverse harem', 'romance', 'school', 'sci fi', 'shonen', 'slice of life', 'space', 'sports', 'super power', 'supernatural', 'yuri'] urls = { 'details': 'mobile/node/{showid}', 'search': 'mobile/shows.json/alpha/asc/nl/all/all?keys={term}', 'shows': 'mobile/shows.json/{sort}/{order}/{limit}/{rating}/{genre}', 'clips': 'mobile/clips.json/sequence/{order}/{showid}/all/all?page={page}', 'trailers': 'mobile/trailers.json/sequence/{order}/{showid}/all/all?page={page}', 'movies': 'mobile/movies.json/{v_type}/{sort}/{order}/all/{showid}?page={page}', 'episodes': 'mobile/episodes.json/{v_type}/sequence/{order}/all/{showid}?page={page}', 'stream': 'http://wpc.8c48.edgecastcdn.net/038C48/SV/480/{video_id}/{video_id}-480-{quality}K.mp4.m3u8?9b303b6c62204a9dcb5ce5f5c607', } def fix_keys(d): def fix_key(key): return key.lower().replace(' ', '_').replace('-', '_') def fix(x): if isinstance(x, dict): return dict((fix_key(k), fix(v)) for k, v in x.iteritems()) elif isinstance(x, list): return [fix(i) for 
i in x] else: return x return fix(d) def convert_values(d): for k, v in d.items(): if k == 'video_section' or k == 'aip': d[k] = v.values() if isinstance(v, dict) else [] elif k == 'votes' or k == 'nid' or k == 'show_id': d[k] = int(v) if v is not None else 0 elif k == 'episode_number': d[k] = int(float(v)) if v is not None else 0 elif k == 'post_date': try: d[k] = datetime.strptime(v, '%m/%d/%Y') except TypeError: d[k] = datetime(*(strptime(v, '%m/%d/%Y')[0:6])) elif k == 'duration': d[k] = v elif k == 'all_terms' or k == 'term': d[k] = v.split(', ') elif k == 'similar_shows': d[k] = [int(i) for i in v.split(',') if isinstance(i, list)] elif k == 'video_quality': d[k] = v.values() if isinstance(v, dict) else [d[k]] elif k == 'promo': d[k] = v == 'Promo' elif k == 'type': d[k] = v[7:] elif k == 'maturity_rating': d[k] = str(v) elif k == 'mpaa': d[k] = ','.join(v.values()) if isinstance(v, dict) else v return d def process_response(data): # collapse data into list of dicts data = [i[i.keys()[0]] for i in data[data.keys()[0]]] # fix dict key names data = fix_keys(data) # fix up the values data = [convert_values(i) for i in data] if data[0].has_key('group_title'): return [EpisodeDetail(**i) for i in data] elif data[0].has_key('maturity_rating'): return [Show(**i) for i in data] elif data[0].has_key('episode_number'): return [Episode(**i) for i in data] elif data[0].has_key('tv_key_art'): return [Movie(**i) for i in data] elif data[0].has_key('funimationid'): return [Clip(**i) for i in data] elif data[0].has_key('is_mature'): return [Trailer(**i) for i in data] else: return data def filter_response(data): # just check the first object since all will be the same if data[0].get('sub_dub') is None: return data # both if sub_dub == 0: return data # sub elif sub_dub == 1: ret = [ep for ep in data if ep.sub] return ret # dub elif sub_dub == 2: ret = [ep for ep in data if ep.dub] return ret else: # just in case return data def process_data(url): resp = get(url) data = 
process_response(resp) return filter_response(data) def get_data_url(endpoint, series=0): if endpoint == 'shows': params = check_params(sort='alpha') url = urls[endpoint].format(**params) return url if series != 0: params = check_params(showid=series) url = urls[endpoint].format(**params) return url params = check_params() url = urls[endpoint].format(**params) return url def check_params(showid=0, page=0, sort=None, order=None, limit=None, rating=None, genre=None, term=None): if sort is None or sort not in sort_types: sort = 'date' if order is None or order not in order_types: order = 'asc' if limit is None or not limit.isdigit(): limit = 'nl' # no limit if rating is None or rating not in rating_type: rating = 'all' if genre is None or genre not in genre_types: genre = 'all' if term is None: term = '' v_type = 'subscription' return locals() def get(endpoint, params=None): if endpoint.startswith('http'): url = endpoint else: url = base_url.format(endpoint) if params is None: content = urllib2.urlopen(url).read() else: content = urllib2.urlopen(url + urllib.urlencode(params)).read() return json.loads(content) def stream_url(video_id, quality): url = urls['stream'].format(**locals()) return url def qual(episode): q = len(episode.video_quality) - 1 return bitrate[q] def get_shows(): show_url = get_data_url('shows') shows = process_data(funimation_url + show_url) return shows def print_shows(show_list): for item in range(0, len(show_list)): title = show_list[item].title print item, ':', title, '- nid:', show_list[item].nid def print_eps(ep_list): for item in range(0,len(ep_list)): title = ep_list[item].title ep_number = ep_list[item].episode_number lang = ep_list[item].sub_dub ep_id = ep_list[item].funimation_id print ep_number,':',title,'-',lang,':',ep_id Changed show episode print to print url import json import os.path as path import urllib import urllib2 from datetime import datetime from time import strptime from models import * # sub and dub is 0=both 1=sub only 
2=dub only if path.exists('settings'): try: config = open('settings', 'r') sub_dub = int(config.read()) except: config = open('settings', 'w') config.write('0') config.close() sub_dub = 0 else: config = open('settings', 'w') config.write('0') config.close() sub_dub = 0 base_url = 'http://wpc.8c48.edgecastcdn.net' bitrate = [2000, 3500, 4000] funimation_url = 'https://www.funimation.com/' order_types = ['asc', 'desc'] rating_type = ['tvpg', 'tv14', 'tvma', 'nr', 'pg', 'pg13', 'r', 'all'] sort_types = ['alpha', 'date', 'dvd', 'now', 'soon', 'votes', 'episode', 'title', 'sequence'] genre_types = ['all', 'action', 'adventure', 'bishonen', 'bishoujo', 'comedy', 'cyberpunk', 'drama', 'fan service', 'fantasy', 'harem', 'historical', 'horror', 'live action', 'magical girl', 'martial arts', 'mecha', 'moe', 'mystery', 'reverse harem', 'romance', 'school', 'sci fi', 'shonen', 'slice of life', 'space', 'sports', 'super power', 'supernatural', 'yuri'] urls = { 'details': 'mobile/node/{showid}', 'search': 'mobile/shows.json/alpha/asc/nl/all/all?keys={term}', 'shows': 'mobile/shows.json/{sort}/{order}/{limit}/{rating}/{genre}', 'clips': 'mobile/clips.json/sequence/{order}/{showid}/all/all?page={page}', 'trailers': 'mobile/trailers.json/sequence/{order}/{showid}/all/all?page={page}', 'movies': 'mobile/movies.json/{v_type}/{sort}/{order}/all/{showid}?page={page}', 'episodes': 'mobile/episodes.json/{v_type}/sequence/{order}/all/{showid}?page={page}', 'stream': 'http://wpc.8c48.edgecastcdn.net/038C48/SV/480/{video_id}/{video_id}-480-{quality}K.mp4.m3u8?9b303b6c62204a9dcb5ce5f5c607', } def fix_keys(d): def fix_key(key): return key.lower().replace(' ', '_').replace('-', '_') def fix(x): if isinstance(x, dict): return dict((fix_key(k), fix(v)) for k, v in x.iteritems()) elif isinstance(x, list): return [fix(i) for i in x] else: return x return fix(d) def convert_values(d): for k, v in d.items(): if k == 'video_section' or k == 'aip': d[k] = v.values() if isinstance(v, dict) else [] elif 
k == 'votes' or k == 'nid' or k == 'show_id': d[k] = int(v) if v is not None else 0 elif k == 'episode_number': d[k] = int(float(v)) if v is not None else 0 elif k == 'post_date': try: d[k] = datetime.strptime(v, '%m/%d/%Y') except TypeError: d[k] = datetime(*(strptime(v, '%m/%d/%Y')[0:6])) elif k == 'duration': d[k] = v elif k == 'all_terms' or k == 'term': d[k] = v.split(', ') elif k == 'similar_shows': d[k] = [int(i) for i in v.split(',') if isinstance(i, list)] elif k == 'video_quality': d[k] = v.values() if isinstance(v, dict) else [d[k]] elif k == 'promo': d[k] = v == 'Promo' elif k == 'type': d[k] = v[7:] elif k == 'maturity_rating': d[k] = str(v) elif k == 'mpaa': d[k] = ','.join(v.values()) if isinstance(v, dict) else v return d def process_response(data): # collapse data into list of dicts data = [i[i.keys()[0]] for i in data[data.keys()[0]]] # fix dict key names data = fix_keys(data) # fix up the values data = [convert_values(i) for i in data] if data[0].has_key('group_title'): return [EpisodeDetail(**i) for i in data] elif data[0].has_key('maturity_rating'): return [Show(**i) for i in data] elif data[0].has_key('episode_number'): return [Episode(**i) for i in data] elif data[0].has_key('tv_key_art'): return [Movie(**i) for i in data] elif data[0].has_key('funimationid'): return [Clip(**i) for i in data] elif data[0].has_key('is_mature'): return [Trailer(**i) for i in data] else: return data def filter_response(data): # just check the first object since all will be the same if data[0].get('sub_dub') is None: return data # both if sub_dub == 0: return data # sub elif sub_dub == 1: ret = [ep for ep in data if ep.sub] return ret # dub elif sub_dub == 2: ret = [ep for ep in data if ep.dub] return ret else: # just in case return data def process_data(url): resp = get(url) data = process_response(resp) return filter_response(data) def get_data_url(endpoint, series=0): if endpoint == 'shows': params = check_params(sort='alpha') url = 
urls[endpoint].format(**params) return url if series != 0: params = check_params(showid=series) url = urls[endpoint].format(**params) return url params = check_params() url = urls[endpoint].format(**params) return url def check_params(showid=0, page=0, sort=None, order=None, limit=None, rating=None, genre=None, term=None): if sort is None or sort not in sort_types: sort = 'date' if order is None or order not in order_types: order = 'asc' if limit is None or not limit.isdigit(): limit = 'nl' # no limit if rating is None or rating not in rating_type: rating = 'all' if genre is None or genre not in genre_types: genre = 'all' if term is None: term = '' v_type = 'subscription' return locals() def get(endpoint, params=None): if endpoint.startswith('http'): url = endpoint else: url = base_url.format(endpoint) if params is None: content = urllib2.urlopen(url).read() else: content = urllib2.urlopen(url + urllib.urlencode(params)).read() return json.loads(content) def stream_url(video_id, quality): url = urls['stream'].format(**locals()) return url def qual(episode): q = len(episode.video_quality) - 1 return bitrate[q] def get_shows(): show_url = get_data_url('shows') shows = process_data(funimation_url + show_url) return shows def print_shows(show_list): for item in range(0, len(show_list)): title = show_list[item].title print item, ':', title, '- nid:', show_list[item].nid def print_eps(ep_list): for item in range(0,len(ep_list)): title = ep_list[item].title ep_number = ep_list[item].episode_number lang = ep_list[item].sub_dub ep_url = stream_url(ep_list[item].funimation_id,qual(ep_list[item])) print ep_number,':',title,'-',lang,':',ep_url
# -*- coding: utf-8 -*- from __future__ import print_function from builtins import object import websocket import logging from purkinje_messages.message import TestCaseStartEvent class TestMonitorPlugin(object): """py.test plugin for monitoring test progress and capturing results TODO graceful termination """ def __init__(self, websocket_url): self.reports = [] self._websocket_url = websocket_url self._websocket = None try: self._log('Connecting to WebSocket %s', websocket_url) self._websocket = websocket.create_connection(websocket_url) except ValueError as e: self._log('Invalid WebSocket URL: "%s"', self._websocket_url) except Exception as e: self._log('Error connecting to WebSocket at URL %s: %s', self._websocket_url, e) def is_websocket_connected(self): return self._websocket is not None def send_event(self, event): """Send event via WebSocket connection. If there is no connection, the event will be dumped to the log only, so it is possible to run tests with purkinje enabled even if the server should be down """ ser_event = None try: ser_event = event.serialize() if self._websocket: self._websocket.send(ser_event) else: self._log('purkinje server not available; event: %s', ser_event) except Exception as e: logging.exception(e) self._log('Error while sending event "%s": %s', ser_event or event.data, e) def pytest_sessionfinish(self): self._log('*** py.test session finished ***') # def pytest_collect_file(self, path, parent): # print('pytest_collect_file: {}'.format(path)) def pytest_collection_modifyitems(self, session, config, items): print('pytest_collection_modifyitems: {} {} {}'.format(session, config, items)) def pytest_collectstart(self, collector): self._log('pytest_collectstart: %s', collector) # import pdb; pdb.set_trace() def pytest_collectreport(self, report): self._log('pytest_collectreport: %s', report) self.send_event(TestCaseStartEvent(text='TODO xyz')) self.reports.append(report) #import pdb #pdb.set_trace() def _log(self, fmt, *args): # TODO use 
print, logging or py.test facility if it exists fmt = '** testmon: %s **' % fmt print(fmt % args) def pytest_addoption(parser): parser.addoption('--websocket_url') def pytest_configure(config): websocket_url = config.getoption('websocket_url') config.pluginmanager.register(TestMonitorPlugin(websocket_url)) PEP8 fix # -*- coding: utf-8 -*- from __future__ import print_function from builtins import object import websocket import logging from purkinje_messages.message import TestCaseStartEvent class TestMonitorPlugin(object): """py.test plugin for monitoring test progress and capturing results TODO graceful termination """ def __init__(self, websocket_url): self.reports = [] self._websocket_url = websocket_url self._websocket = None try: self._log('Connecting to WebSocket %s', websocket_url) self._websocket = websocket.create_connection(websocket_url) except ValueError as e: self._log('Invalid WebSocket URL: "%s"', self._websocket_url) except Exception as e: self._log('Error connecting to WebSocket at URL %s: %s', self._websocket_url, e) def is_websocket_connected(self): return self._websocket is not None def send_event(self, event): """Send event via WebSocket connection. 
If there is no connection, the event will be dumped to the log only, so it is possible to run tests with purkinje enabled even if the server should be down """ ser_event = None try: ser_event = event.serialize() if self._websocket: self._websocket.send(ser_event) else: self._log('purkinje server not available; event: %s', ser_event) except Exception as e: logging.exception(e) self._log('Error while sending event "%s": %s', ser_event or event.data, e) def pytest_sessionfinish(self): self._log('*** py.test session finished ***') # def pytest_collect_file(self, path, parent): # print('pytest_collect_file: {}'.format(path)) def pytest_collection_modifyitems(self, session, config, items): print('pytest_collection_modifyitems: {} {} {}'.format(session, config, items)) def pytest_collectstart(self, collector): self._log('pytest_collectstart: %s', collector) # import pdb; pdb.set_trace() def pytest_collectreport(self, report): self._log('pytest_collectreport: %s', report) self.send_event(TestCaseStartEvent(text='TODO xyz')) self.reports.append(report) # import pdb # pdb.set_trace() def _log(self, fmt, *args): # TODO use print, logging or py.test facility if it exists fmt = '** testmon: %s **' % fmt print(fmt % args) def pytest_addoption(parser): parser.addoption('--websocket_url') def pytest_configure(config): websocket_url = config.getoption('websocket_url') config.pluginmanager.register(TestMonitorPlugin(websocket_url))
""" Highly experimental script that compiles the CPython standard library using Cython. Execute the script either in the CPython 'Lib' directory or pass the option '--current-python' to compile the standard library of the running Python interpreter. Pass '--parallel' to get a parallel build. Usage example:: $ python cystdlib.py --current-python build_ext -i """ import sys from distutils.core import setup from Cython.Build import cythonize from Cython.Compiler import Options # improve Python compatibility by allowing some broken code Options.error_on_unknown_names = False excludes = ['**/test/**/*.py', '**/tests/**/*.py', '**/__init__.py'] broken = [ 'idlelib/MultiCall.py', 'email/utils.py', 'multiprocessing/reduction.py', 'multiprocessing/util.py', 'threading.py', # interrupt handling 'lib2to3/fixes/fix_sys_exc.py', 'traceback.py', ] default_directives = dict( auto_cpdef=False, # enable when it's safe, see long list of failures below binding=True, set_initial_path='SOURCEFILE') special_directives = [ (['pkgutil.py', 'decimal.py', 'datetime.py', 'optparse.py', 'sndhdr.py', 'opcode.py', 'ntpath.py', 'urllib/request.py', 'plat-*/TYPES.py', 'plat-*/IN.py', 'tkinter/_fix.py', 'lib2to3/refactor.py', 'webbrowser.py', 'shutil.py', 'multiprocessing/forking.py', 'xml/sax/expatreader.py', 'xmlrpc/client.py', 'pydoc.py', 'xml/etree/ElementTree.py', 'posixpath.py', 'inspect.py', 'ctypes/util.py', 'urllib/parse.py', 'warnings.py', 'tempfile.py', 'trace.py', 'heapq.py', 'pickletools.py', 'multiprocessing/connection.py', 'hashlib.py', 'getopt.py', 'os.py', 'types.py', ], dict(auto_cpdef=False)), ] del special_directives[:] # currently unused def build_extensions(includes='**/*.py', excludes=excludes+broken, special_directives=special_directives, parallel=None): if isinstance(includes, str): includes = [includes] all_groups = (special_directives or []) + [(includes, {})] extensions = [] for modules, directives in all_groups: exclude_now = excludes[:] for other_modules, _ in 
special_directives: if other_modules != modules: exclude_now.extend(other_modules) d = dict(default_directives) d.update(directives) extensions.extend( cythonize(modules, exclude=exclude_now, exclude_failures=True, language_level=pyver, compiler_directives=d, nthreads=parallel, )) return extensions def build(extensions): try: setup(name = 'stuff', ext_modules = extensions) return extensions, True except: import traceback print('error building extensions %s' % ([ext.name for ext in extensions],)) traceback.print_exc() return extensions, False def _build(args): sys_args, ext = args sys.argv[1:] = sys_args return build([ext]) if __name__ == '__main__': import sys pyver = sys.version_info[0] try: sys.argv.remove('--current-python') except ValueError: pass else: # assume that the stdlib is where the "os" module lives import os os.chdir(os.path.dirname(os.__file__)) try: sys.argv.remove('--parallel') import multiprocessing parallel_compiles = multiprocessing.cpu_count() * 2 print("Building in %d parallel processes" % parallel_compiles) except (ValueError, ImportError): parallel_compiles = None extensions = build_extensions(parallel=parallel_compiles) if parallel_compiles: pool = multiprocessing.Pool(parallel_compiles) sys_args = sys.argv[1:] results = pool.map(_build, [ (sys_args, ext) for ext in extensions ]) pool.close() pool.join() for ext, result in results: if not result: print("building extension %s failed" % (ext[0].name,)) else: build(extensions) enable 'optimize.inline_defnode_calls' in cystdlib.py build script """ Highly experimental script that compiles the CPython standard library using Cython. Execute the script either in the CPython 'Lib' directory or pass the option '--current-python' to compile the standard library of the running Python interpreter. Pass '--parallel' to get a parallel build. 
Usage example:: $ python cystdlib.py --current-python build_ext -i """ import sys from distutils.core import setup from Cython.Build import cythonize from Cython.Compiler import Options # improve Python compatibility by allowing some broken code Options.error_on_unknown_names = False excludes = ['**/test/**/*.py', '**/tests/**/*.py', '**/__init__.py'] broken = [ 'idlelib/MultiCall.py', 'email/utils.py', 'multiprocessing/reduction.py', 'multiprocessing/util.py', 'threading.py', # interrupt handling 'lib2to3/fixes/fix_sys_exc.py', 'traceback.py', ] default_directives = dict( auto_cpdef=False, # enable when it's safe, see long list of failures below binding=True, set_initial_path='SOURCEFILE') default_directives['optimize.inline_defnode_calls'] = True special_directives = [ (['pkgutil.py', 'decimal.py', 'datetime.py', 'optparse.py', 'sndhdr.py', 'opcode.py', 'ntpath.py', 'urllib/request.py', 'plat-*/TYPES.py', 'plat-*/IN.py', 'tkinter/_fix.py', 'lib2to3/refactor.py', 'webbrowser.py', 'shutil.py', 'multiprocessing/forking.py', 'xml/sax/expatreader.py', 'xmlrpc/client.py', 'pydoc.py', 'xml/etree/ElementTree.py', 'posixpath.py', 'inspect.py', 'ctypes/util.py', 'urllib/parse.py', 'warnings.py', 'tempfile.py', 'trace.py', 'heapq.py', 'pickletools.py', 'multiprocessing/connection.py', 'hashlib.py', 'getopt.py', 'os.py', 'types.py', ], dict(auto_cpdef=False)), ] del special_directives[:] # currently unused def build_extensions(includes='**/*.py', excludes=excludes+broken, special_directives=special_directives, parallel=None): if isinstance(includes, str): includes = [includes] all_groups = (special_directives or []) + [(includes, {})] extensions = [] for modules, directives in all_groups: exclude_now = excludes[:] for other_modules, _ in special_directives: if other_modules != modules: exclude_now.extend(other_modules) d = dict(default_directives) d.update(directives) extensions.extend( cythonize(modules, exclude=exclude_now, exclude_failures=True, language_level=pyver, 
compiler_directives=d, nthreads=parallel, )) return extensions def build(extensions): try: setup(name = 'stuff', ext_modules = extensions) return extensions, True except: import traceback print('error building extensions %s' % ([ext.name for ext in extensions],)) traceback.print_exc() return extensions, False def _build(args): sys_args, ext = args sys.argv[1:] = sys_args return build([ext]) if __name__ == '__main__': import sys pyver = sys.version_info[0] try: sys.argv.remove('--current-python') except ValueError: pass else: # assume that the stdlib is where the "os" module lives import os os.chdir(os.path.dirname(os.__file__)) try: sys.argv.remove('--parallel') import multiprocessing parallel_compiles = multiprocessing.cpu_count() * 2 print("Building in %d parallel processes" % parallel_compiles) except (ValueError, ImportError): parallel_compiles = None extensions = build_extensions(parallel=parallel_compiles) if parallel_compiles: pool = multiprocessing.Pool(parallel_compiles) sys_args = sys.argv[1:] results = pool.map(_build, [ (sys_args, ext) for ext in extensions ]) pool.close() pool.join() for ext, result in results: if not result: print("building extension %s failed" % (ext[0].name,)) else: build(extensions)
# -*- coding: utf-8 -*- """ werkzeug_dispatch ~~~~~~~~~~~~~~~~~ :copyright: (c) 2014 by Ben Mather. :license: BSD, see LICENSE for more details. """ from werkzeug import Response import json class ViewFactory(object): def get_bindings(self): raise NotImplementedError() class Binding(ViewFactory): def __init__(self, name, method, action, accept='*'): self.name = name self.method = method self.accept = accept self.action = action def get_bindings(self): yield self class View(ViewFactory): """ Wraps a function or callable so that it can be bound to a name in a dispatcher. """ def __init__(self, name, action, *, methods={'GET'}, accept='*'): self._name = name self._methods = methods self._accept = accept self._action = action def __call__(self, env, req, *args, **kwargs): return self._action(env, req, *args, **kwargs) def get_bindings(self): for method in self._methods: yield Binding(self._name, method, self, self._accept) class TemplateView(View): """ Like `View` but if the value returned from the action is not an instance of `Response` it is rendered using the named template. :param name: :param action: called with environment, request and params to generate response. See `View`. 
:param template: either a string naming the template to be retrieved from the environment or a callable applied to the result to create an http `Response` object """ def __init__(self, name, action, *, methods={'GET'}, template=None): super(TemplateView, self).__init__(name, action, methods=methods) self._template = template def __call__(self, env, req, *args, **kwargs): res = self._action(env, req, *args, **kwargs) if isinstance(res, Response): return res return env.get_template(self._template).render(res) class JsonView(View): def __init__(self, name, action, *, methods={'GET'}): super(JsonView, self).__init__(name, action, methods=methods, accept='text/json') def __call__(self, env, req, *args, **kwargs): res = super(JsonView, self).__call__(env, req, *args, **kwargs) if isinstance(res, Response): return res return Response(json.dumps(res)) class ClassView(ViewFactory): def get_bindings(self): for method in {'GET', 'HEAD', 'POST', 'PUT', 'DELETE'}: # TODO if hasattr(self, method): yield Binding(self.name, method, getattr(self, method)) class Dispatcher(ViewFactory): def __init__(self, views=[], *, default_view=TemplateView): """ :param default_view: callable used to construct new views from functions decorated with the `expose` method """ self._default_view = default_view self._views = {} for view in views: self.add(view) def expose(self, name, *args, **kwargs): """ Decorator to expose a function as a view. Does not modify the wrapped function. """ def decorator(f): self.add(self._default_view(name, f, *args, **kwargs)) return f return decorator def add(self, view_factory): """ Add views from view factory to this dispatcher. 
Dispatchers can be nested """ for view in view_factory.get_bindings(): if not view.name in self._views: self._views[view.name] = {} if not view.method in self._views[view.name]: self._views[view.name][view.method] = {} self._views[view.name][view.method] = view.action def get_bindings(self): return iter(self._views.items()) def lookup(self, method, name): if not name in self._views: return None if method in self._views[name]: return self._views[name][method] elif method == 'HEAD' and 'GET' in self._views[name]: return self._views[name]['GET'] docs for `Binding` # -*- coding: utf-8 -*- """ werkzeug_dispatch ~~~~~~~~~~~~~~~~~ :copyright: (c) 2014 by Ben Mather. :license: BSD, see LICENSE for more details. """ from werkzeug import Response import json class ViewFactory(object): def get_bindings(self): raise NotImplementedError() class Binding(ViewFactory): """Represents an action associated with a single combination of endpoint name and method `name` A hashable identifier. `method` An http method as an upper case string. `action` The action to perform if the binding is matched `accept` TODO. A string in the same format as the http accept header """ def __init__(self, name, method, action, accept='*'): self.name = name self.method = method self.accept = accept self.action = action def get_bindings(self): yield self class View(ViewFactory): """ Wraps a function or callable so that it can be bound to a name in a dispatcher. """ def __init__(self, name, action, *, methods={'GET'}, accept='*'): self._name = name self._methods = methods self._accept = accept self._action = action def __call__(self, env, req, *args, **kwargs): return self._action(env, req, *args, **kwargs) def get_bindings(self): for method in self._methods: yield Binding(self._name, method, self, self._accept) class TemplateView(View): """ Like `View` but if the value returned from the action is not an instance of `Response` it is rendered using the named template. 
:param name: :param action: called with environment, request and params to generate response. See `View`. :param template: either a string naming the template to be retrieved from the environment or a callable applied to the result to create an http `Response` object """ def __init__(self, name, action, *, methods={'GET'}, template=None): super(TemplateView, self).__init__(name, action, methods=methods) self._template = template def __call__(self, env, req, *args, **kwargs): res = self._action(env, req, *args, **kwargs) if isinstance(res, Response): return res return env.get_template(self._template).render(res) class JsonView(View): def __init__(self, name, action, *, methods={'GET'}): super(JsonView, self).__init__(name, action, methods=methods, accept='text/json') def __call__(self, env, req, *args, **kwargs): res = super(JsonView, self).__call__(env, req, *args, **kwargs) if isinstance(res, Response): return res return Response(json.dumps(res)) class ClassView(ViewFactory): def get_bindings(self): for method in {'GET', 'HEAD', 'POST', 'PUT', 'DELETE'}: # TODO if hasattr(self, method): yield Binding(self.name, method, getattr(self, method)) class Dispatcher(ViewFactory): def __init__(self, views=[], *, default_view=TemplateView): """ :param default_view: callable used to construct new views from functions decorated with the `expose` method """ self._default_view = default_view self._views = {} for view in views: self.add(view) def expose(self, name, *args, **kwargs): """ Decorator to expose a function as a view. Does not modify the wrapped function. """ def decorator(f): self.add(self._default_view(name, f, *args, **kwargs)) return f return decorator def add(self, view_factory): """ Add views from view factory to this dispatcher. 
Dispatchers can be nested """ for view in view_factory.get_bindings(): if not view.name in self._views: self._views[view.name] = {} if not view.method in self._views[view.name]: self._views[view.name][view.method] = {} self._views[view.name][view.method] = view.action def get_bindings(self): return iter(self._views.items()) def lookup(self, method, name): if not name in self._views: return None if method in self._views[name]: return self._views[name][method] elif method == 'HEAD' and 'GET' in self._views[name]: return self._views[name]['GET']
""" Author: Ron Lockwood-Childs Licensed under LGPL v2.1 (see file COPYING for details) Widget types and instances. """ import math import pygame from pygame_maker.actors.gui.styles import WidgetStyle from .. import object_type from .. import simple_object_instance from pygame_maker.actions import action from pygame_maker.actions import action_sequence import pygame_maker.support.drawing as drawing import pygame_maker.support.coordinate as coord import pygame_maker.support.color as color class DummySurface(pygame.Rect): """ An object that can be substituted for a pygame Surface, for checking width and height (helpful when calculating a minimum widget size without having a surface to draw it on). """ def get_width(self): return self.width def get_height(self): return self.height class WidgetInstance(simple_object_instance.SimpleObjectInstance): """ Base class for an instance of any Widget type. """ INSTANCE_SYMBOLS = { "visible": 0, "widget_id": "", "widget_class": "", "hover": False, "selected": False, } # WidgetInstance subclasses set this dict to add additional symbols with # default values WIDGET_INSTANCE_SUBCLASS_SYMBOLS = { } THIN_BORDER_WIDTH = 1 MEDIUM_BORDER_WIDTH = 3 THICK_BORDER_WIDTH = 5 @staticmethod def get_integer_setting(setting, max_value): """ Apply an integer setting value appropriately based on whether the value supplied is signed, unsigned, an integer 'px' size (in pixels), or a percentage of the given max_value. 
""" value = 0 # self.debug("search '{}', which is type '{}'".format(setting, type(setting).__name__)) if not isinstance(setting, str): # in case the value is already a number return setting num_minfo = WidgetStyle.NUMBER_RE.search(setting) if num_minfo: value = int(setting) num_minfo = WidgetStyle.SIGNED_NUMBER_RE.search(setting) if num_minfo: value = int(setting) px_minfo = WidgetStyle.PX_RE.search(setting) if px_minfo: value = int(px_minfo.group(1)) pc_minfo = WidgetStyle.PERCENT_RE.search(setting) if pc_minfo: perc = float(pc_minfo.group(1)) / 100.0 if perc > 1.0: # WidgetStyle doesn't constrain the percentage; force # maximum to 100% perc = 1.0 value = int(math.floor(perc * max_value)) return value def __init__(self, kind, screen, screen_dims, id_, settings=None, **kwargs): simple_object_instance.SimpleObjectInstance.__init__(self, kind, screen_dims, id_, settings, **kwargs) self.screen = screen self.screen_width = self.screen_dims[0] self.screen_height = self.screen_dims[1] self.style_settings = {} self.style_values = {} self.symbols["widget_class"] = "" self.symbols["widget_id"] = "{}".format(self.inst_id) self.symbols["visible"] = self.kind.visible style_hash = self.get_widget_instance_style_hash() style_info_hash = self.game_engine.global_style_settings.get_style(**style_hash) # print("{}".format(style_info_hash)) style_info = WidgetStyle(style_info_hash) self.get_widget_settings(style_info) self.get_inner_setting_values(screen_dims) # width, height = self.get_element_dimensions() # self.symbols["width"] = width # self.symbols["height"] = height for subclass_sym in self.WIDGET_INSTANCE_SUBCLASS_SYMBOLS.keys(): if subclass_sym not in self.symbols.keys(): self.symbols[subclass_sym] = self.WIDGET_INSTANCE_SUBCLASS_SYMBOLS[subclass_sym] @property def visible(self): return self.symbols["visible"] @visible.setter def visible(self, is_visible): vis = (is_visible is True) self.symbols["visible"] = vis @property def widget_id(self): return self.symbols["widget_id"] 
@widget_id.setter def widget_id(self, new_id): self.symbols["widget_id"] = "{}".format(new_id) @property def widget_class(self): return self.symbols["widget_class"] @widget_class.setter def widget_class(self, new_class): self.symbols["widget_class"] = new_class @property def hover(self): return int(self.symbols["hover"]) @hover.setter def hover(self, hover_on): self.symbols["hover"] = (hover_on is True) @property def selected(self): return int(self.symbols["selected"]) @selected.setter def selected(self, is_selected): self.symbols["selected"] = (is_selected is True) @property def width(self): if "width" in self.symbols.keys(): return self.symbols["width"] else: return 0 @width.setter def width(self, new_width): int_width = int(new_width) self.rect.width = int_width self.symbols["width"] = int_width @property def height(self): return self.symbols["height"] @height.setter def height(self, new_height): int_height = int(new_height) self.rect.height = int_height self.symbols["height"] = int_height @property def parent(self): return self.symbols["parent"] def get_style_setting(self, setting_name, css_properties, parent_settings): """ Given a setting's name, the CSS properties, and the widget's parent's settings, determine its value. If not found in the following places: * the instance symbol table (I.E. in object YAML or passed into constructor) * CSS properties * inherited parent settings then use the default value. """ self.debug("WidgetInstance.get_style_setting(" + "setting_name={}, css_properties={}, parent_settings={})". 
format(setting_name, css_properties, parent_settings)) default_setting = WidgetStyle.get_style_entry_default(setting_name) setting = default_setting if setting_name in self.symbols.keys(): # settings passed in from object YAML or constructor override # default CSS setting = self.symbols[setting_name] elif setting_name in css_properties.keys(): # check_setting = " ".join(css_properties[setting_name]) check_setting = css_properties[setting_name] # self.debug("check_setting: {}".format(check_setting)) if check_setting != "initial": if (WidgetStyle.compare_value_vs_constraint(setting_name, "inherit") and check_setting == "inherit" and self.parent is not None): setting = parent_settings[setting_name] elif WidgetStyle.compare_value_vs_constraint(setting_name, check_setting): setting = check_setting return setting def get_widget_settings(self, css_properties): """ Collect all settings for the widget, using (in order of precedence): 1. values set in the constructor or in YAML properties 2. applicable CSS properties 3. values inherited from a parent widget :param css_properties: Style settings found in the game engine based on the widget's current properties (id, class name, etc.) :type css_properties: WidgetStyle """ self.debug("WidgetInstance.get_style_settings(css_properties={})".format(css_properties)) parent_settings = None if self.parent is not None and isinstance(self.parent, WidgetInstance): # this could result in the parent checking its parent's # settings.. parent_settings = self.parent.get_widget_settings(css_properties) for setting_name in WidgetStyle.STYLE_CONSTRAINTS.keys(): # self.debug("Get widget setting {} .. ".format(setting_name)) self.style_settings[setting_name] = self.get_style_setting(setting_name, css_properties, parent_settings) def get_outer_setting_values(self, surface): """ Calculate values for all margin, border, and padding settings. 
:param surface: A (sub-)surface that can report its own width and height :type surface: pygame.Surface or DummySurface """ self.debug("WidgetInstance.get_outer_setting_values(surface={})".format(surface)) # CSS box model: calculate margin, border, and padding so the remaining # setting values can be calculated surface_width = surface.get_width() surface_height = surface.get_height() for setting_name in ("margin-left", "border-left-width", "padding-left", "padding-right", "border-right-width", "margin-right"): style_val = type(self).get_integer_setting(self.style_settings[setting_name], surface_width) self.style_values[setting_name] = style_val for setting_name in ("margin-top", "border-top-width", "padding-top", "padding-bottom", "border-bottom-width", "margin-bottom"): style_val = type(self).get_integer_setting(self.style_settings[setting_name], surface_height) self.style_values[setting_name] = style_val def calculate_outer_dimensions(self): self.debug("WidgetInstance.calculate_outer_dimensions()") # calculate width outer_width = (self.style_values["margin-left"] + self.style_values["margin-right"] + self.style_values["padding-left"] + self.style_values["padding-right"]) border_left_width = 0 if self.style_settings["border-left-style"] not in ("none", "hidden"): border_left_width = self.style_values["border-left-width"] if self.style_settings["border-left-style"] == "double": border_left_width = border_left_width * 2 + 1 border_right_width = 0 if self.style_settings["border-right-style"] not in ("none", "hidden"): border_right_width = self.style_values["border-right-width"] if self.style_settings["border-right-style"] == "double": border_right_width = border_right_width * 2 + 1 outer_width += border_left_width + border_right_width # calculate height outer_height = (self.style_values["margin-top"] + self.style_values["margin-bottom"] + self.style_values["padding-top"] + self.style_values["padding-bottom"]) border_top_height = 0 if 
self.style_settings["border-top-style"] not in ("none", "hidden"): border_top_height = self.style_values["border-top-width"] if self.style_settings["border-top-style"] == "double": border_top_height = border_top_height * 2 + 1 border_bottom_height = 0 if self.style_settings["border-bottom-style"] not in ("none", "hidden"): border_bottom_height = self.style_values["border-bottom-width"] if self.style_settings["border-bottom-style"] == "double": border_bottom_height = border_bottom_height * 2 + 1 outer_height += border_top_height + border_bottom_height return (outer_width, outer_height) def get_inner_setting_values(self, max_dimensions): self.debug("WidgetInstance.get_inner_setting_values(max_dimensions={})".format(max_dimensions)) # calculate min-width, width, max-width values min_width_val = type(self).get_integer_setting(self.style_settings["min-width"], max_dimensions[0]) if min_width_val > max_dimensions[0]: min_width_val = max_dimensions[0] self.style_values["min-width"] = min_width_val max_width_val = max_dimensions[0] if self.style_settings["max-width"] != "none": max_width_val = type(self).get_integer_setting(self.style_settings["max-width"], max_dimensions[0]) if max_width_val > max_dimensions[0]: max_width_val = max_dimensions[0] elif max_width_val < min_width_val: max_width_val = min_width_val self.style_values["max-width"] = max_width_val if self.style_settings["width"] != "auto": width_val = type(self).get_integer_setting(self.style_settings["width"], max_dimensions[0]) if width_val < min_width_val: width_val = min_width_val if width_val > max_width_val: width_val = max_width_val self.style_values["width"] = width_val # calculate min-height, height, max-height values min_height_val = type(self).get_integer_setting(self.style_settings["min-height"], max_dimensions[1]) if min_height_val > max_dimensions[1]: min_height_val = max_dimensions[1] self.style_values["min-height"] = min_height_val max_height_val = max_dimensions[1] if 
self.style_settings["max-height"] != "none": max_height_val = type(self).get_integer_setting(self.style_settings["max-height"], max_dimensions[1]) if max_height_val > max_dimensions[1]: max_height_val = max_dimensions[1] elif max_height_val < min_height_val: max_height_val = min_height_val self.style_values["max-height"] = max_height_val if self.style_settings["height"] != "auto": height_val = type(self).get_integer_setting(self.style_settings["height"], max_dimensions[1]) if height_val < min_height_val: height_val = min_height_val if height_val > max_height_val: height_val = max_height_val self.style_values["height"] = height_val def get_element_dimensions(self): """ Called after get_inner_setting_values() to determine the size of the widget's content. The base class will use the 'min-width' and 'min-height' properties as element dimensions, if the 'width' and 'height' properties are 'auto'; otherwise, it will use the 'width' and/or 'height' properties' values. Subclasses should start here, and expand as needed to fit widget content, honoring the 'max-width' and 'max-height' properties. 
""" self.debug("WidgetInstance.get_element_dimensions()") element_width = self.style_values["min-width"] element_height = self.style_values["min-height"] if self.style_settings["width"] != "auto": element_width = self.style_values["width"] if self.style_settings["height"] != "auto": element_height = self.style_values["height"] return (element_width, element_height) def get_color_values(self): self.debug("WidgetInstance.get_color_values()") color_property_list = ["border-top-color", "border-right-color", "border-bottom-color", "border-left-color", "background-color", "color"] # put Color objects into border/background color settings for color_property in color_property_list: color_name = self.style_settings[color_property] default_color = str(WidgetStyle.STYLE_CONSTRAINTS[color_property]["default"]) color_string = color_name if color_name != "transparent": minfo = WidgetStyle.WEB_COLOR_RE.match(color_name) if minfo: if len(color_name) == 4: color_string = "#0{}0{}0{}".format(*color_name[1:4]) elif len(color_name) == 5: color_string = "#0{}0{}0{}0{}".format(*color_name[1:5]) elif len(color_name) == 6: str_ary = [color_name[idx] for idx in range(1,4)] str_ary.append(color_name[4:]) color_string = "#0{}0{}0{}{}".format(*str_ary) elif len(color_name) == 8: color_string = "{}0{}".format(color_name[:7], color_name[7]) try: self.style_values[color_property] = color.Color(color_string) except ValueError: self.style_values[color_property] = color.Color(default_color) elif color_property == "color": # font color isn't supposed to be "transparent", so replace # this string with the default color self.style_values[color_property] = color.Color(default_color) def get_min_size(self): """ Calculate the widget's minimum width and height, and return them in a tuple. Container widgets may call this to find out how much space it needs to reserve for each of its child widgets. 
The base class only returns the space surrounding the widget's contents: the sum of margin, border, and padding widths for each side. Subclasses should call this method, then determine the element's actual dimensions after taking min/max width and height into account. """ self.debug("WidgetInstance.get_min_size()") # create a surface such that 1% is a minimum of 1 pixel dummy_surface = DummySurface(0,0,100,100) min_width = 1 min_height = 1 style_hash = self.get_widget_instance_style_hash() style_info = WidgetStyle(self.game_engine.global_style_settings.get_style(**style_hash)) self.get_widget_settings(style_info) self.get_outer_setting_values(dummy_surface) min_outer_dims = self.calculate_outer_dimensions() if min_outer_dims[0] > min_width: min_width = min_outer_dims[0] if min_outer_dims[1] > min_height: min_height = min_outer_dims[1] return (min_width, min_height) def calculate_top_outer_border_size(self): top_size = self.style_values["margin-top"] + self.style_values["padding-top"] border_top_height = 0 if (self.style_settings["border-top-style"] not in ("none", "hidden") and (self.style_settings["border-top-color"] != "transparent")): border_top_height = self.style_values["border-top-width"] if self.style_settings["border-top-style"] == "double": border_top_height = border_top_height * 2 + 1 top_size += border_top_height return top_size def calculate_left_outer_border_size(self): left_size = self.style_values["margin-left"] + self.style_values["padding-left"] border_left_width = 0 if (self.style_settings["border-left-style"] not in ("none", "hidden") and (self.style_settings["border-left-color"] != "transparent")): border_left_width = self.style_values["border-left-width"] if self.style_settings["border-left-style"] == "double": border_left_width = border_left_width * 2 + 1 left_size += border_left_width return left_size def draw_border_side(self, screen, side, outer_dims, element_dims, width, color, style): draw_rect = pygame.Rect(0, 0, 0, 0) # thick lines are 
_centered_ on the calculated coordinates, so shift them by 1/2 their width thick_adj = 0 if width > 1: thick_adj = int(math.floor((width-1) / 2)) if side == "top": draw_rect.left = self.style_values["margin-left"] draw_rect.top = self.style_values["margin-top"] + thick_adj draw_rect.width = self.style_values["border-left-width"] + \ self.style_values["padding-left"] + element_dims[0] + \ self.style_values["padding-right"] if draw_rect.width <= 1: return elif side == "bottom": draw_rect.left = self.style_values["margin-left"] draw_rect.top = (self.calculate_top_outer_border_size() + element_dims[1] + self.style_values["padding-bottom"] + thick_adj) draw_rect.width = self.style_values["border-left-width"] + \ self.style_values["padding-left"] + element_dims[0] + \ self.style_values["padding-right"] if draw_rect.width <= 1: return elif side == "left": draw_rect.left = self.style_values["margin-left"] + thick_adj draw_rect.top = self.style_values["margin-top"] draw_rect.height = self.style_values["border-top-width"] + \ self.style_values["padding-top"] + element_dims[1] + \ self.style_values["padding-bottom"] if draw_rect.height <= 1: return elif side == "right": draw_rect.left = (self.calculate_left_outer_border_size() + element_dims[0] + self.style_values["padding-right"] + thick_adj) draw_rect.top = self.style_values["margin-top"] draw_rect.height = self.style_values["border-top-width"] + \ self.style_values["padding-top"] + element_dims[1] + \ self.style_values["padding-bottom"] if draw_rect.height <= 1: return start_coord = coord.Coordinate(draw_rect.left, draw_rect.top) end_coord = coord.Coordinate(draw_rect.right, draw_rect.bottom) # self.debug("Draw {} border from {} to {}, width {}, color {}, style {}".format( # side, start_coord, end_coord, width, color, style)) drawing.draw_line(screen, start_coord, end_coord, width, color, style) def draw_border(self, screen, outer_dims): self.debug("WidgetInstance.draw_border(screen={}, outer_dims={})". 
format(screen, outer_dims)) element_dims = self.get_element_dimensions() for side in ("top", "right", "bottom", "left"): border_width = self.style_values["border-{}-width".format(side)] border_style = self.style_settings["border-{}-style".format(side)] border_color_style = self.style_settings["border-{}-color".format(side)] border_color = "transparent" if border_color_style != "transparent": border_color = self.style_values["border-{}-color".format(side)] else: continue if border_style in ("none", "hidden") or (border_width < 1) or \ (border_color == "transparent"): continue self.draw_border_side(screen, side, outer_dims, element_dims, border_width, border_color, border_style) def draw(self, screen): """ Draw the widget instance to a surface using css properties. Always recalculate the settings, in case the style has been updated, or an attribute has changed that may affect the style. :param screen: A pygame surface upon which to draw the widget :type screen: :py:class:`pygame.Surface` """ self.debug("{} inst {}: WidgetInstance.draw(screen={})". format(self.kind.name, self.inst_id, screen)) if not self.visible: return style_hash = self.get_widget_instance_style_hash() # self.debug("Find style {} in {} ..".format(style_hash, style_info)) style_info = WidgetStyle(self.game_engine.global_style_settings.get_style(**style_hash)) self.get_widget_settings(style_info) # self.debug("Style settings: {}".format(self.style_settings)) self.get_outer_setting_values(screen) outer_dims = self.calculate_outer_dimensions() max_inner_dims = (screen.get_width() - outer_dims[0], screen.get_height() - outer_dims[1]) self.get_inner_setting_values(max_inner_dims) self.get_color_values() # self.debug("Style values: {}".format(self.style_values)) self.draw_border(screen, outer_dims) def get_widget_instance_style_hash(self): """ Collect widget instance style information for comparison with stylesheet settings. Subclasses should start here and add attribute matches (e.g. 
a checkbutton could match on "checked" attribute "on" or "off") """ self.debug("WidgetInstance.get_widget_instance_style_hash()") props = { "element_type": self.kind.name, "element_id": self.widget_id, } if len(self.widget_class) > 0: props["element_class"] = self.widget_class if self.hover: props["pseudo_class"] = "hover" return props class LabelWidgetInstance(WidgetInstance): # WIDGET_INSTANCE_SUBCLASS_SYMBOLS = { # "label": "", # "font": "", # "font_size": "initial", # "font_style": "initial", # "font_weight": "initial", # "text_decoration": "initial", # "text_transform": "initial", # "text_align": "initial", # "vertical_align": "initial", # "color": "black" # } FONT_SIZE_CATEGORIES = { "small": 10, "medium": 14, "large": 20 } def __init__(self, kind, screen, screen_dims, id_, settings=None, **kwargs): super(LabelWidgetInstance, self).__init__(kind, screen, screen_dims, id_, settings, **kwargs) self.font_resource = None self.get_font_resource() self._font_point_size = 12 @property def label(self): return self.symbols["label"] @label.setter def label(self, new_label): try: self.symbols["label"] = str(new_label) except ValueError: pass @property def font(self): return self.symbols["font"] @font.setter def font(self, new_font): try: self.symbols["font"] = str(new_font) self.get_font_resource() except ValueError: pass @property def font_size(self): return self.symbols["font_size"] @font_size.setter def font_size(self, new_size): self.symbols["font_size"] = new_size def get_font_resource(self): self.debug("LabelWidgetInstance.get_font_resource()") if (len(self.font) == 0) or (self.font not in self.kind.game_engine.resources['fonts'].keys()): # revert to a system font, if found if hasattr(self.kind.game_engine, 'system_font'): self.font_resource = self.kind.game_engine.system_font else: return None else: self.font_resource = self.kind.game_engine.resources['fonts'][self.font] def calc_label_size(self): self.debug("LabelWidgetInstance.calc_label_size()") if 
self.font_resource is None: return (0, 0) if len(self.label) == 0: return (0, 0) font_rndr = self.font_resource.get_font_renderer() return font_rndr.calc_render_size(self.label) def get_min_size(self): self.debug("LabelWidgetInstance.get_min_size()") total_width, total_height = super(LabelWidgetInstance, self).get_min_size() text_width, text_height = self.calc_label_size() total_width += text_width total_height += text_height return (total_width, total_height) def get_element_dimensions(self): self.debug("LabelWidgetInstance.get_element_dimensions()") element_width = self.style_values["min-width"] element_height = self.style_values["min-height"] label_width, label_height = self.calc_label_size() if self.style_settings["width"] != "auto": element_width = self.style_values["width"] elif label_width > element_width: element_width = label_width if self.style_settings["height"] != "auto": element_height = self.style_values["height"] elif label_height > element_height: element_height = label_height return (element_width, element_height) def draw_text(self, surface): self.debug("LabelWidgetInstance.draw_text(surface={})".format(surface)) surf_width = surface.get_width() surf_height = surface.get_height() font_rndr = self.font_resource.get_font_renderer() # apply horizontal, vertical alignment text_width, text_height = font_rndr.calc_render_size(self.label) top_left = coord.Coordinate(0, 0) if surf_width > text_width: if self.style_settings["text-align"] == "center": top_left.x = (surf_width / 2) - (text_width / 2) elif self.style_settings["text-align"] == "right": top_left.x = surf_width - text_width if surf_height > text_height: if self.style_settings["vertical-align"] == "middle": top_left.y = (surf_height / 2) - (text_height / 2) elif self.style_settings["vertical-align"] == "bottom": top_left.y = surf_height - text_height font_rndr.render_text(surface, top_left, self.label, self.style_values["color"]) def draw(self, screen): 
self.debug("LabelWidgetInstance.draw(screen={})".format(screen)) # draw any visible borders super(LabelWidgetInstance, self).draw(screen) # create a subsurface big enough to hold the element dimensions label_width, label_height = self.calc_label_size() if (label_width > 0) and (label_height > 0): subsurf_width, subsurf_height = self.get_element_dimensions() subsurf_left = super(LabelWidgetInstance, self).calculate_left_outer_border_size() subsurf_top = super(LabelWidgetInstance, self).calculate_top_outer_border_size() subsurf_rect = pygame.Rect(subsurf_left, subsurf_top, subsurf_width, subsurf_height) subsurf = screen.subsurface(subsurf_rect) self.draw_text(subsurf) class WidgetObjectTypeInvalid(Exception): pass class WidgetObjectType(object_type.ObjectType): DEFAULT_VISIBLE = False # subclasses set this to their own instance type WIDGET_INSTANCE_TYPE = WidgetInstance # subclasses can add their own YAML properties by setting this class # variable to a list of tuples [(entry_name, entry_type), ..] 
where # entry_type is usually a standard type: str, int, or bool WIDGET_SUBCLASS_KW_ENTRIES = [] @classmethod def gen_kwargs_from_yaml_obj(cls, obj_name, obj_yaml, game_engine): kwargs = super(WidgetObjectType, cls).gen_kwargs_from_yaml_obj(obj_name, obj_yaml, game_engine) kwargs.update({ "visible": WidgetObjectType.DEFAULT_VISIBLE, }) if "visible" in obj_yaml.keys(): kwargs["visible"] = (obj_yaml["visible"] is True) for kw_entry, entry_type in cls.WIDGET_SUBCLASS_KW_ENTRIES: if kw_entry in obj_yaml.keys(): if isinstance(entry_type, bool): kwargs[kw_entry] = (obj_yaml[kw_entry] is True) else: # set the kwarg if the type conversion succeeds try: kwargs[kw_entry] = entry_type(obj_yaml[kw_entry]) except ValueError: pass return kwargs def __init__(self, widget_name, game_engine, **kwargs): super(WidgetObjectType, self).__init__(widget_name, game_engine, **kwargs) #: Flag whether this widget type is a container for other widgets self.is_container = False self.visible = self.DEFAULT_VISIBLE # default draw action sequence draws the object's sprite self["draw"] = action_sequence.ActionSequence() self["draw"].append_action(action.DrawAction("draw_self")) if kwargs and "visible" in kwargs: self.visible = kwargs["visible"] def make_new_instance(self, screen, instance_properties=None): """ Generate a new instance of the widget type in response to :py:meth:`~pygame_maker.actors.object_type.ObjectType.create_instance` :param screen: The surface the instance will be drawn upon. The instance can use this surface's (often a sub-surface's) width and height parameters to determine child widget placement :type screen: :py:class:`pygame.Surface` :param instance_properties: A hash of settings to be applied. 
See kwargs entry in :py:meth:`~pygame_maker.actors.simple_object_instance.SimpleObjectInstance.__init__` :type instance_properties: dict """ self.debug("WidgetObjectType.make_new_instance(screen={}, instance_properties={})".format(screen, instance_properties)) screen_dims = (screen.get_width(), screen.get_height()) new_instance = self.WIDGET_INSTANCE_TYPE(self, screen, screen_dims, self._id, instance_properties) self.instance_list.append(new_instance) def update(self): """ Update all instances of this widget type. """ pass def draw(self, in_event): """Draw all visible instances.""" self.debug("WidgetObjectType.draw(in_event={})".format(in_event)) if len(self.instance_list) > 0: for inst in self.instance_list: # self.debug("Check inst {}".format(inst)) if inst.parent is not None: continue if inst.visible: # self.debug("Draw visible inst {}".format(inst)) inst.draw(inst.screen) class LabelWidgetObjectType(WidgetObjectType): WIDGET_INSTANCE_TYPE = LabelWidgetInstance widget.py: More pylint cleanups """ Author: Ron Lockwood-Childs Licensed under LGPL v2.1 (see file COPYING for details) Widget types and instances. """ import math import pygame from pygame_maker.actors.gui.styles import WidgetStyle from .. import object_type from .. import simple_object_instance from pygame_maker.actions import action from pygame_maker.actions import action_sequence import pygame_maker.support.drawing as drawing import pygame_maker.support.coordinate as coord import pygame_maker.support.color as color class DummySurface(pygame.Rect): """ An object that can be substituted for a pygame Surface, for checking width and height (helpful when calculating a minimum widget size without having a surface to draw it on). """ def get_width(self): return self.width def get_height(self): return self.height class WidgetInstance(simple_object_instance.SimpleObjectInstance): """ Base class for an instance of any Widget type. 
""" INSTANCE_SYMBOLS = { "visible": 0, "widget_id": "", "widget_class": "", "hover": False, "selected": False, } # WidgetInstance subclasses set this dict to add additional symbols with # default values WIDGET_INSTANCE_SUBCLASS_SYMBOLS = { } THIN_BORDER_WIDTH = 1 MEDIUM_BORDER_WIDTH = 3 THICK_BORDER_WIDTH = 5 @staticmethod def get_integer_setting(setting, max_value): """ Apply an integer setting value appropriately based on whether the value supplied is signed, unsigned, an integer 'px' size (in pixels), or a percentage of the given max_value. """ value = 0 # self.debug("search '{}', which is type '{}'".format(setting, type(setting).__name__)) if not isinstance(setting, str): # in case the value is already a number return setting num_minfo = WidgetStyle.NUMBER_RE.search(setting) if num_minfo: value = int(setting) num_minfo = WidgetStyle.SIGNED_NUMBER_RE.search(setting) if num_minfo: value = int(setting) px_minfo = WidgetStyle.PX_RE.search(setting) if px_minfo: value = int(px_minfo.group(1)) pc_minfo = WidgetStyle.PERCENT_RE.search(setting) if pc_minfo: perc = float(pc_minfo.group(1)) / 100.0 if perc > 1.0: # WidgetStyle doesn't constrain the percentage; force # maximum to 100% perc = 1.0 value = int(math.floor(perc * max_value)) return value def __init__(self, kind, screen, screen_dims, id_, settings=None, **kwargs): simple_object_instance.SimpleObjectInstance.__init__(self, kind, screen_dims, id_, settings, **kwargs) self.screen = screen self.screen_width = self.screen_dims[0] self.screen_height = self.screen_dims[1] self.style_settings = {} self.style_values = {} self.symbols["widget_class"] = "" self.symbols["widget_id"] = "{}".format(self.inst_id) self.symbols["visible"] = self.kind.visible style_hash = self.get_widget_instance_style_hash() style_info_hash = self.game_engine.global_style_settings.get_style(**style_hash) # print("{}".format(style_info_hash)) style_info = WidgetStyle(style_info_hash) self.get_widget_settings(style_info) 
self.get_inner_setting_values(screen_dims) # width, height = self.get_element_dimensions() # self.symbols["width"] = width # self.symbols["height"] = height for subclass_sym in self.WIDGET_INSTANCE_SUBCLASS_SYMBOLS.keys(): if subclass_sym not in self.symbols.keys(): self.symbols[subclass_sym] = self.WIDGET_INSTANCE_SUBCLASS_SYMBOLS[subclass_sym] @property def visible(self): return self.symbols["visible"] @visible.setter def visible(self, is_visible): vis = (is_visible is True) self.symbols["visible"] = vis @property def widget_id(self): return self.symbols["widget_id"] @widget_id.setter def widget_id(self, new_id): self.symbols["widget_id"] = "{}".format(new_id) @property def widget_class(self): return self.symbols["widget_class"] @widget_class.setter def widget_class(self, new_class): self.symbols["widget_class"] = new_class @property def hover(self): return int(self.symbols["hover"]) @hover.setter def hover(self, hover_on): self.symbols["hover"] = (hover_on is True) @property def selected(self): return int(self.symbols["selected"]) @selected.setter def selected(self, is_selected): self.symbols["selected"] = (is_selected is True) @property def width(self): if "width" in self.symbols.keys(): return self.symbols["width"] else: return 0 @width.setter def width(self, new_width): int_width = int(new_width) self.rect.width = int_width self.symbols["width"] = int_width @property def height(self): return self.symbols["height"] @height.setter def height(self, new_height): int_height = int(new_height) self.rect.height = int_height self.symbols["height"] = int_height @property def parent(self): return self.symbols["parent"] def get_style_setting(self, setting_name, css_properties, parent_settings): """ Given a setting's name, the CSS properties, and the widget's parent's settings, determine its value. If not found in the following places: * the instance symbol table (I.E. 
in object YAML or passed into constructor) * CSS properties * inherited parent settings then use the default value. """ self.debug("WidgetInstance.get_style_setting(" + "setting_name={}, css_properties={}, parent_settings={})". format(setting_name, css_properties, parent_settings)) default_setting = WidgetStyle.get_style_entry_default(setting_name) setting = default_setting if setting_name in self.symbols.keys(): # settings passed in from object YAML or constructor override # default CSS setting = self.symbols[setting_name] elif setting_name in css_properties.keys(): # check_setting = " ".join(css_properties[setting_name]) check_setting = css_properties[setting_name] # self.debug("check_setting: {}".format(check_setting)) if check_setting != "initial": if (WidgetStyle.compare_value_vs_constraint(setting_name, "inherit") and check_setting == "inherit" and self.parent is not None): setting = parent_settings[setting_name] elif WidgetStyle.compare_value_vs_constraint(setting_name, check_setting): setting = check_setting return setting def get_widget_settings(self, css_properties): """ Collect all settings for the widget, using (in order of precedence): 1. values set in the constructor or in YAML properties 2. applicable CSS properties 3. values inherited from a parent widget :param css_properties: Style settings found in the game engine based on the widget's current properties (id, class name, etc.) :type css_properties: WidgetStyle """ self.debug("WidgetInstance.get_style_settings(css_properties={})".format(css_properties)) parent_settings = None if self.parent is not None and isinstance(self.parent, WidgetInstance): # this could result in the parent checking its parent's # settings.. parent_settings = self.parent.get_widget_settings(css_properties) for setting_name in WidgetStyle.STYLE_CONSTRAINTS.keys(): # self.debug("Get widget setting {} .. 
".format(setting_name)) self.style_settings[setting_name] = self.get_style_setting(setting_name, css_properties, parent_settings) def get_outer_setting_values(self, surface): """ Calculate values for all margin, border, and padding settings. :param surface: A (sub-)surface that can report its own width and height :type surface: pygame.Surface or DummySurface """ self.debug("WidgetInstance.get_outer_setting_values(surface={})".format(surface)) # CSS box model: calculate margin, border, and padding so the remaining # setting values can be calculated surface_width = surface.get_width() surface_height = surface.get_height() for setting_name in ("margin-left", "border-left-width", "padding-left", "padding-right", "border-right-width", "margin-right"): style_val = type(self).get_integer_setting(self.style_settings[setting_name], surface_width) self.style_values[setting_name] = style_val for setting_name in ("margin-top", "border-top-width", "padding-top", "padding-bottom", "border-bottom-width", "margin-bottom"): style_val = type(self).get_integer_setting(self.style_settings[setting_name], surface_height) self.style_values[setting_name] = style_val def calculate_outer_dimensions(self): self.debug("WidgetInstance.calculate_outer_dimensions()") # calculate width outer_width = (self.style_values["margin-left"] + self.style_values["margin-right"] + self.style_values["padding-left"] + self.style_values["padding-right"]) border_left_width = 0 if self.style_settings["border-left-style"] not in ("none", "hidden"): border_left_width = self.style_values["border-left-width"] if self.style_settings["border-left-style"] == "double": border_left_width = border_left_width * 2 + 1 border_right_width = 0 if self.style_settings["border-right-style"] not in ("none", "hidden"): border_right_width = self.style_values["border-right-width"] if self.style_settings["border-right-style"] == "double": border_right_width = border_right_width * 2 + 1 outer_width += border_left_width + 
border_right_width # calculate height outer_height = (self.style_values["margin-top"] + self.style_values["margin-bottom"] + self.style_values["padding-top"] + self.style_values["padding-bottom"]) border_top_height = 0 if self.style_settings["border-top-style"] not in ("none", "hidden"): border_top_height = self.style_values["border-top-width"] if self.style_settings["border-top-style"] == "double": border_top_height = border_top_height * 2 + 1 border_bottom_height = 0 if self.style_settings["border-bottom-style"] not in ("none", "hidden"): border_bottom_height = self.style_values["border-bottom-width"] if self.style_settings["border-bottom-style"] == "double": border_bottom_height = border_bottom_height * 2 + 1 outer_height += border_top_height + border_bottom_height return (outer_width, outer_height) def get_inner_setting_values(self, max_dimensions): self.debug("WidgetInstance.get_inner_setting_values(max_dimensions={})". format(max_dimensions)) # calculate min-width, width, max-width values min_width_val = type(self).get_integer_setting(self.style_settings["min-width"], max_dimensions[0]) if min_width_val > max_dimensions[0]: min_width_val = max_dimensions[0] self.style_values["min-width"] = min_width_val max_width_val = max_dimensions[0] if self.style_settings["max-width"] != "none": max_width_val = type(self).get_integer_setting(self.style_settings["max-width"], max_dimensions[0]) if max_width_val > max_dimensions[0]: max_width_val = max_dimensions[0] elif max_width_val < min_width_val: max_width_val = min_width_val self.style_values["max-width"] = max_width_val if self.style_settings["width"] != "auto": width_val = type(self).get_integer_setting(self.style_settings["width"], max_dimensions[0]) if width_val < min_width_val: width_val = min_width_val if width_val > max_width_val: width_val = max_width_val self.style_values["width"] = width_val # calculate min-height, height, max-height values min_height_val = 
type(self).get_integer_setting(self.style_settings["min-height"], max_dimensions[1]) if min_height_val > max_dimensions[1]: min_height_val = max_dimensions[1] self.style_values["min-height"] = min_height_val max_height_val = max_dimensions[1] if self.style_settings["max-height"] != "none": max_height_val = type(self).get_integer_setting(self.style_settings["max-height"], max_dimensions[1]) if max_height_val > max_dimensions[1]: max_height_val = max_dimensions[1] elif max_height_val < min_height_val: max_height_val = min_height_val self.style_values["max-height"] = max_height_val if self.style_settings["height"] != "auto": height_val = type(self).get_integer_setting(self.style_settings["height"], max_dimensions[1]) if height_val < min_height_val: height_val = min_height_val if height_val > max_height_val: height_val = max_height_val self.style_values["height"] = height_val def get_element_dimensions(self): """ Called after get_inner_setting_values() to determine the size of the widget's content. The base class will use the 'min-width' and 'min-height' properties as element dimensions, if the 'width' and 'height' properties are 'auto'; otherwise, it will use the 'width' and/or 'height' properties' values. Subclasses should start here, and expand as needed to fit widget content, honoring the 'max-width' and 'max-height' properties. 
""" self.debug("WidgetInstance.get_element_dimensions()") element_width = self.style_values["min-width"] element_height = self.style_values["min-height"] if self.style_settings["width"] != "auto": element_width = self.style_values["width"] if self.style_settings["height"] != "auto": element_height = self.style_values["height"] return (element_width, element_height) def get_color_values(self): self.debug("WidgetInstance.get_color_values()") color_property_list = ["border-top-color", "border-right-color", "border-bottom-color", "border-left-color", "background-color", "color"] # put Color objects into border/background color settings for color_property in color_property_list: color_name = self.style_settings[color_property] default_color = str(WidgetStyle.STYLE_CONSTRAINTS[color_property]["default"]) color_string = color_name if color_name != "transparent": minfo = WidgetStyle.WEB_COLOR_RE.match(color_name) if minfo: if len(color_name) == 4: color_string = "#0{}0{}0{}".format(*color_name[1:4]) elif len(color_name) == 5: color_string = "#0{}0{}0{}0{}".format(*color_name[1:5]) elif len(color_name) == 6: str_ary = [color_name[idx] for idx in range(1, 4)] str_ary.append(color_name[4:]) color_string = "#0{}0{}0{}{}".format(*str_ary) elif len(color_name) == 8: color_string = "{}0{}".format(color_name[:7], color_name[7]) try: self.style_values[color_property] = color.Color(color_string) except ValueError: self.style_values[color_property] = color.Color(default_color) elif color_property == "color": # font color isn't supposed to be "transparent", so replace # this string with the default color self.style_values[color_property] = color.Color(default_color) def get_min_size(self): """ Calculate the widget's minimum width and height, and return them in a tuple. Container widgets may call this to find out how much space it needs to reserve for each of its child widgets. 
The base class only returns the space surrounding the widget's contents: the sum of margin, border, and padding widths for each side. Subclasses should call this method, then determine the element's actual dimensions after taking min/max width and height into account. """ self.debug("WidgetInstance.get_min_size()") # create a surface such that 1% is a minimum of 1 pixel dummy_surface = DummySurface(0, 0, 100, 100) min_width = 1 min_height = 1 style_hash = self.get_widget_instance_style_hash() style_info = WidgetStyle(self.game_engine.global_style_settings.get_style(**style_hash)) self.get_widget_settings(style_info) self.get_outer_setting_values(dummy_surface) min_outer_dims = self.calculate_outer_dimensions() if min_outer_dims[0] > min_width: min_width = min_outer_dims[0] if min_outer_dims[1] > min_height: min_height = min_outer_dims[1] return (min_width, min_height) def calculate_top_outer_border_size(self): top_size = self.style_values["margin-top"] + self.style_values["padding-top"] border_top_height = 0 if (self.style_settings["border-top-style"] not in ("none", "hidden") and (self.style_settings["border-top-color"] != "transparent")): border_top_height = self.style_values["border-top-width"] if self.style_settings["border-top-style"] == "double": border_top_height = border_top_height * 2 + 1 top_size += border_top_height return top_size def calculate_left_outer_border_size(self): left_size = self.style_values["margin-left"] + self.style_values["padding-left"] border_left_width = 0 if (self.style_settings["border-left-style"] not in ("none", "hidden") and (self.style_settings["border-left-color"] != "transparent")): border_left_width = self.style_values["border-left-width"] if self.style_settings["border-left-style"] == "double": border_left_width = border_left_width * 2 + 1 left_size += border_left_width return left_size def draw_border_side(self, screen, side, element_dims, bwidth, bcolor, style): draw_rect = pygame.Rect(0, 0, 0, 0) # thick lines are 
_centered_ on the calculated coordinates, so shift # them by 1/2 their width thick_adj = 0 if bwidth > 1: thick_adj = int(math.floor((bwidth-1) / 2)) if side == "top": draw_rect.left = self.style_values["margin-left"] draw_rect.top = self.style_values["margin-top"] + thick_adj draw_rect.width = self.style_values["border-left-width"] + \ self.style_values["padding-left"] + element_dims[0] + \ self.style_values["padding-right"] if draw_rect.width <= 1: return elif side == "bottom": draw_rect.left = self.style_values["margin-left"] draw_rect.top = (self.calculate_top_outer_border_size() + element_dims[1] + self.style_values["padding-bottom"] + thick_adj) draw_rect.width = self.style_values["border-left-width"] + \ self.style_values["padding-left"] + element_dims[0] + \ self.style_values["padding-right"] if draw_rect.width <= 1: return elif side == "left": draw_rect.left = self.style_values["margin-left"] + thick_adj draw_rect.top = self.style_values["margin-top"] draw_rect.height = self.style_values["border-top-width"] + \ self.style_values["padding-top"] + element_dims[1] + \ self.style_values["padding-bottom"] if draw_rect.height <= 1: return elif side == "right": draw_rect.left = (self.calculate_left_outer_border_size() + element_dims[0] + self.style_values["padding-right"] + thick_adj) draw_rect.top = self.style_values["margin-top"] draw_rect.height = self.style_values["border-top-width"] + \ self.style_values["padding-top"] + element_dims[1] + \ self.style_values["padding-bottom"] if draw_rect.height <= 1: return start_coord = coord.Coordinate(draw_rect.left, draw_rect.top) end_coord = coord.Coordinate(draw_rect.right, draw_rect.bottom) # self.debug("Draw {} border from {} to {}, width {}, color {}, style {}".format( # side, start_coord, end_coord, width, color, style)) drawing.draw_line(screen, start_coord, end_coord, bwidth, bcolor, style) def draw_border(self, screen, outer_dims): self.debug("WidgetInstance.draw_border(screen={}, outer_dims={})". 
format(screen, outer_dims)) element_dims = self.get_element_dimensions() for side in ("top", "right", "bottom", "left"): border_width = self.style_values["border-{}-width".format(side)] border_style = self.style_settings["border-{}-style".format(side)] border_color_style = self.style_settings["border-{}-color".format(side)] border_color = "transparent" if border_color_style != "transparent": border_color = self.style_values["border-{}-color".format(side)] else: continue if border_style in ("none", "hidden") or (border_width < 1) or \ (border_color == "transparent"): continue self.draw_border_side(screen, side, element_dims, border_width, border_color, border_style) def draw(self, screen): """ Draw the widget instance to a surface using css properties. Always recalculate the settings, in case the style has been updated, or an attribute has changed that may affect the style. :param screen: A pygame surface upon which to draw the widget :type screen: :py:class:`pygame.Surface` """ self.debug("{} inst {}: WidgetInstance.draw(screen={})". format(self.kind.name, self.inst_id, screen)) if not self.visible: return style_hash = self.get_widget_instance_style_hash() # self.debug("Find style {} in {} ..".format(style_hash, style_info)) style_info = WidgetStyle(self.game_engine.global_style_settings.get_style(**style_hash)) self.get_widget_settings(style_info) # self.debug("Style settings: {}".format(self.style_settings)) self.get_outer_setting_values(screen) outer_dims = self.calculate_outer_dimensions() max_inner_dims = (screen.get_width() - outer_dims[0], screen.get_height() - outer_dims[1]) self.get_inner_setting_values(max_inner_dims) self.get_color_values() # self.debug("Style values: {}".format(self.style_values)) self.draw_border(screen, outer_dims) def get_widget_instance_style_hash(self): """ Collect widget instance style information for comparison with stylesheet settings. Subclasses should start here and add attribute matches (e.g. 
a checkbutton could match on "checked" attribute "on" or "off") """ self.debug("WidgetInstance.get_widget_instance_style_hash()") props = { "element_type": self.kind.name, "element_id": self.widget_id, } if len(self.widget_class) > 0: props["element_class"] = self.widget_class if self.hover: props["pseudo_class"] = "hover" return props class LabelWidgetInstance(WidgetInstance): # WIDGET_INSTANCE_SUBCLASS_SYMBOLS = { # "label": "", # "font": "", # "font_size": "initial", # "font_style": "initial", # "font_weight": "initial", # "text_decoration": "initial", # "text_transform": "initial", # "text_align": "initial", # "vertical_align": "initial", # "color": "black" # } FONT_SIZE_CATEGORIES = { "small": 10, "medium": 14, "large": 20 } def __init__(self, kind, screen, screen_dims, id_, settings=None, **kwargs): super(LabelWidgetInstance, self).__init__(kind, screen, screen_dims, id_, settings, **kwargs) self.font_resource = None self.get_font_resource() self._font_point_size = 12 @property def label(self): return self.symbols["label"] @label.setter def label(self, new_label): try: self.symbols["label"] = str(new_label) except ValueError: pass @property def font(self): return self.symbols["font"] @font.setter def font(self, new_font): try: self.symbols["font"] = str(new_font) self.get_font_resource() except ValueError: pass @property def font_size(self): return self.symbols["font_size"] @font_size.setter def font_size(self, new_size): self.symbols["font_size"] = new_size def get_font_resource(self): self.debug("LabelWidgetInstance.get_font_resource()") if (len(self.font) == 0) or (self.font not in self.kind.game_engine.resources['fonts'].keys()): # revert to a system font, if found if hasattr(self.kind.game_engine, 'system_font'): self.font_resource = self.kind.game_engine.system_font else: return None else: self.font_resource = self.kind.game_engine.resources['fonts'][self.font] def calc_label_size(self): self.debug("LabelWidgetInstance.calc_label_size()") if 
self.font_resource is None: return (0, 0) if len(self.label) == 0: return (0, 0) font_rndr = self.font_resource.get_font_renderer() return font_rndr.calc_render_size(self.label) def get_min_size(self): self.debug("LabelWidgetInstance.get_min_size()") total_width, total_height = super(LabelWidgetInstance, self).get_min_size() text_width, text_height = self.calc_label_size() total_width += text_width total_height += text_height return (total_width, total_height) def get_element_dimensions(self): self.debug("LabelWidgetInstance.get_element_dimensions()") element_width = self.style_values["min-width"] element_height = self.style_values["min-height"] label_width, label_height = self.calc_label_size() if self.style_settings["width"] != "auto": element_width = self.style_values["width"] elif label_width > element_width: element_width = label_width if self.style_settings["height"] != "auto": element_height = self.style_values["height"] elif label_height > element_height: element_height = label_height return (element_width, element_height) def draw_text(self, surface): self.debug("LabelWidgetInstance.draw_text(surface={})".format(surface)) surf_width = surface.get_width() surf_height = surface.get_height() font_rndr = self.font_resource.get_font_renderer() # apply horizontal, vertical alignment text_width, text_height = font_rndr.calc_render_size(self.label) top_left = coord.Coordinate(0, 0) if surf_width > text_width: if self.style_settings["text-align"] == "center": top_left.x = (surf_width / 2) - (text_width / 2) elif self.style_settings["text-align"] == "right": top_left.x = surf_width - text_width if surf_height > text_height: if self.style_settings["vertical-align"] == "middle": top_left.y = (surf_height / 2) - (text_height / 2) elif self.style_settings["vertical-align"] == "bottom": top_left.y = surf_height - text_height font_rndr.render_text(surface, top_left, self.label, self.style_values["color"]) def draw(self, screen): 
self.debug("LabelWidgetInstance.draw(screen={})".format(screen)) # draw any visible borders super(LabelWidgetInstance, self).draw(screen) # create a subsurface big enough to hold the element dimensions label_width, label_height = self.calc_label_size() if (label_width > 0) and (label_height > 0): subsurf_width, subsurf_height = self.get_element_dimensions() subsurf_left = super(LabelWidgetInstance, self).calculate_left_outer_border_size() subsurf_top = super(LabelWidgetInstance, self).calculate_top_outer_border_size() subsurf_rect = pygame.Rect(subsurf_left, subsurf_top, subsurf_width, subsurf_height) subsurf = screen.subsurface(subsurf_rect) self.draw_text(subsurf) class WidgetObjectTypeInvalid(Exception): pass class WidgetObjectType(object_type.ObjectType): DEFAULT_VISIBLE = False # subclasses set this to their own instance type WIDGET_INSTANCE_TYPE = WidgetInstance # subclasses can add their own YAML properties by setting this class # variable to a list of tuples [(entry_name, entry_type), ..] 
where # entry_type is usually a standard type: str, int, or bool WIDGET_SUBCLASS_KW_ENTRIES = [] @classmethod def gen_kwargs_from_yaml_obj(cls, obj_name, obj_yaml, game_engine): kwargs = super(WidgetObjectType, cls).gen_kwargs_from_yaml_obj(obj_name, obj_yaml, game_engine) kwargs.update({ "visible": WidgetObjectType.DEFAULT_VISIBLE, }) if "visible" in obj_yaml.keys(): kwargs["visible"] = (obj_yaml["visible"] is True) for kw_entry, entry_type in cls.WIDGET_SUBCLASS_KW_ENTRIES: if kw_entry in obj_yaml.keys(): if isinstance(entry_type, bool): kwargs[kw_entry] = (obj_yaml[kw_entry] is True) else: # set the kwarg if the type conversion succeeds try: kwargs[kw_entry] = entry_type(obj_yaml[kw_entry]) except ValueError: pass return kwargs def __init__(self, widget_name, game_engine, **kwargs): super(WidgetObjectType, self).__init__(widget_name, game_engine, **kwargs) #: Flag whether this widget type is a container for other widgets self.is_container = False self.visible = self.DEFAULT_VISIBLE # default draw action sequence draws the object's sprite self["draw"] = action_sequence.ActionSequence() self["draw"].append_action(action.DrawAction("draw_self")) if kwargs and "visible" in kwargs: self.visible = kwargs["visible"] def make_new_instance(self, screen, instance_properties=None): """ Generate a new instance of the widget type in response to :py:meth:`~pygame_maker.actors.object_type.ObjectType.create_instance` :param screen: The surface the instance will be drawn upon. The instance can use this surface's (often a sub-surface's) width and height parameters to determine child widget placement :type screen: :py:class:`pygame.Surface` :param instance_properties: A hash of settings to be applied. See kwargs entry in :py:meth:`~pygame_maker.actors.simple_object_instance.SimpleObjectInstance.__init__` :type instance_properties: dict """ self.debug("WidgetObjectType.make_new_instance(screen={}, instance_properties={})". 
                   format(screen, instance_properties))
        screen_dims = (screen.get_width(), screen.get_height())
        new_instance = self.WIDGET_INSTANCE_TYPE(self, screen, screen_dims,
            self._id, instance_properties)
        # NOTE(review): the new instance is appended but not returned; callers
        # that need a handle must find it in instance_list — confirm intended.
        self.instance_list.append(new_instance)

    def update(self):
        """
        Update all instances of this widget type.

        Currently a no-op.
        """
        pass

    def draw(self, in_event):
        """Draw all visible instances.

        :param in_event: The event that triggered drawing (used only for
            debug logging here).
        """
        self.debug("WidgetObjectType.draw(in_event={})".format(in_event))
        if len(self.instance_list) > 0:
            for inst in self.instance_list:
                # self.debug("Check inst {}".format(inst))
                # Instances with a parent are skipped here (presumably drawn
                # by their parent container — confirm against container code).
                if inst.parent is not None:
                    continue
                if inst.visible:
                    # self.debug("Draw visible inst {}".format(inst))
                    inst.draw(inst.screen)


class LabelWidgetObjectType(WidgetObjectType):
    # Widget object type whose instances render text labels.
    WIDGET_INSTANCE_TYPE = LabelWidgetInstance
from __future__ import absolute_import # Implementation of RAKE - Rapid Automtic Keyword Exraction algorithm # as described in: # Rose, S., D. Engel, N. Cramer, and W. Cowley (2010). # Automatic keyword extraction from indi-vidual documents. # In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and Theory.unknown: John Wiley and Sons, Ltd. import re import operator def is_number(s): try: float(s) if '.' in s else int(s) return True except ValueError: return False def SmartStopList(): from .stoplists import SmartStopList return SmartStopList.words() def FoxStopList(): from .stoplists import FoxStopList return FoxStopList.words() def MySQLStopList(): from .stoplists import MySQLStopList return MySQLStopList.words() def NLTKStopList(): from .stoplists import NLTKStopList return NLTKStopList.words() def load_stop_words(stop_word_file, divide, delimiter): """ Utility function to load stop words from a file and return as a list of words @param stop_word_file Path and file name of a file containing stop words. @return list A list of stop words. """ stop_words = [] if divide: for line in open(stop_word_file): for word in #magic re.split code if word != '' or ' ': stop_words.append(word) else: for line in open(stop_word_file): stop_words.append(line) return stop_words def load_stop_words_delimiter(stop_word_file, delimiter): """ Utility function to load stop words from a file and return as a list of words @param stop_word_file Path and file name of a file containing stop words. @return list A list of stop words. """ stop_words = [] for line in open(stop_word_file): for word in line.split(delimiter): # in case more than one per line #handles .csvs or a comma seperated list, or equivalent. 
#'asd,'.split(',') returns ['asd', ''] but 'asd '.split() returns 'asd' if word !='': stop_words.append(word) return stop_words def separate_words(text, min_word_return_size): """ Utility function to return a list of all words that are have a length greater than a specified number of characters. @param text The text that must be split in to words. @param min_word_return_size The minimum no of characters a word must have to be included. """ splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]') words = [] for single_word in splitter.split(text): current_word = single_word.strip().lower() # leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word): words.append(current_word) return words def split_sentences(text): """ Utility function to return a list of sentences. @param text The text that must be split in to sentences. """ sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s') sentences = sentence_delimiters.split(text) return sentences def build_stop_word_regex(stop_word_list): stop_word_regex_list = [] for word in stop_word_list: word_regex = r'\b' + word + r'(?![\w-])' stop_word_regex_list.append(word_regex) stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE) return stop_word_pattern def generate_candidate_keywords(sentence_list, stopword_pattern): phrase_list = [] for s in sentence_list: tmp = re.sub(stopword_pattern, '|', s.strip()) phrases = tmp.split("|") for phrase in phrases: phrase = phrase.strip().lower() if phrase != "": phrase_list.append(phrase) return phrase_list def calculate_word_scores(phraseList): word_frequency = {} word_degree = {} for phrase in phraseList: word_list = separate_words(phrase, 0) word_list_length = len(word_list) word_list_degree = word_list_length - 1 for word in word_list: word_frequency.setdefault(word, 0) word_frequency[word] += 1 
word_degree.setdefault(word, 0) word_degree[word] += word_list_degree for item in word_frequency: word_degree[item] = word_degree[item] + word_frequency[item] # Calculate Word scores = deg(w)/frew(w) word_score = {} for item in word_frequency: word_score.setdefault(item, 0) word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) return word_score def generate_candidate_keyword_scores(phrase_list, word_score): keyword_candidates = {} for phrase in phrase_list: keyword_candidates.setdefault(phrase, 0) word_list = separate_words(phrase, 0) candidate_score = 0 for word in word_list: candidate_score += word_score[word] keyword_candidates[phrase] = candidate_score return keyword_candidates class Rake(object): def __init__(self, stop_words, divide = True, delimiter = ' '): #lets users call predefined stopwords easily in a platform agnostic manner or use their own list if isinstance(stop_words, list): self.__stop_words_pattern = build_stop_word_regex(stop_words) else: if divide: self.__stop_words_pattern = build_stop_word_regex(load_stop_words(stop_words,divide,delimiter)) """delimiter != ' ' and divide == False: #send error and stop here else: if divide == True: else: self.__stop_words_pattern = build_stop_word_regex(load_stop_words_delimiter(stop_words,delimiter))""" def run(self, text): sentence_list = split_sentences(text) phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern) word_scores = calculate_word_scores(phrase_list) keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores) sorted_keywords = sorted(keyword_candidates.items(), key=operator.itemgetter(1), reverse=True) return sorted_keywords goes full regex from __future__ import absolute_import # Implementation of RAKE - Rapid Automtic Keyword Exraction algorithm # as described in: # Rose, S., D. Engel, N. Cramer, and W. Cowley (2010). # Automatic keyword extraction from indi-vidual documents. # In M. W. Berry and J. 
Kogan (Eds.), Text Mining: Applications and Theory.unknown: John Wiley and Sons, Ltd. import re import operator def is_number(s): try: float(s) if '.' in s else int(s) return True except ValueError: return False def SmartStopList(): from .stoplists import SmartStopList return SmartStopList.words() def FoxStopList(): from .stoplists import FoxStopList return FoxStopList.words() def MySQLStopList(): from .stoplists import MySQLStopList return MySQLStopList.words() def NLTKStopList(): from .stoplists import NLTKStopList return NLTKStopList.words() def load_stop_words(stop_word_file, divide, delimiter): """ Utility function to load stop words from a file and return as a list of words @param stop_word_file Path and file name of a file containing stop words. @return list A list of stop words. """ stop_words = [] if divide: for line in open(stop_word_file): for word in re.split(delimiter, stopword_file): if word != '' or ' ': stop_words.append(word) else: for line in open(stop_word_file): if line != '' or ' ': #I figure this is going to make someones life much easier stop_words.append(line) return stop_words def separate_words(text, min_word_return_size): """ Utility function to return a list of all words that are have a length greater than a specified number of characters. @param text The text that must be split in to words. @param min_word_return_size The minimum no of characters a word must have to be included. """ splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]') words = [] for single_word in splitter.split(text): current_word = single_word.strip().lower() # leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word): words.append(current_word) return words def split_sentences(text): """ Utility function to return a list of sentences. @param text The text that must be split in to sentences. 
""" sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s') sentences = sentence_delimiters.split(text) return sentences def build_stop_word_regex(stop_word_list): stop_word_regex_list = [] for word in stop_word_list: word_regex = r'\b' + word + r'(?![\w-])' stop_word_regex_list.append(word_regex) stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE) return stop_word_pattern def generate_candidate_keywords(sentence_list, stopword_pattern): phrase_list = [] for s in sentence_list: tmp = re.sub(stopword_pattern, '|', s.strip()) phrases = tmp.split("|") for phrase in phrases: phrase = phrase.strip().lower() if phrase != "": phrase_list.append(phrase) return phrase_list def calculate_word_scores(phraseList): word_frequency = {} word_degree = {} for phrase in phraseList: word_list = separate_words(phrase, 0) word_list_length = len(word_list) word_list_degree = word_list_length - 1 for word in word_list: word_frequency.setdefault(word, 0) word_frequency[word] += 1 word_degree.setdefault(word, 0) word_degree[word] += word_list_degree for item in word_frequency: word_degree[item] = word_degree[item] + word_frequency[item] # Calculate Word scores = deg(w)/frew(w) word_score = {} for item in word_frequency: word_score.setdefault(item, 0) word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) return word_score def generate_candidate_keyword_scores(phrase_list, word_score): keyword_candidates = {} for phrase in phrase_list: keyword_candidates.setdefault(phrase, 0) word_list = separate_words(phrase, 0) candidate_score = 0 for word in word_list: candidate_score += word_score[word] keyword_candidates[phrase] = candidate_score return keyword_candidates class Rake(object): def __init__(self, stop_words, divide = True, delimiter = '\W+'): #lets users call predefined stopwords easily in a platform agnostic manner or use their own list if isinstance(stop_words, list): self.__stop_words_pattern = 
build_stop_word_regex(stop_words) else: self.__stop_words_pattern = build_stop_word_regex(load_stop_words(stop_words,divide,delimiter)) """delimiter != ' ' and divide == False: #send error and stop here else: if divide == True: else: self.__stop_words_pattern = build_stop_word_regex(load_stop_words_delimiter(stop_words,delimiter))""" def run(self, text): sentence_list = split_sentences(text) phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern) word_scores = calculate_word_scores(phrase_list) keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores) sorted_keywords = sorted(keyword_candidates.items(), key=operator.itemgetter(1), reverse=True) return sorted_keywords
import time timeStamp = int(time.time()) timeArray = time.localtime(timeStamp) otherStyleTime = time.strftime("%Y/%m/%d %H:%M:%S", timeArray) txt = open('Diary.txt', 'r+') txt = open('Diary.txt', 'a+') print "Here's your diary:" print txt.read() print "How are you today?" content = raw_input(">") txt.write(otherStyleTime) txt.write("\n") txt.write(content) txt.write("\n") txt.close update diary in Mac import time timeStamp = int(time.time()) timeArray = time.localtime(timeStamp) otherStyleTime = time.strftime("%Y/%m/%d %H:%M:%S", timeArray) txt = open('Diary.txt', 'a+') txt = open('Diary.txt', 'r+') print "Here's your diary:" print txt.read() txt.close print "How are you today?" content = raw_input(">") txt.write(otherStyleTime) txt.write("\n") txt.write(content) txt.write("\n") txt.close
# Copyright Vertex.AI.
# Bazel-generated wrapper: builds (or reuses) a content-hashed virtualenv and
# re-executes the real entry point inside it.  The __BZL_* names below are
# placeholders substituted by the build system; this file is a template and
# does not run unmodified.
from __future__ import print_function

import hashlib
import os
import platform
import shutil
import sys
from subprocess import call, check_call

MAIN = "__BZL_MAIN__"                      # logical runfiles path of the entry script
REQUIREMENTS = ["__BZL_REQUIREMENTS__"]    # logical paths of pip requirements files
VENV_ARGS = __BZL_VENV_ARGS__              # extra virtualenv CLI args (substituted list)


def _find_in_runfiles(logical_name):
    """Map a logical runfiles path to its physical path.

    Lazily loads the Bazel runfiles MANIFEST on first call and caches it as a
    function attribute.  Falls back to returning *logical_name* unchanged when
    no mapping exists.
    """
    key = logical_name
    if key.startswith('external/'):
        key = key[len('external/'):]
    try:
        # Fast path: manifest already loaded.
        return _find_in_runfiles.manifest.get(key, logical_name)
    except AttributeError:
        # First call: build the cache from $RUNFILES_DIR/MANIFEST if present.
        _find_in_runfiles.manifest = {}
        if 'RUNFILES_DIR' in os.environ:
            manifest_filename = os.path.join(os.environ['RUNFILES_DIR'], 'MANIFEST')
            if os.path.exists(manifest_filename):
                with open(manifest_filename) as manifest:
                    for line in manifest:
                        # Each line is "<logical> <physical>".
                        (logical, physical) = line.split(' ', 2)
                        _find_in_runfiles.manifest[logical] = physical.strip()
        return _find_in_runfiles.manifest.get(key, logical_name)


class VirtualEnv(object):
    """A virtualenv keyed by the hash of its args and requirements files."""

    def __init__(self, requirements):
        self._requirements = requirements
        # Hash the venv args plus requirements file contents so any change
        # yields a fresh environment directory under ~/.t2/venv/.
        hasher = hashlib.md5()
        for arg in VENV_ARGS:
            hasher.update(arg)
        for requirement in requirements:
            with open(_find_in_runfiles(requirement)) as file_:
                hasher.update(file_.read())
        self._path = os.path.join(os.path.expanduser('~'), '.t2', 'venv', hasher.hexdigest())
        if platform.system() == 'Windows':
            self._venv_bin = os.path.join(self._path, 'Scripts')
        else:
            self._venv_bin = os.path.join(self._path, 'bin')
        self._pip = os.path.join(self._venv_bin, 'pip')
        self.python = os.path.join(self._venv_bin, 'python')

    def make(self):
        """Create the virtualenv if absent; return the environment dict to run with."""
        try:
            if not os.path.exists(self._path):
                if platform.system() == 'Windows':
                    vpython = []
                else:
                    vpython = ['-p', 'python2']
                check_call(['virtualenv'] + vpython + VENV_ARGS + [self._path])
                # NOTE(review): requirements install assumed to occur only on
                # first creation (inside the exists-check) — confirm intent.
                for requirement in self._requirements:
                    check_call(
                        [self.python, self._pip, 'install', '-r', _find_in_runfiles(requirement)])
        except:
            # Any failure leaves a half-built venv; remove it so the next run
            # starts clean, then re-raise.
            if os.path.exists(self._path):
                shutil.rmtree(self._path)
            raise
        env = dict(os.environ)
        env['VIRTUAL_ENV'] = self._path
        env['PATH'] = os.pathsep.join([self._venv_bin, os.getenv('PATH', "")])
        if platform.system() == 'Windows':
            env['PATHEXT'] = '.EXE'
        return env


def main():
    # Build/reuse the venv, then exec MAIN inside it with translated args.
    venv = VirtualEnv(REQUIREMENTS)
    env = venv.make()
    args = [venv.python, MAIN] + sys.argv[1:]
    args[1:] = [_find_in_runfiles(arg) for arg in args[1:]]
    print('Running in venv: {}'.format(venv._path))
    sys.exit(call(args, env=env))


if __name__ == '__main__':
    main()
Use pip directly; use RUNFILES_MANIGEST_FILE (#129)
# Copyright Vertex.AI.
# Second (post-commit) version of the template above: honors the
# RUNFILES_MANIFEST_FILE environment variable and invokes pip directly
# instead of via the venv python.
from __future__ import print_function

import hashlib
import os
import platform
import shutil
import sys
from subprocess import call, check_call

MAIN = "__BZL_MAIN__"                      # logical runfiles path of the entry script
REQUIREMENTS = ["__BZL_REQUIREMENTS__"]    # logical paths of pip requirements files
VENV_ARGS = __BZL_VENV_ARGS__              # extra virtualenv CLI args (substituted list)


def _find_in_runfiles(logical_name):
    """Map a logical runfiles path to its physical path.

    Prefers an explicit RUNFILES_MANIFEST_FILE, falling back to
    $RUNFILES_DIR/MANIFEST; the parsed manifest is cached on first call.
    """
    key = logical_name
    if key.startswith('external/'):
        key = key[len('external/'):]
    try:
        # Fast path: manifest already loaded.
        return _find_in_runfiles.manifest.get(key, logical_name)
    except AttributeError:
        _find_in_runfiles.manifest = {}
        manifest_filename = None
        if 'RUNFILES_MANIFEST_FILE' in os.environ:
            manifest_filename = os.environ['RUNFILES_MANIFEST_FILE']
        elif 'RUNFILES_DIR' in os.environ:
            manifest_filename = os.path.join(os.environ['RUNFILES_DIR'], 'MANIFEST')
        if manifest_filename and os.path.exists(manifest_filename):
            with open(manifest_filename) as manifest:
                for line in manifest:
                    # Each line is "<logical> <physical>".
                    (logical, physical) = line.split(' ', 2)
                    _find_in_runfiles.manifest[logical] = physical.strip()
        return _find_in_runfiles.manifest.get(key, logical_name)


class VirtualEnv(object):
    """A virtualenv keyed by the hash of its args and requirements files."""

    def __init__(self, requirements):
        self._requirements = requirements
        # Hash the venv args plus requirements file contents so any change
        # yields a fresh environment directory under ~/.t2/venv/.
        hasher = hashlib.md5()
        for arg in VENV_ARGS:
            hasher.update(arg)
        for requirement in requirements:
            with open(_find_in_runfiles(requirement)) as file_:
                hasher.update(file_.read())
        self._path = os.path.join(os.path.expanduser('~'), '.t2', 'venv', hasher.hexdigest())
        if platform.system() == 'Windows':
            self._venv_bin = os.path.join(self._path, 'Scripts')
        else:
            self._venv_bin = os.path.join(self._path, 'bin')
        self._pip = os.path.join(self._venv_bin, 'pip')
        self.python = os.path.join(self._venv_bin, 'python')

    def make(self):
        """Create the virtualenv if absent; return the environment dict to run with."""
        try:
            if not os.path.exists(self._path):
                if platform.system() == 'Windows':
                    vpython = []
                else:
                    vpython = ['-p', 'python2']
                check_call(['virtualenv'] + vpython + VENV_ARGS + [self._path])
                # pip is invoked directly here (the pre-commit version ran it
                # through the venv python interpreter).
                for requirement in self._requirements:
                    check_call(
                        [self._pip, 'install', '-r', _find_in_runfiles(requirement)])
        except:
            # Any failure leaves a half-built venv; remove it so the next run
            # starts clean, then re-raise.
            if os.path.exists(self._path):
                shutil.rmtree(self._path)
            raise
        env = dict(os.environ)
        env['VIRTUAL_ENV'] = self._path
        env['PATH'] = os.pathsep.join([self._venv_bin, os.getenv('PATH', "")])
        if platform.system() == 'Windows':
            env['PATHEXT'] = '.EXE'
        return env


def main():
    # Build/reuse the venv, then exec MAIN inside it with translated args.
    venv = VirtualEnv(REQUIREMENTS)
    env = venv.make()
    args = [venv.python, MAIN] + sys.argv[1:]
    args[1:] = [_find_in_runfiles(arg) for arg in args[1:]]
    print('Running in venv: {}'.format(venv._path))
    sys.exit(call(args, env=env))


if __name__ == '__main__':
    main()
# built-ins import itertools as it import sys import argparse import random import logging import json import collections from copy import deepcopy # libraries from numpy import (array, mean, zeros, zeros_like, where, unique, newaxis, nonzero, median, float, ones, arange, inf, isnan, flatnonzero, unravel_index, bincount) import numpy as np from scipy.stats import sem from scipy import sparse from scipy.sparse import lil_matrix from scipy.misc import comb as nchoosek from scipy.ndimage.measurements import label from scipy import ndimage as ndi import networkx as nx from networkx import Graph, biconnected_components from networkx.algorithms.traversal.depth_first_search import dfs_preorder_nodes from skimage.segmentation import relabel_sequential from viridis import tree # local modules from . import morpho from . import sparselol as lol from . import iterprogress as ip from . import optimized as opt from .ncut import ncutW from .mergequeue import MergeQueue from .evaluate import merge_contingency_table, split_vi, xlogx from . import evaluate as ev from . import features from . import classify from .classify import get_classifier, \ unique_learning_data_elements, concatenate_data_elements from .dtypes import label_dtype arguments = argparse.ArgumentParser(add_help=False) arggroup = arguments.add_argument_group('Agglomeration options') arggroup.add_argument('-t', '--thresholds', nargs='+', default=[128], type=float, metavar='FLOAT', help='''The agglomeration thresholds. One output file will be written for each threshold.''' ) arggroup.add_argument('-l', '--ladder', type=int, metavar='SIZE', help='Merge any bodies smaller than SIZE.' ) arggroup.add_argument('-p', '--pre-ladder', action='store_true', default=True, help='Run ladder before normal agglomeration (default).' ) arggroup.add_argument('-L', '--post-ladder', action='store_false', dest='pre_ladder', help='Run ladder after normal agglomeration instead of before (SLOW).' 
) arggroup.add_argument('-s', '--strict-ladder', type=int, metavar='INT', default=1, help='''Specify the strictness of the ladder agglomeration. Level 1 (default): merge anything smaller than the ladder threshold as long as it's not on the volume border. Level 2: only merge smaller bodies to larger ones. Level 3: only merge when the border is larger than or equal to 2 pixels.''' ) arggroup.add_argument('-M', '--low-memory', action='store_true', help='''Use less memory at a slight speed cost. Note that the phrase 'low memory' is relative.''' ) arggroup.add_argument('--disallow-shared-boundaries', action='store_false', dest='allow_shared_boundaries', help='''Watershed pixels that are shared between more than 2 labels are not counted as edges.''' ) arggroup.add_argument('--allow-shared-boundaries', action='store_true', default=True, help='''Count every watershed pixel in every edge in which it participates (default: True).''' ) def conditional_countdown(seq, start=1, pred=bool): """Count down from 'start' each time pred(elem) is true for elem in seq. Used to know how many elements of a sequence remain that satisfy a predicate. Parameters ---------- seq : iterable Any sequence. start : int, optional The starting element. pred : function, type(next(seq)) -> bool A predicate acting on the elements of `seq`. 
Examples -------- >>> seq = range(10) >>> cc = conditional_countdown(seq, start=5, pred=lambda x: x % 2 == 1) >>> next(cc) 5 >>> next(cc) 4 >>> next(cc) 4 >>> next(cc) 3 """ remaining = start for elem in seq: if pred(elem): remaining -= 1 yield remaining ############################ # Merge priority functions # ############################ def oriented_boundary_mean(g, n1, n2): return mean(g.oriented_probabilities_r[g[n1][n2]['boundary']]) def boundary_mean(g, n1, n2): return mean(g.probabilities_r[g[n1][n2]['boundary']]) def boundary_median(g, n1, n2): return median(g.probabilities_r[g[n1][n2]['boundary']]) def approximate_boundary_mean(g, n1, n2): """Return the boundary mean as computed by a MomentsFeatureManager. The feature manager is assumed to have been set up for g at construction. """ return g.feature_manager.compute_edge_features(g, n1, n2)[1] def make_ladder(priority_function, threshold, strictness=1): def ladder_function(g, n1, n2): s1 = g.node[n1]['size'] s2 = g.node[n2]['size'] ladder_condition = \ (s1 < threshold and not g.at_volume_boundary(n1)) or \ (s2 < threshold and not g.at_volume_boundary(n2)) if strictness >= 2: ladder_condition &= ((s1 < threshold) != (s2 < threshold)) if strictness >= 3: ladder_condition &= len(g[n1][n2]['boundary']) > 2 if ladder_condition: return priority_function(g, n1, n2) else: return inf return ladder_function def no_mito_merge(priority_function): def predict(g, n1, n2): frozen = (n1 in g.frozen_nodes or n2 in g.frozen_nodes or (n1, n2) in g.frozen_edges) if frozen: return np.inf else: return priority_function(g, n1, n2) return predict def mito_merge(): def predict(g, n1, n2): if n1 in g.frozen_nodes and n2 in g.frozen_nodes: return np.inf elif (n1, n2) in g.frozen_edges: return np.inf elif n1 not in g.frozen_nodes and n2 not in g.frozen_nodes: return np.inf else: if n1 in g.frozen_nodes: mito = n1 cyto = n2 else: mito = n2 cyto = n1 if g.node[mito]['size'] > g.node[cyto]['size']: return np.inf else: return 1.0 - 
(float(len(g[mito][cyto]['boundary']))/ sum([len(g[mito][x]['boundary']) for x in g.neighbors(mito)])) return predict def classifier_probability(feature_extractor, classifier): def predict(g, n1, n2): if n1 == g.boundary_body or n2 == g.boundary_body: return inf features = np.atleast_2d(feature_extractor(g, n1, n2)) try: prediction = classifier.predict_proba(features) prediction_arr = np.array(prediction, copy=False) if prediction_arr.ndim > 2: prediction_arr = prediction_arr[0] try: prediction = prediction_arr[0][1] except (TypeError, IndexError): prediction = prediction_arr[0] except AttributeError: prediction = classifier.predict(features)[0] return prediction return predict def ordered_priority(edges): d = {} n = len(edges) for i, (n1, n2) in enumerate(edges): score = float(i)/n d[(n1,n2)] = score d[(n2,n1)] = score def ord(g, n1, n2): return d.get((n1,n2), inf) return ord def expected_change_vi(feature_extractor, classifier, alpha=1.0, beta=1.0): prob_func = classifier_probability(feature_extractor, classifier) def predict(g, n1, n2): p = prob_func(g, n1, n2) # Prediction from the classifier # Calculate change in VI if n1 and n2 should not be merged v = compute_local_vi_change( g.node[n1]['size'], g.node[n2]['size'], g.volume_size ) # Return expected change return (p*alpha*v + (1.0-p)*(-beta*v)) return predict def compute_local_vi_change(s1, s2, n): """Compute change in VI if we merge disjoint sizes s1,s2 in a volume n.""" py1 = float(s1)/n py2 = float(s2)/n py = py1+py2 return -(py1*np.log2(py1) + py2*np.log2(py2) - py*np.log2(py)) def compute_true_delta_vi(ctable, n1, n2): p1 = ctable[n1].sum() p2 = ctable[n2].sum() p3 = p1+p2 p1g_log_p1g = xlogx(ctable[n1]).sum() p2g_log_p2g = xlogx(ctable[n2]).sum() p3g_log_p3g = xlogx(ctable[n1]+ctable[n2]).sum() return p3*np.log2(p3) - p1*np.log2(p1) - p2*np.log2(p2) - \ 2*(p3g_log_p3g - p1g_log_p1g - p2g_log_p2g) def expected_change_rand(feature_extractor, classifier, alpha=1.0, beta=1.0): prob_func = 
classifier_probability(feature_extractor, classifier) def predict(g, n1, n2): p = float(prob_func(g, n1, n2)) # Prediction from the classifier v = compute_local_rand_change( g.node[n1]['size'], g.node[n2]['size'], g.volume_size ) return p*v*alpha + (1.0-p)*(-beta*v) return predict def compute_local_rand_change(s1, s2, n): """Compute change in rand if we merge disjoint sizes s1,s2 in volume n.""" return float(s1*s2)/nchoosek(n,2) def compute_true_delta_rand(ctable, n1, n2, n): """Compute change in RI obtained by merging rows n1 and n2. This function assumes ctable is normalized to sum to 1. """ localct = n * ctable[(n1, n2), :] total = localct.data.sum() sqtotal = (localct.data ** 2).sum() delta_sxy = 1. / 2 * ((np.array(localct.sum(axis=0)) ** 2).sum() - sqtotal) delta_sx = 1. / 2 * (total ** 2 - (np.array(localct.sum(axis=1)) ** 2).sum()) return (2 * delta_sxy - delta_sx) / nchoosek(n, 2) def boundary_mean_ladder(g, n1, n2, threshold, strictness=1): f = make_ladder(boundary_mean, threshold, strictness) return f(g, n1, n2) def boundary_mean_plus_sem(g, n1, n2, alpha=-6): bvals = g.probabilities_r[g[n1][n2]['boundary']] return mean(bvals) + alpha*sem(bvals) def random_priority(g, n1, n2): if n1 == g.boundary_body or n2 == g.boundary_body: return inf return random.random() class Rag(Graph): """Region adjacency graph for segmentation of nD volumes. Parameters ---------- watershed : array of int, shape (M, N, ..., P) The labeled regions of the image. Note: this is called `watershed` for historical reasons, but could refer to a superpixel map of any origin. probabilities : array of float, shape (M, N, ..., P[, Q]) The probability of each pixel of belonging to a particular class. 
Typically, this has the same shape as `watershed` and represents the probability that the pixel is part of a region boundary, but it can also have an additional dimension for probabilities of belonging to other classes, such as mitochondria (in biological images) or specific textures (in natural images). merge_priority_function : callable function, optional This function must take exactly three arguments as input (a Rag object and two node IDs) and return a single float. feature_manager : ``features.base.Null`` object, optional A feature manager object that controls feature computation and feature caching. mask : array of bool, shape (M, N, ..., P) A mask of the same shape as `watershed`, `True` in the positions to be processed when making a RAG, `False` in the positions to ignore. show_progress : bool, optional Whether to display an ASCII progress bar during long- -running graph operations. connectivity : int in {1, ..., `watershed.ndim`} When determining adjacency, allow neighbors along `connectivity` dimensions. channel_is_oriented : array-like of bool, shape (Q,), optional For multi-channel images, some channels, for example some edge detectors, have a specific orientation. In conjunction with the `orientation_map` argument, specify which channels have an orientation associated with them. orientation_map : array-like of float, shape (Q,) Specify the orientation of the corresponding channel. (2D images only) normalize_probabilities : bool, optional Divide the input `probabilities` by their maximum to ensure a range in [0, 1]. exclusions : array-like of int, shape (M, N, ..., P), optional Volume of same shape as `watershed`. Mark points in the volume with the same label (>0) to prevent them from being merged during agglomeration. For example, if `exclusions[45, 92] == exclusions[51, 105] == 1`, then segments `watershed[45, 92]` and `watershed[51, 105]` will never be merged, regardless of the merge priority function. 
isfrozennode : function, optional Function taking in a Rag object and a node id and returning a bool. If the function returns ``True``, the node will not be merged, regardless of the merge priority function. isfrozenedge : function, optional As `isfrozennode`, but the function should take the graph and *two* nodes, to specify an edge that cannot be merged. """ def __init__(self, watershed=array([], label_dtype), probabilities=array([]), merge_priority_function=boundary_mean, gt_vol=None, feature_manager=features.base.Null(), mask=None, show_progress=False, connectivity=1, channel_is_oriented=None, orientation_map=array([]), normalize_probabilities=False, exclusions=array([]), isfrozennode=None, isfrozenedge=None): super(Rag, self).__init__(weighted=False) self.show_progress = show_progress self.connectivity = connectivity self.pbar = (ip.StandardProgressBar() if self.show_progress else ip.NoProgressBar()) self.set_watershed(watershed, connectivity) self.set_probabilities(probabilities, normalize_probabilities) self.set_orientations(orientation_map, channel_is_oriented) self.merge_priority_function = merge_priority_function self.max_merge_score = -inf if mask is None: self.mask = np.ones(self.watershed_r.shape, dtype=bool) else: self.mask = morpho.pad(mask, True).ravel() self.build_graph_from_watershed() self.set_feature_manager(feature_manager) self.set_ground_truth(gt_vol) self.set_exclusions(exclusions) self.merge_queue = MergeQueue() self.tree = tree.Ultrametric(self.nodes()) self.frozen_nodes = set() if isfrozennode is not None: for node in self.nodes(): if isfrozennode(self, node): self.frozen_nodes.add(node) self.frozen_edges = set() if isfrozenedge is not None: for n1, n2 in self.edges(): if isfrozenedge(self, n1, n2): self.frozen_edges.add((n1,n2)) def __copy__(self): """Return a copy of the object and attributes. 
""" pr_shape = self.probabilities_r.shape g = super(Rag, self).copy() g.watershed_r = g.watershed.ravel() g.probabilities_r = g.probabilities.reshape(pr_shape) return g def copy(self): """Return a copy of the object and attributes. """ return self.__copy__() def extent(self, nodeid): try: ext = self.extents full_ext = [ext.indices[ext.indptr[f]:ext.indptr[f+1]] for f in self.node[nodeid]['fragments']] return np.concatenate(full_ext).astype(np.intp) except AttributeError: extent_array = opt.flood_fill(self.watershed, np.array(self.node[nodeid]['entrypoint']), np.fromiter(self.node[nodeid]['fragments'], dtype=int)) if len(extent_array) != self.node[nodeid]['size']: sys.stderr.write('Flood fill fail - found %d voxels but size' 'expected %d\n' % (len(extent_array), self.node[nodeid]['size'])) raveled_indices = np.ravel_multi_index(extent_array.T, self.watershed.shape) return set(raveled_indices) def real_edges(self, *args, **kwargs): """Return edges internal to the volume. The RAG actually includes edges to a "virtual" region that envelops the entire volume. This function returns the list of edges that are internal to the volume. Parameters ---------- *args, **kwargs : arbitrary types Arguments and keyword arguments are passed through to the ``edges()`` function of the ``networkx.Graph`` class. Returns ------- edge_list : list of tuples A list of pairs of node IDs, which are typically integers. See Also -------- real_edges_iter, networkx.Graph.edges """ return [e for e in super(Rag, self).edges(*args, **kwargs) if self.boundary_body not in e[:2]] def real_edges_iter(self, *args, **kwargs): """Return iterator of edges internal to the volume. The RAG actually includes edges to a "virtual" region that envelops the entire volume. This function returns the list of edges that are internal to the volume. Parameters ---------- *args, **kwargs : arbitrary types Arguments and keyword arguments are passed through to the ``edges()`` function of the ``networkx.Graph`` class. 
Returns ------- edges_iter : iterator of tuples An iterator over pairs of node IDs, which are typically integers. """ return (e for e in super(Rag, self).edges_iter(*args, **kwargs) if self.boundary_body not in e[:2]) def build_graph_from_watershed(self, idxs=None): """Build the graph object from the region labels. The region labels should have been set ahead of time using ``set_watershed()``. Parameters ---------- idxs : array-like of int, optional Linear indices into raveled volume array. If provided, the graph is built only for these indices. """ if self.watershed.size == 0: return # stop processing for empty graphs if idxs is None: idxs = arange(self.watershed.size, dtype=self.steps.dtype) idxs = idxs[self.mask[idxs]] # use only masked idxs self.add_node(self.boundary_body) labels = np.unique(self.watershed_r[idxs]) sizes = np.bincount(self.watershed_r) if not hasattr(self, 'extents'): self.extents = lol.extents(self.watershed) for nodeid in labels: self.add_node(nodeid) node = self.node[nodeid] node['size'] = sizes[nodeid] node['fragments'] = set([nodeid]) node['entrypoint'] = ( np.array(np.unravel_index(self.extent(nodeid)[0], self.watershed.shape))) inner_idxs = idxs[self.watershed_r[idxs] != self.boundary_body] if self.show_progress: inner_idxs = ip.with_progress(inner_idxs, title='Graph ', pbar=self.pbar) for idx in inner_idxs: nodeid = self.watershed_r[idx] ns = idx + self.steps ns = ns[self.mask[ns]] adj = self.watershed_r[ns] adj = set(adj) for v in adj: if v == nodeid: continue if self.has_edge(nodeid, v): self[nodeid][v]['boundary'].append(idx) else: self.add_edge(nodeid, v, boundary=[idx]) def set_feature_manager(self, feature_manager): """Set the feature manager and ensure feature caches are computed. Parameters ---------- feature_manager : ``features.base.Null`` object The feature manager to be used by this RAG. 
        Returns
        -------
        None
        """
        self.feature_manager = feature_manager
        self.compute_feature_caches()

    def compute_feature_caches(self):
        """Use the feature manager to compute node and edge feature caches.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        for n in ip.with_progress(
                    self.nodes(), title='Node caches ', pbar=self.pbar):
            self.node[n]['feature-cache'] = \
                            self.feature_manager.create_node_cache(self, n)
        for n1, n2 in ip.with_progress(
                    self.edges(), title='Edge caches ', pbar=self.pbar):
            self[n1][n2]['feature-cache'] = \
                            self.feature_manager.create_edge_cache(self, n1, n2)

    def set_probabilities(self, probs=array([]), normalize=False):
        """Set the `probabilities` attributes of the RAG.

        For various reasons, including removing the need for bounds
        checking when looking for neighboring pixels, the volume of
        pixel-level probabilities is padded on all faces. In addition,
        this function adds an attribute `probabilities_r`, a raveled
        view of the padded probabilities array for quick access to
        individual voxels using linear indices.

        Parameters
        ----------
        probs : array
            The input probabilities array.
        normalize : bool, optional
            If ``True``, the values in the array are scaled to be in
            [0, 1].

        Returns
        -------
        None
        """
        # NOTE(review): the empty-input branch below does not return, so
        # execution continues and re-pads the (empty) array — confirm
        # this fall-through is intentional.
        if len(probs) == 0:
            self.probabilities = zeros_like(self.watershed)
            self.probabilities_r = self.probabilities.ravel()
        probs = probs.astype('float')
        if normalize and len(probs) > 1:
            probs -= probs.min() # ensure probs.min() == 0
            probs /= probs.max() # ensure probs.max() == 1
        sp = probs.shape
        sw = tuple(array(self.watershed.shape, dtype=int)-\
                2*self.pad_thickness*ones(self.watershed.ndim, dtype=int))
        p_ndim = probs.ndim
        w_ndim = self.watershed.ndim
        padding = [inf]+(self.pad_thickness-1)*[0]
        if p_ndim == w_ndim:
            # single-channel probabilities: keep a trailing channel axis
            # on the raveled view for uniform indexing
            self.probabilities = morpho.pad(probs, padding)
            self.probabilities_r = self.probabilities.ravel()[:,newaxis]
        elif p_ndim == w_ndim+1:
            # multi-channel probabilities: pad spatial axes only
            axes = list(range(p_ndim-1))
            self.probabilities = morpho.pad(probs, padding, axes)
            self.probabilities_r = self.probabilities.reshape(
                                                (self.watershed.size, -1))

    def set_orientations(self, orientation_map, channel_is_oriented):
        """Set the orientation map of the probability image.

        Parameters
        ----------
        orientation_map : array of float
            A map of angles of the same shape as the superpixel map.
        channel_is_oriented : 1D array-like of bool
            A vector having length the number of channels in the
            probability map.

        Returns
        -------
        None
        """
        # NOTE(review): like set_probabilities, the empty-input branch
        # falls through rather than returning — confirm intended.
        if len(orientation_map) == 0:
            self.orientation_map = zeros_like(self.watershed)
            self.orientation_map_r = self.orientation_map.ravel()
        padding = [0]+(self.pad_thickness-1)*[0]
        self.orientation_map = morpho.pad(orientation_map,
                                          padding).astype(int)
        self.orientation_map_r = self.orientation_map.ravel()
        if channel_is_oriented is None:
            # no orientation info: treat every channel as unoriented
            nchannels = 1 if self.probabilities.ndim==self.watershed.ndim \
                else self.probabilities.shape[-1]
            self.channel_is_oriented = array([False]*nchannels)
            self.max_probabilities_r = zeros_like(self.probabilities_r)
            self.oriented_probabilities_r = zeros_like(self.probabilities_r)
            self.non_oriented_probabilities_r = self.probabilities_r
        else:
            self.channel_is_oriented = channel_is_oriented
            self.max_probabilities_r = \
                self.probabilities_r[:, self.channel_is_oriented].max(axis=1)
            # pick, per voxel, the oriented channel selected by the
            # orientation map
            self.oriented_probabilities_r = \
                self.probabilities_r[:, self.channel_is_oriented]
            self.oriented_probabilities_r = \
                self.oriented_probabilities_r[
                    list(range(len(self.oriented_probabilities_r))),
                    self.orientation_map_r]
            self.non_oriented_probabilities_r = \
                self.probabilities_r[:, ~self.channel_is_oriented]

    def set_watershed(self, ws=array([], label_dtype), connectivity=1):
        """Set the initial segmentation volume (watershed).

        The initial segmentation is called `watershed` for historical
        reasons only.

        Parameters
        ----------
        ws : array of int
            The initial segmentation.
        connectivity : int in {1, ..., `ws.ndim`}, optional
            The pixel neighborhood.

        Returns
        -------
        None
        """
        ws = ws.astype(label_dtype)
        try:
            # reserve one label above the maximum for the virtual
            # "boundary body" that wraps the volume
            self.boundary_body = np.max(ws) + 1
        except ValueError: # empty watershed given
            self.boundary_body = 1
        self.volume_size = ws.size
        if ws.size > 0:
            ws, _, inv = relabel_sequential(ws)
            self.inverse_watershed_map = inv # translates to original labels
        self.watershed = morpho.pad(ws, self.boundary_body)
        self.watershed_r = self.watershed.ravel()
        self.pad_thickness = 1
        # raveled-index offsets to each neighbor under `connectivity`
        self.steps = morpho.raveled_steps_to_neighbors(self.watershed.shape,
                                                       connectivity)

    def set_ground_truth(self, gt=None):
        """Set the ground truth volume.

        This is useful for tracking segmentation accuracy over time.

        Parameters
        ----------
        gt : array of int
            A ground truth segmentation of the same volume passed to
            ``set_watershed``.

        Returns
        -------
        None
        """
        if gt is not None:
            gtm = gt.max()+1
            gt_ignore = [0, gtm] if (gt==0).any() else [gtm]
            seg_ignore = [0, self.boundary_body] if \
                        (self.watershed==0).any() else [self.boundary_body]
            self.gt = morpho.pad(gt, gtm)
            self.rig = merge_contingency_table(self.watershed, self.gt,
                                               ignore_seg=seg_ignore,
                                               ignore_gt=gt_ignore)
        else:
            self.gt = None
            # null pattern to transparently allow merging of nodes.
            # Bonus feature: counts how many sp's went into a single node.
            try:
                self.rig = ones(2 * self.watershed.max() + 1)
            except ValueError:
                self.rig = ones(2 * self.number_of_nodes() + 1)

    def set_exclusions(self, excl):
        """Set an exclusion volume, forbidding certain merges.

        Parameters
        ----------
        excl : array of int
            Exclusions work as follows: the volume `excl` is the same
            shape as the initial segmentation (see ``set_watershed``),
            and consists of mostly 0s. Any voxels with *the same*
            non-zero label will not be allowed to merge during
            agglomeration (provided they were not merged in the initial
            segmentation). This allows manual separation *a priori* of
            difficult-to-
            -segment regions.
        Returns
        -------
        None
        """
        if excl.size != 0:
            excl = morpho.pad(excl, [0]*self.pad_thickness)
        for n in self.nodes():
            if excl.size != 0:
                # exclusion labels present in this node's voxels
                eids = unique(excl.ravel()[self.extent(n)])
                eids = eids[flatnonzero(eids)]
                self.node[n]['exclusions'] = set(list(eids))
            else:
                self.node[n]['exclusions'] = set()

    def build_merge_queue(self):
        """Build a queue of node pairs to be merged in a specific priority.

        Returns
        -------
        mq : MergeQueue object
            A MergeQueue is a Python ``deque`` with a specific element
            structure: a list of length 4 containing:

             - the merge priority (any ordered type)
             - a 'valid' flag
             - and the two nodes in arbitrary order

            The valid flag allows one to "remove" elements from the
            queue in O(1) time by setting the flag to ``False``. Then,
            one checks the flag when popping elements and ignores those
            marked as invalid.

            One other specific feature is that there are back-links from
            edges to their corresponding queue items so that when nodes
            are merged, affected edges can be invalidated and reinserted
            in the queue with a new priority.
        """
        queue_items = []
        for l1, l2 in self.real_edges_iter():
            w = self.merge_priority_function(self,l1,l2)
            qitem = [w, True, l1, l2]
            queue_items.append(qitem)
            # back-link from the edge to its queue item (see docstring)
            self[l1][l2]['qlink'] = qitem
            self[l1][l2]['weight'] = w
        return MergeQueue(queue_items, with_progress=self.show_progress)

    def rebuild_merge_queue(self):
        """Build a merge queue from scratch and assign to self.merge_queue.

        See Also
        --------
        build_merge_queue
        """
        self.merge_queue = self.build_merge_queue()

    def agglomerate(self, threshold=0.5, save_history=False):
        """Merge nodes hierarchically until given edge confidence threshold.

        This is the main workhorse of the ``agglo`` module!

        Parameters
        ----------
        threshold : float, optional
            The edge priority at which to stop merging.
        save_history : bool, optional
            Whether to save and return a history of all the merges made.

        Returns
        -------
        history : list of tuple of int, optional
            The ordered history of node pairs merged.
        scores : list of float, optional
            The list of merge scores corresponding to the `history`.
        evaluation : list of tuple, optional
            The split VI after each merge. This is only meaningful if
            a ground truth volume was provided at build time.

        Notes
        -----
        This function returns ``None`` when `save_history` is ``False``.
        """
        if self.merge_queue.is_empty():
            self.merge_queue = self.build_merge_queue()
        history, scores, evaluation = [], [], []
        while len(self.merge_queue) > 0 and \
                                self.merge_queue.peek()[0] < threshold:
            merge_priority, _, n1, n2 = self.merge_queue.pop()
            self.update_frozen_sets(n1, n2)
            self.merge_nodes(n1, n2, merge_priority)
            if save_history:
                history.append((n1,n2))
                scores.append(merge_priority)
                evaluation.append(
                    (self.number_of_nodes()-1, self.split_vi())
                )
        if save_history:
            return history, scores, evaluation

    def agglomerate_count(self, stepsize=100, save_history=False):
        """Agglomerate until 'stepsize' merges have been made.

        This function is like ``agglomerate``, but rather than to a
        certain threshold, a certain number of merges are made,
        regardless of threshold.

        Parameters
        ----------
        stepsize : int, optional
            The number of merges to make.
        save_history : bool, optional
            Whether to save and return a history of all the merges made.

        Returns
        -------
        history : list of tuple of int, optional
            The ordered history of node pairs merged.
        scores : list of float, optional
            The list of merge scores corresponding to the `history`.
        evaluation : list of tuple, optional
            The split VI after each merge. This is only meaningful if
            a ground truth volume was provided at build time.

        Notes
        -----
        This function returns ``None`` when `save_history` is ``False``.

        See Also
        --------
        agglomerate
        """
        if self.merge_queue.is_empty():
            self.merge_queue = self.build_merge_queue()
        history, evaluation = [], []
        i = 0
        for i in range(stepsize):
            if len(self.merge_queue) == 0:
                break
            merge_priority, _, n1, n2 = self.merge_queue.pop()
            # NOTE(review): incrementing the loop variable has no effect
            # on the iteration count and `i` is unused afterwards
            i += 1
            self.merge_nodes(n1, n2, merge_priority)
            if save_history:
                history.append((n1, n2))
                evaluation.append(
                    (self.number_of_nodes()-1, self.split_vi())
                )
        if save_history:
            return history, evaluation

    def agglomerate_ladder(self, min_size=1000, strictness=2):
        """Merge sequentially all nodes smaller than `min_size`.

        Parameters
        ----------
        min_size : int, optional
            The smallest allowable segment after ladder completion.
        strictness : {1, 2, 3}, optional
            `strictness == 1`: all nodes smaller than `min_size` are
            merged according to the merge priority function.
            `strictness == 2`: in addition to `1`, small nodes can only
            be merged to big nodes.
            `strictness == 3`: in addition to `2`, nodes sharing less
            than one pixel of boundary are not agglomerated.

        Returns
        -------
        None

        Notes
        -----
        Nodes that are on the volume boundary are not agglomerated.
        """
        original_merge_priority_function = self.merge_priority_function
        # temporarily wrap the priority function so only ladder-eligible
        # merges get finite priority
        self.merge_priority_function = make_ladder(
            self.merge_priority_function, min_size, strictness
        )
        self.rebuild_merge_queue()
        self.agglomerate(inf)
        self.merge_priority_function = original_merge_priority_function
        self.merge_queue.finish()
        self.rebuild_merge_queue()
        # shift stored tree weights so ladder merges sort below all
        # subsequent "real" merges
        max_score = max([qitem[0] for qitem in self.merge_queue.q])
        for n in self.tree.nodes():
            self.tree.node[n]['w'] -= max_score

    def learn_agglomerate(self, gts, feature_map, min_num_samples=1,
                          learn_flat=True, learning_mode='strict',
                          labeling_mode='assignment', priority_mode='active',
                          memory=True, unique=True, random_state=None,
                          max_num_epochs=10, min_num_epochs=2,
                          max_num_samples=np.inf, classifier='random forest',
                          active_function=classifier_probability,
                          mpf=boundary_mean):
        """Agglomerate while comparing to ground truth & classifying merges.
        Parameters
        ----------
        gts : array of int or list thereof
            The ground truth volume(s) corresponding to the current
            probability map.
        feature_map : function (Rag, node, node) -> array of float
            The map from node pairs to a feature vector. This must
            consist either of uncached features or of the cache used
            when building the graph.
        min_num_samples : int, optional
            Continue training until this many training examples have
            been collected.
        learn_flat : bool, optional
            Do a flat learning on the static graph with no
            agglomeration.
        learning_mode : {'strict', 'loose'}, optional
            In 'strict' mode, if a "don't merge" edge is encountered, it
            is added to the training set but the merge is not executed.
            In 'loose' mode, the merge is allowed to proceed.
        labeling_mode : {'assignment', 'vi-sign', 'rand-sign'}, optional
            How to decide whether two nodes should be merged based on
            the ground truth segmentations. ``'assignment'`` means the
            nodes are assigned to the ground truth node with which they
            share the highest overlap. ``'vi-sign'`` means the the VI
            change of the switch is used (negative is better).
            ``'rand-sign'`` means the change in Rand index is used
            (positive is better).
        priority_mode : string, optional
            One of:
                ``'active'``: Train a priority function with the data
                              from previous epochs to obtain the next.
                ``'random'``: Merge edges at random.
                ``'mixed'``: Alternate between epochs of ``'active'``
                             and ``'random'``.
                ``'mean'``: Use the mean boundary value. (In this case,
                            training is limited to 1 or 2 epochs.)
                ``'custom'``: Use the function provided by `mpf`.
        memory : bool, optional
            Keep the training data from all epochs (rather than just
            the most recent one).
        unique : bool, optional
            Remove duplicate feature vectors.
        random_state : int, optional
            If provided, this parameter is passed to `get_classifier`
            to set the random state and allow consistent results across
            tests.
        max_num_epochs : int, optional
            Do not train for longer than this (this argument *may*
            override the `min_num_samples` argument).
        min_num_epochs : int, optional
            Train for no fewer than this number of epochs.
        max_num_samples : int, optional
            Train for no more than this number of samples.
        classifier : string, optional
            Any valid classifier descriptor. See
            ``gala.classify.get_classifier()``
        active_function : function (feat. map, classifier) -> function, optional
            Use this to create the next priority function after an
            epoch.
        mpf : function (Rag, node, node) -> float
            A merge priority function to use when ``priority_mode`` is
            ``'custom'``.

        Returns
        -------
        data : list of array
            Four arrays containing:
                - the feature vectors, shape ``(n_samples, n_features)``.
                - the labels, shape ``(n_samples, 3)``. A value of `-1`
                  means "should merge", while `1` means "should not
                  merge". The columns correspond to the three labeling
                  methods: assignment, VI sign, or RI sign.
                - the VI and RI change of each merge, ``(n_edges, 2)``.
                - the list of merged edges ``(n_edges, 2)``.
        alldata : list of list of array
            A list of lists like `data` above: one list for each epoch.

        Notes
        -----
        The gala algorithm [1] uses the default parameters. For the
        LASH algorithm [2], use:
            - `learning_mode`: ``'loose'``
            - `labeling_mode`: ``'rand-sign'``
            - `memory`: ``False``

        References
        ----------
        .. [1] Nunez-Iglesias et al, Machine learning of hierarchical
               clustering to segment 2D and 3D images, PLOS ONE, 2013.
        .. [2] Jain et al, Learning to agglomerate superpixel
               hierarchies, NIPS, 2011.

        See Also
        --------
        Rag
        """
        learning_mode = learning_mode.lower()
        labeling_mode = labeling_mode.lower()
        priority_mode = priority_mode.lower()
        if priority_mode == 'mean' and unique:
            max_num_epochs = 2 if learn_flat else 1
        if priority_mode in ['random', 'mean'] and not memory:
            max_num_epochs = 1
        label_type_keys = {'assignment':0, 'vi-sign':1, 'rand-sign':2}
        if type(gts) != list:
            gts = [gts] # allow using single ground truth as input
        master_ctables = [merge_contingency_table(self.get_segmentation(), gt)
                          for gt in gts]
        alldata = []
        data = [[],[],[],[]]
        for num_epochs in range(max_num_epochs):
            # fresh contingency tables each epoch: learn_epoch mutates
            # them in place as merges are executed
            ctables = deepcopy(master_ctables)
            if len(data[0]) > min_num_samples and \
                                        num_epochs >= min_num_epochs:
                break
            if learn_flat and num_epochs == 0:
                alldata.append(self.learn_flat(gts, feature_map))
                data = unique_learning_data_elements(alldata) if memory \
                    else alldata[-1]
                continue
            # agglomerate a *copy* so the original graph state survives
            # for the next epoch
            g = self.copy()
            if priority_mode == 'mean':
                g.merge_priority_function = boundary_mean
            elif num_epochs > 0 and priority_mode == 'active' or \
                num_epochs % 2 == 1 and priority_mode == 'mixed':
                # train a classifier on data gathered so far and use its
                # probability output as the next merge priority
                cl = get_classifier(classifier, random_state=random_state)
                feat, lab = classify.sample_training_data(
                    data[0], data[1][:, label_type_keys[labeling_mode]],
                    max_num_samples)
                cl = cl.fit(feat, lab)
                g.merge_priority_function = active_function(feature_map, cl)
            elif priority_mode == 'random' or \
                (priority_mode == 'active' and num_epochs == 0):
                g.merge_priority_function = random_priority
            elif priority_mode == 'custom':
                g.merge_priority_function = mpf
            g.show_progress = False # bug in MergeQueue usage causes
                                    # progressbar crash.
            g.rebuild_merge_queue()
            alldata.append(g.learn_epoch(ctables, feature_map,
                                         learning_mode=learning_mode,
                                         labeling_mode=labeling_mode))
            if memory:
                if unique:
                    data = unique_learning_data_elements(alldata)
                else:
                    data = concatenate_data_elements(alldata)
            else:
                data = alldata[-1]
            logging.debug('data size %d at epoch %d'%(len(data[0]),
                                                      num_epochs))
        return data, alldata

    def learn_flat(self, gts, feature_map):
        """Learn all edges on the graph, but don't agglomerate.

        Parameters
        ----------
        gts : array of int or list thereof
            The ground truth volume(s) corresponding to the current
            probability map.
        feature_map : function (Rag, node, node) -> array of float
            The map from node pairs to a feature vector. This must
            consist either of uncached features or of the cache used
            when building the graph.

        Returns
        -------
        data : list of array
            Four arrays containing:
                - the feature vectors, shape ``(n_samples, n_features)``.
                - the labels, shape ``(n_samples, 3)``. A value of `-1`
                  means "should merge", while `1` means "should not
                  merge". The columns correspond to the three labeling
                  methods: assignment, VI sign, or RI sign.
                - the VI and RI change of each merge, ``(n_edges, 2)``.
                - the list of merged edges ``(n_edges, 2)``.

        See Also
        --------
        learn_agglomerate
        """
        if type(gts) != list:
            gts = [gts] # allow using single ground truth as input
        ctables = [merge_contingency_table(self.get_segmentation(), gt)
                   for gt in gts]
        assignments = [ev.assignment_table(ct) for ct in ctables]
        return list(map(array, zip(*[
                self.learn_edge(e, ctables, assignments, feature_map)
                for e in self.real_edges()])))

    def learn_edge(self, edge, ctables, assignments, feature_map):
        """Determine whether an edge should be merged based on ground truth.

        Parameters
        ----------
        edge : (int, int) tuple
            An edge in the graph.
        ctables : list of array
            A list of contingency tables determining overlap between the
            current segmentation and the ground truth.
        assignments : list of array
            Similar to the contingency tables, but each row is
            thresholded so each segment corresponds to exactly one
            ground truth segment.
        feature_map : function (Rag, node, node) -> array of float
            The map from node pairs to a feature vector.

        Returns
        -------
        features : 1D array of float
            The feature vector for that edge.
        labels : 1D array of float, length 3
            The labels determining whether the edge should be merged.
            A value of `-1` means "should merge", while `1` means
            "should not merge". The columns correspond to the three
            labeling methods: assignment, VI sign, or RI sign.
        weights : 1D array of float, length 2
            The VI and RI change of the merge.
        nodes : tuple of int
            The given edge.
        """
        n1, n2 = edge
        features = feature_map(self, n1, n2).ravel()
        # Calculate weights for weighting data points
        s1, s2 = [self.node[n]['size'] for n in [n1, n2]]
        weights = \
            compute_local_vi_change(s1, s2, self.volume_size), \
            compute_local_rand_change(s1, s2, self.volume_size)
        # Get the fraction of times that n1 and n2 assigned to
        # same segment in the ground truths
        cont_labels = [
            [(-1)**(a[n1]==a[n2]).toarray().all() for a in assignments],
            [compute_true_delta_vi(ctable, n1, n2) for ctable in ctables],
            [-compute_true_delta_rand(ctable, n1, n2, self.volume_size)
                for ctable in ctables]
        ]
        labels = [np.sign(mean(cont_label)) for cont_label in cont_labels]
        if any(map(isnan, labels)) or any([label == 0 for label in labels]):
            logging.debug('NaN or 0 labels found. ' +
                          ' '.join(map(str, [labels, (n1, n2)])))
        # undecidable or frozen edges default to "should not merge" (+1)
        labels = [1 if i==0 or isnan(i) or n1 in self.frozen_nodes or
            n2 in self.frozen_nodes or (n1, n2) in self.frozen_edges else
            i for i in labels]
        return features, labels, weights, (n1, n2)

    def learn_epoch(self, ctables, feature_map,
                    learning_mode='permissive', labeling_mode='assignment'):
        """Learn the agglomeration process using various strategies.

        Parameters
        ----------
        ctables : array of float or list thereof
            One or more contingency tables between own segments and gold
            standard segmentations
        feature_map : function (Rag, node, node) -> array of float
            The map from node pairs to a feature vector. This must
            consist either of uncached features or of the cache used
            when building the graph.
        learning_mode : {'strict', 'permissive'}, optional
            If ``'strict'``, don't proceed with a merge when it goes
            against the ground truth. For historical reasons, 'loose' is
            allowed as a synonym for 'strict'.
        labeling_mode : {'assignment', 'vi-sign', 'rand-sign'}, optional
            Which label to use for `learning_mode`. Note that all labels
            are saved in the end.

        Returns
        -------
        data : list of array
            Four arrays containing:
                - the feature vectors, shape ``(n_samples, n_features)``.
                - the labels, shape ``(n_samples, 3)``. A value of `-1`
                  means "should merge", while `1` means "should not
                  merge". The columns correspond to the three labeling
                  methods: assignment, VI sign, or RI sign.
                - the VI and RI change of each merge, ``(n_edges, 2)``.
                - the list of merged edges ``(n_edges, 2)``.
        """
        label_type_keys = {'assignment':0, 'vi-sign':1, 'rand-sign':2}
        assignments = [ev.csrRowExpandableCSR(asst)
                       for asst in map(ev.assignment_table, ctables)]
        g = self
        data = []
        while len(g.merge_queue) > 0:
            merge_priority, valid, n1, n2 = g.merge_queue.pop()
            if g.boundary_body in (n1, n2):
                continue
            dat = g.learn_edge((n1,n2), ctables, assignments, feature_map)
            data.append(dat)
            label = dat[1][label_type_keys[labeling_mode]]
            if learning_mode != 'strict' or label < 0:
                node_id = g.merge_nodes(n1, n2, merge_priority)
                # fold the merged rows of each contingency table and
                # assignment table into the new node's row, zeroing the
                # originals (tables are mutated in place)
                for ctable, assignment in zip(ctables, assignments):
                    ctable[node_id] = ctable[n1] + ctable[n2]
                    ctable[n1] = 0
                    ctable[n2] = 0
                    assignment[node_id] = (ctable[node_id] ==
                                           ctable[node_id].max())
                    assignment[n1] = 0
                    assignment[n2] = 0
        return list(map(array, zip(*data)))

    def replay_merge_history(self, merge_seq, labels=None, num_errors=1):
        """Agglomerate according to a merge sequence, optionally labeled.

        Parameters
        ----------
        merge_seq : iterable of pair of int
            The sequence of node IDs to be merged.
        labels : iterable of int in {-1, 0, 1}, optional
            A sequence matching `merge_seq` specifying whether a merge
            should take place or not. -1 or 0 mean "should merge", 1
            otherwise.

        Returns
        -------
        n : int
            Number of elements consumed from `merge_seq`
        e : (int, int)
            Last merge pair observed.

        Notes
        -----
        The merge sequence and labels *must* be generators if you don't
        want to manually keep track of how much has been consumed. The
        merging continues until `num_errors` false merges have been
        encountered, or until the sequence is fully consumed.
        """
        if labels is None:
            labels1 = it.repeat(False)
            labels2 = it.repeat(False)
        else:
            labels1 = (label > 0 for label in labels)
            labels2 = (label > 0 for label in labels)
        counter = it.count()
        errors_remaining = conditional_countdown(labels2, num_errors)
        nodes = None
        for nodes, label, errs, count in \
                zip(merge_seq, labels1, errors_remaining, counter):
            n1, n2 = nodes
            if not label:
                self.merge_nodes(n1, n2)
            elif errs == 0:
                break
        return next(counter), nodes

    def rename_node(self, old, new):
        """Rename node `old` to `new`, updating edges and weights.

        Parameters
        ----------
        old : int
            The node being renamed.
        new : int
            The new node id.
        """
        self.add_node(new, attr_dict=self.node[old])
        self.add_edges_from(
            [(new, v, self[old][v]) for v in self.neighbors(old)])
        for v in self.neighbors(new):
            # patch the merge-queue back-links so queue items keep
            # pointing at live node ids
            qitem = self[new][v].get('qlink', None)
            if qitem is not None:
                if qitem[2] == old:
                    qitem[2] = new
                else:
                    qitem[3] = new
        self.remove_node(old)

    def merge_nodes(self, n1, n2, merge_priority=0.0):
        """Merge two nodes, while updating the necessary edges.

        Parameters
        ----------
        n1, n2 : int
            Nodes determining the edge for which to update the UCM.
        merge_priority : float, optional
            The merge priority of the merge.

        Returns
        -------
        node_id : int
            The id of the node resulting from the merge.

        Notes
        -----
        Additionally, the RIG (region intersection graph), the
        contingency matrix to the ground truth (if provided) is
        updated.
""" if len(self.node[n1]['exclusions'] & self.node[n2]['exclusions']) > 0: return else: self.node[n1]['exclusions'].update(self.node[n2]['exclusions']) w = self[n1][n2].get('weight', merge_priority) self.node[n1]['size'] += self.node[n2]['size'] self.node[n1]['fragments'].update(self.node[n2]['fragments']) self.feature_manager.update_node_cache(self, n1, n2, self.node[n1]['feature-cache'], self.node[n2]['feature-cache']) new_neighbors = [n for n in self.neighbors(n2) if n != n1] for n in new_neighbors: self.merge_edge_properties((n2, n), (n1, n)) try: self.merge_queue.invalidate(self[n1][n2]['qlink']) except KeyError: pass node_id = self.tree.merge(n1, n2, w) self.remove_node(n2) self.rename_node(n1, node_id) self.rig[node_id] = self.rig[n1] + self.rig[n2] self.rig[n1] = 0 self.rig[n2] = 0 return node_id def merge_subgraph(self, subgraph=None, source=None): """Merge a (typically) connected set of nodes together. Parameters ---------- subgraph : agglo.Rag, networkx.Graph, or list of int (node id) A subgraph to merge. source : int (node id), optional Merge the subgraph to this node. """ if type(subgraph) not in [Rag, Graph]: # input is node list subgraph = self.subgraph(subgraph) if len(subgraph) == 0: return for subsubgraph in nx.connected_component_subgraphs(subgraph): node_dfs = list(dfs_preorder_nodes(subsubgraph, source)) # dfs_preorder_nodes returns iter, convert to list source_node, other_nodes = node_dfs[0], node_dfs[1:] for current_node in other_nodes: source_node = self.merge_nodes(source_node, current_node) def split_node(self, u, n=2, **kwargs): """Use normalized cuts [1] to split a node/segment. Parameters ---------- u : int (node id) Which node to split. n : int, optional How many segments to split it into. Returns ------- None References ---------- .. [1] Shi, J., and Malik, J. (2000). Normalized cuts and image segmentation. Pattern Analysis and Machine Intelligence. 
""" node_extent = self.extent(u) labels = unique(self.watershed_r[node_extent]) self.remove_node(u) self.build_graph_from_watershed(idxs=node_extent) self.ncut(num_clusters=n, nodes=labels, **kwargs) def separate_fragments(self, f0, f1): """Ensure fragments (watersheds) f0 and f1 are in different nodes. If f0 and f1 are the same segment, split that segment at the lowest common ancestor of f0 and f1 in the merge tree, then add an exclusion. Otherwise, simply add an exclusion. Parameters ---------- f0, f1 : int The fragments to be separated. Returns ------- s0, s1 : int The separated segments resulting from the break. If the fragments were already in separate segments, return the highest ancestor of each fragment on the merge tree. """ lca = tree.lowest_common_ancestor(self.tree, f0, f1) if lca is not None: s0, s1 = self.tree.children(lca) self.delete_merge(lca) else: s0 = self.tree.highest_ancestor(f0) s1 = self.tree.highest_ancestor(f1) return s0, s1 def delete_merge(self, tree_node): """Delete the merge represented by `tree_node`. Parameters ---------- tree_node : int A node that may not be currently in the graph, but was at some point in its history. """ highest = self.tree.highest_ancestor(tree_node) if highest != tree_node: leaves = self.tree.leaves(tree_node) # the graph doesn't keep nodes in the history, only the # most recent nodes. So, we only need to find that one and # update its fragment list. self.node[highest]['fragments'].difference_update(leaves) self.tree.remove_node(tree_node) def merge_edge_properties(self, src, dst): """Merge the properties of edge src into edge dst. Parameters ---------- src, dst : (int, int) Edges being merged. 
Returns ------- None """ u, v = dst w, x = src if not self.has_edge(u,v): self.add_edge(u, v, attr_dict=self[w][x]) else: self[u][v]['boundary'].extend(self[w][x]['boundary']) self.feature_manager.update_edge_cache(self, (u, v), (w, x), self[u][v]['feature-cache'], self[w][x]['feature-cache']) try: self.merge_queue.invalidate(self[w][x]['qlink']) except KeyError: pass self.update_merge_queue(u, v) def update_merge_queue(self, u, v): """Update the merge queue item for edge (u, v). Add new by default. Parameters ---------- u, v : int (node id) Edge being updated. Returns ------- None """ if self.boundary_body in [u, v]: return if 'qlink' in self[u][v]: self.merge_queue.invalidate(self[u][v]['qlink']) if not self.merge_queue.is_null_queue: w = self.merge_priority_function(self,u,v) new_qitem = [w, True, u, v] self[u][v]['qlink'] = new_qitem self[u][v]['weight'] = w self.merge_queue.push(new_qitem) def get_segmentation(self, threshold=None): """Return the unpadded segmentation represented by the graph. Remember that the segmentation volume is padded with an "artificial" segment that envelops the volume. This function simply removes the wrapping and returns a segmented volume. Parameters ---------- threshold : float, optional Get the segmentation at the given threshold. If no threshold is given, return the segmentation at the current level of agglomeration. Returns ------- seg : array of int The segmentation of the volume presently represented by the graph. """ if threshold is None: # a threshold of np.inf is the same as no threshold on the # tree when getting the map (see below). Thus, using a # threshold of `None` (the default), we get the segmentation # implied by the current merge tree. threshold = np.inf elif threshold > self.max_merge_score: # If a higher threshold is required than has been merged, we # continue the agglomeration until that threshold is hit. 
self.agglomerate(threshold) m = self.tree.get_map(threshold) seg = m[self.watershed] if self.pad_thickness > 1: # volume has zero-boundaries seg = morpho.remove_merged_boundaries(seg, self.connectivity) return morpho.juicy_center(seg, self.pad_thickness) def build_volume(self, nbunch=None): """Return the segmentation induced by the graph. Parameters ---------- nbunch : iterable of int (node id), optional A list of nodes for which to build the volume. All nodes are used if this is not provided. Returns ------- seg : array of int The segmentation implied by the graph. Notes ----- This function is very similar to ``get_segmentation``, but it builds the segmentation from the bottom up, rather than using the currently-stored segmentation. """ v = zeros_like(self.watershed) vr = v.ravel() if nbunch is None: nbunch = self.nodes() for n in nbunch: vr[self.extent(n)] = n return morpho.juicy_center(v,self.pad_thickness) def build_boundary_map(self, ebunch=None): """Return a map of the current merge priority. Parameters ---------- ebunch : iterable of (int, int), optional The list of edges for which to build a map. Use all edges if not provided. Returns ------- bm : array of float The image of the edge weights. """ if len(self.merge_queue) == 0: self.rebuild_merge_queue() m = zeros(self.watershed.shape, 'float') mr = m.ravel() if ebunch is None: ebunch = self.real_edges_iter() ebunch = sorted([(self[u][v]['weight'], u, v) for u, v in ebunch]) for w, u, v in ebunch: b = self[u][v]['boundary'] mr[b] = w if hasattr(self, 'ignored_boundary'): m[self.ignored_boundary] = inf return morpho.juicy_center(m, self.pad_thickness) def remove_obvious_inclusions(self): """Merge any nodes with only one edge to their neighbors.""" for n in self.nodes(): if self.degree(n) == 1: self.merge_nodes(self.neighbors(n)[0], n) def remove_inclusions(self): """Merge any segments fully contained within other segments. 
In 3D EM images, inclusions are not biologically plausible, so this function can be used to remove them. Parameters ---------- None Returns ------- None """ bcc = list(biconnected_components(self)) if len(bcc) > 1: container = [i for i, s in enumerate(bcc) if self.boundary_body in s][0] del bcc[container] # remove the main graph bcc = list(map(list, bcc)) for cc in bcc: cc.sort(key=lambda x: self.node[x]['size'], reverse=True) bcc.sort(key=lambda x: self.node[x[0]]['size']) for cc in bcc: self.merge_subgraph(cc, cc[0]) def orphans(self): """List all the nodes that do not touch the volume boundary. Parameters ---------- None Returns ------- orphans : list of int (node id) A list of node ids. Notes ----- "Orphans" are not biologically plausible in EM data, so we can flag them with this function for further scrutiny. """ return [n for n in self.nodes() if not self.at_volume_boundary(n)] def compute_orphans(self): """Find all the segments that do not touch the volume boundary. Parameters ---------- None Returns ------- orphans : list of int (node id) A list of node ids. Notes ----- This function differs from ``orphans`` in that it does not use the graph, but rather computes orphans directly from the segmentation. """ return morpho.orphans(self.get_segmentation()) def is_traversed_by_node(self, n): """Determine whether a body traverses the volume. This is defined as touching the volume boundary at two distinct locations. Parameters ---------- n : int (node id) The node being inspected. Returns ------- tr : bool Whether the segment "traverses" the volume being segmented. 
""" if not self.at_volume_boundary(n) or n == self.boundary_body: return False v = zeros(self.watershed.shape, 'uint8') v.ravel()[self[n][self.boundary_body]['boundary']] = 1 _, n = label(v, ones([3]*v.ndim)) return n > 1 def traversing_bodies(self): """List all bodies that traverse the volume.""" return [n for n in self.nodes() if self.is_traversed_by_node(n)] def non_traversing_bodies(self): """List bodies that are not orphans and do not traverse the volume.""" return [n for n in self.nodes() if self.at_volume_boundary(n) and not self.is_traversed_by_node(n) and n != self.boundary_body] def raveler_body_annotations(self, traverse=False): """Return JSON-compatible dict formatted for Raveler annotations.""" orphans = self.compute_orphans() non_traversing_bodies = self.compute_non_traversing_bodies() \ if traverse else [] data = \ [{'status':'not sure', 'comment':'orphan', 'body ID':int(o)} for o in orphans] +\ [{'status':'not sure', 'comment':'does not traverse', 'body ID':int(n)} for n in non_traversing_bodies] metadata = {'description':'body annotations', 'file version':2} return {'data':data, 'metadata':metadata} def at_volume_boundary(self, n): """Return True if node n touches the volume boundary.""" return self.has_edge(n, self.boundary_body) or n == self.boundary_body def should_merge(self, n1, n2): return self.rig[n1].argmax() == self.rig[n2].argmax() def get_pixel_label(self, n1, n2): boundary = array(self[n1][n2]['boundary']) min_idx = boundary[self.probabilities_r[boundary,0].argmin()] if self.should_merge(n1, n2): return min_idx, 2 else: return min_idx, 1 def pixel_labels_array(self, false_splits_only=False): ar = zeros_like(self.watershed_r) labels = [self.get_pixel_label(*e) for e in self.real_edges()] if false_splits_only: labels = [l for l in labels if l[1] == 2] ids, ls = list(map(array,zip(*labels))) ar[ids] = ls.astype(ar.dtype) return ar.reshape(self.watershed.shape) def split_vi(self, gt=None): if self.gt is None and gt is None: return 
array([0,0]) elif self.gt is not None: return split_vi(self.rig) else: return split_vi(self.get_segmentation(), gt, [0], [0]) def boundary_indices(self, n1, n2): return self[n1][n2]['boundary'] def get_edge_coordinates(self, n1, n2, arbitrary=False): """Find where in the segmentation the edge (n1, n2) is most visible.""" return get_edge_coordinates(self, n1, n2, arbitrary) def write(self, fout, output_format='GraphML'): if output_format == 'Plaza JSON': self.write_plaza_json(fout) else: raise ValueError('Unsupported output format for agglo.Rag: %s' % output_format) def write_plaza_json(self, fout, synapsejson=None, offsetz=0): """Write graph to Steve Plaza's JSON spec.""" json_vals = {} if synapsejson is not None: synapse_file = open(synapsejson) json_vals1 = json.load(synapse_file) body_count = {} for item in json_vals1["data"]: bodyid = ((item["T-bar"])["body ID"]) if bodyid in body_count: body_count[bodyid] += 1 else: body_count[bodyid] = 1 for psd in item["partners"]: bodyid = psd["body ID"] if bodyid in body_count: body_count[bodyid] += 1 else: body_count[bodyid] = 1 json_vals["synapse_bodies"] = [] for body, count in body_count.items(): temp = [body, count] json_vals["synapse_bodies"].append(temp) edge_list = [ {'location': list(map(int, self.get_edge_coordinates(i, j)[-1::-1])), 'node1': int(i), 'node2': int(j), 'edge_size': len(self[i][j]['boundary']), 'size1': self.node[i]['size'], 'size2': self.node[j]['size'], 'weight': float(self[i][j]['weight'])} for i, j in self.real_edges() ] json_vals['edge_list'] = edge_list with open(fout, 'w') as f: json.dump(json_vals, f, indent=4) def ncut(self, num_clusters=10, kmeans_iters=5, sigma=255.0*20, nodes=None, **kwargs): """Run normalized cuts on the current set of superpixels. 
Keyword arguments: num_clusters -- number of clusters to compute kmeans_iters -- # iterations to run kmeans when clustering sigma -- sigma value when setting up weight matrix Return value: None """ if nodes is None: nodes = self.nodes() # Compute weight matrix W = self.compute_W(self.merge_priority_function, nodes=nodes) # Run normalized cut labels, eigvec, eigval = ncutW(W, num_clusters, kmeans_iters, **kwargs) # Merge nodes that are in same cluster self.cluster_by_labels(labels, nodes) def cluster_by_labels(self, labels, nodes=None): """Merge all superpixels with the same label (1 label per 1 sp)""" if nodes is None: nodes = array(self.nodes()) if not (len(labels) == len(nodes)): raise ValueError('Number of labels should be %d but is %d.', self.number_of_nodes(), len(labels)) for l in unique(labels): inds = nonzero(labels==l)[0] nodes_to_merge = nodes[inds] node1 = nodes_to_merge[0] for node in nodes_to_merge[1:]: self.merge_nodes(node1, node) def compute_W(self, merge_priority_function, sigma=255.0*20, nodes=None): """ Computes the weight matrix for clustering""" if nodes is None: nodes = array(self.nodes()) n = len(nodes) nodes2ind = dict(zip(nodes, range(n))) W = lil_matrix((n,n)) for u, v in self.real_edges(nodes): try: i, j = nodes2ind[u], nodes2ind[v] except KeyError: continue w = merge_priority_function(self,u,v) W[i,j] = W[j,i] = np.exp(-w**2/sigma) return W def update_frozen_sets(self, n1, n2): self.frozen_nodes.discard(n1) self.frozen_nodes.discard(n2) for x, y in self.frozen_edges.copy(): if n2 in [x, y]: self.frozen_edges.discard((x, y)) if x == n2: self.frozen_edges.add((n1, y)) if y == n2: self.frozen_edges.add((x, n1)) def get_edge_coordinates(g, n1, n2, arbitrary=False): """Find where in the segmentation the edge (n1, n2) is most visible.""" boundary = g[n1][n2]['boundary'] if arbitrary: # quickly get an arbitrary point on the boundary idx = boundary.pop(); boundary.append(idx) coords = unravel_index(idx, g.watershed.shape) else: boundary_idxs = 
unravel_index(boundary, g.watershed.shape) coords = [bincount(dimcoords).argmax() for dimcoords in boundary_idxs] return array(coords) - g.pad_thickness def is_mito_boundary(g, n1, n2, channel=2, threshold=0.5): return max(np.mean(g.probabilities_r[g[n1][n2]['boundary'], c]) for c in channel) > threshold def is_mito(g, n, channel=2, threshold=0.5): return max(np.mean(g.probabilities_r[g.extent(n), c]) for c in channel) > threshold def best_possible_segmentation(ws, gt): """Build the best possible segmentation given a superpixel map.""" ws = Rag(ws) assignment = ev.assignment_table(ws.get_segmentation(), gt).tocsc() for gt_node in range(assignment.shape[1]): i, j = assignment.indptr[gt_node : gt_node+2] ws.merge_subgraph(assignment.indices[i:j]) return ws.get_segmentation() Use function, not dict key, to find boundaries This is the first step to replacing the boundary dictionary with a sparselol containing the same information. # built-ins import itertools as it import sys import argparse import random import logging import json import collections from copy import deepcopy # libraries from numpy import (array, mean, zeros, zeros_like, where, unique, newaxis, nonzero, median, float, ones, arange, inf, isnan, flatnonzero, unravel_index, bincount) import numpy as np from scipy.stats import sem from scipy import sparse from scipy.sparse import lil_matrix from scipy.misc import comb as nchoosek from scipy.ndimage.measurements import label from scipy import ndimage as ndi import networkx as nx from networkx import Graph, biconnected_components from networkx.algorithms.traversal.depth_first_search import dfs_preorder_nodes from skimage.segmentation import relabel_sequential from viridis import tree # local modules from . import morpho from . import sparselol as lol from . import iterprogress as ip from . import optimized as opt from .ncut import ncutW from .mergequeue import MergeQueue from .evaluate import merge_contingency_table, split_vi, xlogx from . 
import evaluate as ev from . import features from . import classify from .classify import get_classifier, \ unique_learning_data_elements, concatenate_data_elements from .dtypes import label_dtype arguments = argparse.ArgumentParser(add_help=False) arggroup = arguments.add_argument_group('Agglomeration options') arggroup.add_argument('-t', '--thresholds', nargs='+', default=[128], type=float, metavar='FLOAT', help='''The agglomeration thresholds. One output file will be written for each threshold.''' ) arggroup.add_argument('-l', '--ladder', type=int, metavar='SIZE', help='Merge any bodies smaller than SIZE.' ) arggroup.add_argument('-p', '--pre-ladder', action='store_true', default=True, help='Run ladder before normal agglomeration (default).' ) arggroup.add_argument('-L', '--post-ladder', action='store_false', dest='pre_ladder', help='Run ladder after normal agglomeration instead of before (SLOW).' ) arggroup.add_argument('-s', '--strict-ladder', type=int, metavar='INT', default=1, help='''Specify the strictness of the ladder agglomeration. Level 1 (default): merge anything smaller than the ladder threshold as long as it's not on the volume border. Level 2: only merge smaller bodies to larger ones. Level 3: only merge when the border is larger than or equal to 2 pixels.''' ) arggroup.add_argument('-M', '--low-memory', action='store_true', help='''Use less memory at a slight speed cost. Note that the phrase 'low memory' is relative.''' ) arggroup.add_argument('--disallow-shared-boundaries', action='store_false', dest='allow_shared_boundaries', help='''Watershed pixels that are shared between more than 2 labels are not counted as edges.''' ) arggroup.add_argument('--allow-shared-boundaries', action='store_true', default=True, help='''Count every watershed pixel in every edge in which it participates (default: True).''' ) def conditional_countdown(seq, start=1, pred=bool): """Count down from 'start' each time pred(elem) is true for elem in seq. 
Used to know how many elements of a sequence remain that satisfy a predicate. Parameters ---------- seq : iterable Any sequence. start : int, optional The starting element. pred : function, type(next(seq)) -> bool A predicate acting on the elements of `seq`. Examples -------- >>> seq = range(10) >>> cc = conditional_countdown(seq, start=5, pred=lambda x: x % 2 == 1) >>> next(cc) 5 >>> next(cc) 4 >>> next(cc) 4 >>> next(cc) 3 """ remaining = start for elem in seq: if pred(elem): remaining -= 1 yield remaining ############################ # Merge priority functions # ############################ def oriented_boundary_mean(g, n1, n2): return mean(g.oriented_probabilities_r[g.boundary(n1, n2)]) def boundary_mean(g, n1, n2): return mean(g.probabilities_r[g.boundary(n1, n2)]) def boundary_median(g, n1, n2): return median(g.probabilities_r[g.boundary(n1, n2)]) def approximate_boundary_mean(g, n1, n2): """Return the boundary mean as computed by a MomentsFeatureManager. The feature manager is assumed to have been set up for g at construction. 
""" return g.feature_manager.compute_edge_features(g, n1, n2)[1] def make_ladder(priority_function, threshold, strictness=1): def ladder_function(g, n1, n2): s1 = g.node[n1]['size'] s2 = g.node[n2]['size'] ladder_condition = \ (s1 < threshold and not g.at_volume_boundary(n1)) or \ (s2 < threshold and not g.at_volume_boundary(n2)) if strictness >= 2: ladder_condition &= ((s1 < threshold) != (s2 < threshold)) if strictness >= 3: ladder_condition &= len(g.boundary(n1, n2)) > 2 if ladder_condition: return priority_function(g, n1, n2) else: return inf return ladder_function def no_mito_merge(priority_function): def predict(g, n1, n2): frozen = (n1 in g.frozen_nodes or n2 in g.frozen_nodes or (n1, n2) in g.frozen_edges) if frozen: return np.inf else: return priority_function(g, n1, n2) return predict def mito_merge(): def predict(g, n1, n2): if n1 in g.frozen_nodes and n2 in g.frozen_nodes: return np.inf elif (n1, n2) in g.frozen_edges: return np.inf elif n1 not in g.frozen_nodes and n2 not in g.frozen_nodes: return np.inf else: if n1 in g.frozen_nodes: mito = n1 cyto = n2 else: mito = n2 cyto = n1 if g.node[mito]['size'] > g.node[cyto]['size']: return np.inf else: return 1.0 - (float(len(g.boundary(mito, cyto)))/ sum([len(g.boundary(mito, x)) for x in g.neighbors(mito)])) return predict def classifier_probability(feature_extractor, classifier): def predict(g, n1, n2): if n1 == g.boundary_body or n2 == g.boundary_body: return inf features = np.atleast_2d(feature_extractor(g, n1, n2)) try: prediction = classifier.predict_proba(features) prediction_arr = np.array(prediction, copy=False) if prediction_arr.ndim > 2: prediction_arr = prediction_arr[0] try: prediction = prediction_arr[0][1] except (TypeError, IndexError): prediction = prediction_arr[0] except AttributeError: prediction = classifier.predict(features)[0] return prediction return predict def ordered_priority(edges): d = {} n = len(edges) for i, (n1, n2) in enumerate(edges): score = float(i)/n d[(n1,n2)] = score 
d[(n2,n1)] = score def ord(g, n1, n2): return d.get((n1,n2), inf) return ord def expected_change_vi(feature_extractor, classifier, alpha=1.0, beta=1.0): prob_func = classifier_probability(feature_extractor, classifier) def predict(g, n1, n2): p = prob_func(g, n1, n2) # Prediction from the classifier # Calculate change in VI if n1 and n2 should not be merged v = compute_local_vi_change( g.node[n1]['size'], g.node[n2]['size'], g.volume_size ) # Return expected change return (p*alpha*v + (1.0-p)*(-beta*v)) return predict def compute_local_vi_change(s1, s2, n): """Compute change in VI if we merge disjoint sizes s1,s2 in a volume n.""" py1 = float(s1)/n py2 = float(s2)/n py = py1+py2 return -(py1*np.log2(py1) + py2*np.log2(py2) - py*np.log2(py)) def compute_true_delta_vi(ctable, n1, n2): p1 = ctable[n1].sum() p2 = ctable[n2].sum() p3 = p1+p2 p1g_log_p1g = xlogx(ctable[n1]).sum() p2g_log_p2g = xlogx(ctable[n2]).sum() p3g_log_p3g = xlogx(ctable[n1]+ctable[n2]).sum() return p3*np.log2(p3) - p1*np.log2(p1) - p2*np.log2(p2) - \ 2*(p3g_log_p3g - p1g_log_p1g - p2g_log_p2g) def expected_change_rand(feature_extractor, classifier, alpha=1.0, beta=1.0): prob_func = classifier_probability(feature_extractor, classifier) def predict(g, n1, n2): p = float(prob_func(g, n1, n2)) # Prediction from the classifier v = compute_local_rand_change( g.node[n1]['size'], g.node[n2]['size'], g.volume_size ) return p*v*alpha + (1.0-p)*(-beta*v) return predict def compute_local_rand_change(s1, s2, n): """Compute change in rand if we merge disjoint sizes s1,s2 in volume n.""" return float(s1*s2)/nchoosek(n,2) def compute_true_delta_rand(ctable, n1, n2, n): """Compute change in RI obtained by merging rows n1 and n2. This function assumes ctable is normalized to sum to 1. """ localct = n * ctable[(n1, n2), :] total = localct.data.sum() sqtotal = (localct.data ** 2).sum() delta_sxy = 1. / 2 * ((np.array(localct.sum(axis=0)) ** 2).sum() - sqtotal) delta_sx = 1. 
/ 2 * (total ** 2 - (np.array(localct.sum(axis=1)) ** 2).sum()) return (2 * delta_sxy - delta_sx) / nchoosek(n, 2) def boundary_mean_ladder(g, n1, n2, threshold, strictness=1): f = make_ladder(boundary_mean, threshold, strictness) return f(g, n1, n2) def boundary_mean_plus_sem(g, n1, n2, alpha=-6): bvals = g.probabilities_r[g.boundary(n1, n2)] return mean(bvals) + alpha*sem(bvals) def random_priority(g, n1, n2): if n1 == g.boundary_body or n2 == g.boundary_body: return inf return random.random() class Rag(Graph): """Region adjacency graph for segmentation of nD volumes. Parameters ---------- watershed : array of int, shape (M, N, ..., P) The labeled regions of the image. Note: this is called `watershed` for historical reasons, but could refer to a superpixel map of any origin. probabilities : array of float, shape (M, N, ..., P[, Q]) The probability of each pixel of belonging to a particular class. Typically, this has the same shape as `watershed` and represents the probability that the pixel is part of a region boundary, but it can also have an additional dimension for probabilities of belonging to other classes, such as mitochondria (in biological images) or specific textures (in natural images). merge_priority_function : callable function, optional This function must take exactly three arguments as input (a Rag object and two node IDs) and return a single float. feature_manager : ``features.base.Null`` object, optional A feature manager object that controls feature computation and feature caching. mask : array of bool, shape (M, N, ..., P) A mask of the same shape as `watershed`, `True` in the positions to be processed when making a RAG, `False` in the positions to ignore. show_progress : bool, optional Whether to display an ASCII progress bar during long- -running graph operations. connectivity : int in {1, ..., `watershed.ndim`} When determining adjacency, allow neighbors along `connectivity` dimensions. 
channel_is_oriented : array-like of bool, shape (Q,), optional For multi-channel images, some channels, for example some edge detectors, have a specific orientation. In conjunction with the `orientation_map` argument, specify which channels have an orientation associated with them. orientation_map : array-like of float, shape (Q,) Specify the orientation of the corresponding channel. (2D images only) normalize_probabilities : bool, optional Divide the input `probabilities` by their maximum to ensure a range in [0, 1]. exclusions : array-like of int, shape (M, N, ..., P), optional Volume of same shape as `watershed`. Mark points in the volume with the same label (>0) to prevent them from being merged during agglomeration. For example, if `exclusions[45, 92] == exclusions[51, 105] == 1`, then segments `watershed[45, 92]` and `watershed[51, 105]` will never be merged, regardless of the merge priority function. isfrozennode : function, optional Function taking in a Rag object and a node id and returning a bool. If the function returns ``True``, the node will not be merged, regardless of the merge priority function. isfrozenedge : function, optional As `isfrozennode`, but the function should take the graph and *two* nodes, to specify an edge that cannot be merged. 
""" def __init__(self, watershed=array([], label_dtype), probabilities=array([]), merge_priority_function=boundary_mean, gt_vol=None, feature_manager=features.base.Null(), mask=None, show_progress=False, connectivity=1, channel_is_oriented=None, orientation_map=array([]), normalize_probabilities=False, exclusions=array([]), isfrozennode=None, isfrozenedge=None): super(Rag, self).__init__(weighted=False) self.show_progress = show_progress self.connectivity = connectivity self.pbar = (ip.StandardProgressBar() if self.show_progress else ip.NoProgressBar()) self.set_watershed(watershed, connectivity) self.set_probabilities(probabilities, normalize_probabilities) self.set_orientations(orientation_map, channel_is_oriented) self.merge_priority_function = merge_priority_function self.max_merge_score = -inf if mask is None: self.mask = np.ones(self.watershed_r.shape, dtype=bool) else: self.mask = morpho.pad(mask, True).ravel() self.build_graph_from_watershed() self.set_feature_manager(feature_manager) self.set_ground_truth(gt_vol) self.set_exclusions(exclusions) self.merge_queue = MergeQueue() self.tree = tree.Ultrametric(self.nodes()) self.frozen_nodes = set() if isfrozennode is not None: for node in self.nodes(): if isfrozennode(self, node): self.frozen_nodes.add(node) self.frozen_edges = set() if isfrozenedge is not None: for n1, n2 in self.edges(): if isfrozenedge(self, n1, n2): self.frozen_edges.add((n1,n2)) def __copy__(self): """Return a copy of the object and attributes. """ pr_shape = self.probabilities_r.shape g = super(Rag, self).copy() g.watershed_r = g.watershed.ravel() g.probabilities_r = g.probabilities.reshape(pr_shape) return g def copy(self): """Return a copy of the object and attributes. 
""" return self.__copy__() def extent(self, nodeid): try: ext = self.extents full_ext = [ext.indices[ext.indptr[f]:ext.indptr[f+1]] for f in self.node[nodeid]['fragments']] return np.concatenate(full_ext).astype(np.intp) except AttributeError: extent_array = opt.flood_fill(self.watershed, np.array(self.node[nodeid]['entrypoint']), np.fromiter(self.node[nodeid]['fragments'], dtype=int)) if len(extent_array) != self.node[nodeid]['size']: sys.stderr.write('Flood fill fail - found %d voxels but size' 'expected %d\n' % (len(extent_array), self.node[nodeid]['size'])) raveled_indices = np.ravel_multi_index(extent_array.T, self.watershed.shape) return set(raveled_indices) def boundary(self, u, v): edge_dict = self[u][v] try: return edge_dict['boundary'] except KeyError: pass # not using old system ids = edge_dict['boundary-ids'] b = self.boundaries all_bounds = [b.indices[b.indptr[i]:b.indptr[i+1]] for i in ids] return np.concatenate(all_bounds).astype(np.intp) def real_edges(self, *args, **kwargs): """Return edges internal to the volume. The RAG actually includes edges to a "virtual" region that envelops the entire volume. This function returns the list of edges that are internal to the volume. Parameters ---------- *args, **kwargs : arbitrary types Arguments and keyword arguments are passed through to the ``edges()`` function of the ``networkx.Graph`` class. Returns ------- edge_list : list of tuples A list of pairs of node IDs, which are typically integers. See Also -------- real_edges_iter, networkx.Graph.edges """ return [e for e in super(Rag, self).edges(*args, **kwargs) if self.boundary_body not in e[:2]] def real_edges_iter(self, *args, **kwargs): """Return iterator of edges internal to the volume. The RAG actually includes edges to a "virtual" region that envelops the entire volume. This function returns the list of edges that are internal to the volume. 
Parameters ---------- *args, **kwargs : arbitrary types Arguments and keyword arguments are passed through to the ``edges()`` function of the ``networkx.Graph`` class. Returns ------- edges_iter : iterator of tuples An iterator over pairs of node IDs, which are typically integers. """ return (e for e in super(Rag, self).edges_iter(*args, **kwargs) if self.boundary_body not in e[:2]) def build_graph_from_watershed(self, idxs=None): """Build the graph object from the region labels. The region labels should have been set ahead of time using ``set_watershed()``. Parameters ---------- idxs : array-like of int, optional Linear indices into raveled volume array. If provided, the graph is built only for these indices. """ if self.watershed.size == 0: return # stop processing for empty graphs if idxs is None: idxs = arange(self.watershed.size, dtype=self.steps.dtype) idxs = idxs[self.mask[idxs]] # use only masked idxs self.add_node(self.boundary_body) labels = np.unique(self.watershed_r[idxs]) sizes = np.bincount(self.watershed_r) if not hasattr(self, 'extents'): self.extents = lol.extents(self.watershed) for nodeid in labels: self.add_node(nodeid) node = self.node[nodeid] node['size'] = sizes[nodeid] node['fragments'] = set([nodeid]) node['entrypoint'] = ( np.array(np.unravel_index(self.extent(nodeid)[0], self.watershed.shape))) inner_idxs = idxs[self.watershed_r[idxs] != self.boundary_body] if self.show_progress: inner_idxs = ip.with_progress(inner_idxs, title='Graph ', pbar=self.pbar) for idx in inner_idxs: nodeid = self.watershed_r[idx] ns = idx + self.steps ns = ns[self.mask[ns]] adj = self.watershed_r[ns] adj = set(adj) for v in adj: if v == nodeid: continue if self.has_edge(nodeid, v): self[nodeid][v]['boundary'].append(idx) else: self.add_edge(nodeid, v, boundary=[idx]) def set_feature_manager(self, feature_manager): """Set the feature manager and ensure feature caches are computed. 
Parameters ---------- feature_manager : ``features.base.Null`` object The feature manager to be used by this RAG. Returns ------- None """ self.feature_manager = feature_manager self.compute_feature_caches() def compute_feature_caches(self): """Use the feature manager to compute node and edge feature caches. Parameters ---------- None Returns ------- None """ for n in ip.with_progress( self.nodes(), title='Node caches ', pbar=self.pbar): self.node[n]['feature-cache'] = \ self.feature_manager.create_node_cache(self, n) for n1, n2 in ip.with_progress( self.edges(), title='Edge caches ', pbar=self.pbar): self[n1][n2]['feature-cache'] = \ self.feature_manager.create_edge_cache(self, n1, n2) def set_probabilities(self, probs=array([]), normalize=False): """Set the `probabilities` attributes of the RAG. For various reasons, including removing the need for bounds checking when looking for neighboring pixels, the volume of pixel-level probabilities is padded on all faces. In addition, this function adds an attribute `probabilities_r`, a raveled view of the padded probabilities array for quick access to individual voxels using linear indices. Parameters ---------- probs : array The input probabilities array. normalize : bool, optional If ``True``, the values in the array are scaled to be in [0, 1]. 
Returns ------- None """ if len(probs) == 0: self.probabilities = zeros_like(self.watershed) self.probabilities_r = self.probabilities.ravel() probs = probs.astype('float') if normalize and len(probs) > 1: probs -= probs.min() # ensure probs.min() == 0 probs /= probs.max() # ensure probs.max() == 1 sp = probs.shape sw = tuple(array(self.watershed.shape, dtype=int)-\ 2*self.pad_thickness*ones(self.watershed.ndim, dtype=int)) p_ndim = probs.ndim w_ndim = self.watershed.ndim padding = [inf]+(self.pad_thickness-1)*[0] if p_ndim == w_ndim: self.probabilities = morpho.pad(probs, padding) self.probabilities_r = self.probabilities.ravel()[:,newaxis] elif p_ndim == w_ndim+1: axes = list(range(p_ndim-1)) self.probabilities = morpho.pad(probs, padding, axes) self.probabilities_r = self.probabilities.reshape( (self.watershed.size, -1)) def set_orientations(self, orientation_map, channel_is_oriented): """Set the orientation map of the probability image. Parameters ---------- orientation_map : array of float A map of angles of the same shape as the superpixel map. channel_is_oriented : 1D array-like of bool A vector having length the number of channels in the probability map. 
Returns ------- None """ if len(orientation_map) == 0: self.orientation_map = zeros_like(self.watershed) self.orientation_map_r = self.orientation_map.ravel() padding = [0]+(self.pad_thickness-1)*[0] self.orientation_map = morpho.pad(orientation_map, padding).astype(int) self.orientation_map_r = self.orientation_map.ravel() if channel_is_oriented is None: nchannels = 1 if self.probabilities.ndim==self.watershed.ndim \ else self.probabilities.shape[-1] self.channel_is_oriented = array([False]*nchannels) self.max_probabilities_r = zeros_like(self.probabilities_r) self.oriented_probabilities_r = zeros_like(self.probabilities_r) self.non_oriented_probabilities_r = self.probabilities_r else: self.channel_is_oriented = channel_is_oriented self.max_probabilities_r = \ self.probabilities_r[:, self.channel_is_oriented].max(axis=1) self.oriented_probabilities_r = \ self.probabilities_r[:, self.channel_is_oriented] self.oriented_probabilities_r = \ self.oriented_probabilities_r[ list(range(len(self.oriented_probabilities_r))), self.orientation_map_r] self.non_oriented_probabilities_r = \ self.probabilities_r[:, ~self.channel_is_oriented] def set_watershed(self, ws=array([], label_dtype), connectivity=1): """Set the initial segmentation volume (watershed). The initial segmentation is called `watershed` for historical reasons only. Parameters ---------- ws : array of int The initial segmentation. connectivity : int in {1, ..., `ws.ndim`}, optional The pixel neighborhood. 
Returns ------- None """ ws = ws.astype(label_dtype) try: self.boundary_body = np.max(ws) + 1 except ValueError: # empty watershed given self.boundary_body = 1 self.volume_size = ws.size if ws.size > 0: ws, _, inv = relabel_sequential(ws) self.inverse_watershed_map = inv # translates to original labels self.watershed = morpho.pad(ws, self.boundary_body) self.watershed_r = self.watershed.ravel() self.pad_thickness = 1 self.steps = morpho.raveled_steps_to_neighbors(self.watershed.shape, connectivity) def set_ground_truth(self, gt=None): """Set the ground truth volume. This is useful for tracking segmentation accuracy over time. Parameters ---------- gt : array of int A ground truth segmentation of the same volume passed to ``set_watershed``. Returns ------- None """ if gt is not None: gtm = gt.max()+1 gt_ignore = [0, gtm] if (gt==0).any() else [gtm] seg_ignore = [0, self.boundary_body] if \ (self.watershed==0).any() else [self.boundary_body] self.gt = morpho.pad(gt, gtm) self.rig = merge_contingency_table(self.watershed, self.gt, ignore_seg=seg_ignore, ignore_gt=gt_ignore) else: self.gt = None # null pattern to transparently allow merging of nodes. # Bonus feature: counts how many sp's went into a single node. try: self.rig = ones(2 * self.watershed.max() + 1) except ValueError: self.rig = ones(2 * self.number_of_nodes() + 1) def set_exclusions(self, excl): """Set an exclusion volume, forbidding certain merges. Parameters ---------- excl : array of int Exclusions work as follows: the volume `excl` is the same shape as the initial segmentation (see ``set_watershed``), and consists of mostly 0s. Any voxels with *the same* non-zero label will not be allowed to merge during agglomeration (provided they were not merged in the initial segmentation). This allows manual separation *a priori* of difficult-to- -segment regions. 
Returns ------- None """ if excl.size != 0: excl = morpho.pad(excl, [0]*self.pad_thickness) for n in self.nodes(): if excl.size != 0: eids = unique(excl.ravel()[self.extent(n)]) eids = eids[flatnonzero(eids)] self.node[n]['exclusions'] = set(list(eids)) else: self.node[n]['exclusions'] = set() def build_merge_queue(self): """Build a queue of node pairs to be merged in a specific priority. Returns ------- mq : MergeQueue object A MergeQueue is a Python ``deque`` with a specific element structure: a list of length 4 containing: - the merge priority (any ordered type) - a 'valid' flag - and the two nodes in arbitrary order The valid flag allows one to "remove" elements from the queue in O(1) time by setting the flag to ``False``. Then, one checks the flag when popping elements and ignores those marked as invalid. One other specific feature is that there are back-links from edges to their corresponding queue items so that when nodes are merged, affected edges can be invalidated and reinserted in the queue with a new priority. """ queue_items = [] for l1, l2 in self.real_edges_iter(): w = self.merge_priority_function(self,l1,l2) qitem = [w, True, l1, l2] queue_items.append(qitem) self[l1][l2]['qlink'] = qitem self[l1][l2]['weight'] = w return MergeQueue(queue_items, with_progress=self.show_progress) def rebuild_merge_queue(self): """Build a merge queue from scratch and assign to self.merge_queue. See Also -------- build_merge_queue """ self.merge_queue = self.build_merge_queue() def agglomerate(self, threshold=0.5, save_history=False): """Merge nodes hierarchically until given edge confidence threshold. This is the main workhorse of the ``agglo`` module! Parameters ---------- threshold : float, optional The edge priority at which to stop merging. save_history : bool, optional Whether to save and return a history of all the merges made. Returns ------- history : list of tuple of int, optional The ordered history of node pairs merged. 
scores : list of float, optional The list of merge scores corresponding to the `history`. evaluation : list of tuple, optional The split VI after each merge. This is only meaningful if a ground truth volume was provided at build time. Notes ----- This function returns ``None`` when `save_history` is ``False``. """ if self.merge_queue.is_empty(): self.merge_queue = self.build_merge_queue() history, scores, evaluation = [], [], [] while len(self.merge_queue) > 0 and \ self.merge_queue.peek()[0] < threshold: merge_priority, _, n1, n2 = self.merge_queue.pop() self.update_frozen_sets(n1, n2) self.merge_nodes(n1, n2, merge_priority) if save_history: history.append((n1,n2)) scores.append(merge_priority) evaluation.append( (self.number_of_nodes()-1, self.split_vi()) ) if save_history: return history, scores, evaluation def agglomerate_count(self, stepsize=100, save_history=False): """Agglomerate until 'stepsize' merges have been made. This function is like ``agglomerate``, but rather than to a certain threshold, a certain number of merges are made, regardless of threshold. Parameters ---------- stepsize : int, optional The number of merges to make. save_history : bool, optional Whether to save and return a history of all the merges made. Returns ------- history : list of tuple of int, optional The ordered history of node pairs merged. scores : list of float, optional The list of merge scores corresponding to the `history`. evaluation : list of tuple, optional The split VI after each merge. This is only meaningful if a ground truth volume was provided at build time. Notes ----- This function returns ``None`` when `save_history` is ``False``. 
See Also -------- agglomerate """ if self.merge_queue.is_empty(): self.merge_queue = self.build_merge_queue() history, evaluation = [], [] i = 0 for i in range(stepsize): if len(self.merge_queue) == 0: break merge_priority, _, n1, n2 = self.merge_queue.pop() i += 1 self.merge_nodes(n1, n2, merge_priority) if save_history: history.append((n1, n2)) evaluation.append( (self.number_of_nodes()-1, self.split_vi()) ) if save_history: return history, evaluation def agglomerate_ladder(self, min_size=1000, strictness=2): """Merge sequentially all nodes smaller than `min_size`. Parameters ---------- min_size : int, optional The smallest allowable segment after ladder completion. strictness : {1, 2, 3}, optional `strictness == 1`: all nodes smaller than `min_size` are merged according to the merge priority function. `strictness == 2`: in addition to `1`, small nodes can only be merged to big nodes. `strictness == 3`: in addition to `2`, nodes sharing less than one pixel of boundary are not agglomerated. Returns ------- None Notes ----- Nodes that are on the volume boundary are not agglomerated. """ original_merge_priority_function = self.merge_priority_function self.merge_priority_function = make_ladder( self.merge_priority_function, min_size, strictness ) self.rebuild_merge_queue() self.agglomerate(inf) self.merge_priority_function = original_merge_priority_function self.merge_queue.finish() self.rebuild_merge_queue() max_score = max([qitem[0] for qitem in self.merge_queue.q]) for n in self.tree.nodes(): self.tree.node[n]['w'] -= max_score def learn_agglomerate(self, gts, feature_map, min_num_samples=1, learn_flat=True, learning_mode='strict', labeling_mode='assignment', priority_mode='active', memory=True, unique=True, random_state=None, max_num_epochs=10, min_num_epochs=2, max_num_samples=np.inf, classifier='random forest', active_function=classifier_probability, mpf=boundary_mean): """Agglomerate while comparing to ground truth & classifying merges. 
Parameters ---------- gts : array of int or list thereof The ground truth volume(s) corresponding to the current probability map. feature_map : function (Rag, node, node) -> array of float The map from node pairs to a feature vector. This must consist either of uncached features or of the cache used when building the graph. min_num_samples : int, optional Continue training until this many training examples have been collected. learn_flat : bool, optional Do a flat learning on the static graph with no agglomeration. learning_mode : {'strict', 'loose'}, optional In 'strict' mode, if a "don't merge" edge is encountered, it is added to the training set but the merge is not executed. In 'loose' mode, the merge is allowed to proceed. labeling_mode : {'assignment', 'vi-sign', 'rand-sign'}, optional How to decide whether two nodes should be merged based on the ground truth segmentations. ``'assignment'`` means the nodes are assigned to the ground truth node with which they share the highest overlap. ``'vi-sign'`` means the the VI change of the switch is used (negative is better). ``'rand-sign'`` means the change in Rand index is used (positive is better). priority_mode : string, optional One of: ``'active'``: Train a priority function with the data from previous epochs to obtain the next. ``'random'``: Merge edges at random. ``'mixed'``: Alternate between epochs of ``'active'`` and ``'random'``. ``'mean'``: Use the mean boundary value. (In this case, training is limited to 1 or 2 epochs.) ``'custom'``: Use the function provided by `mpf`. memory : bool, optional Keep the training data from all epochs (rather than just the most recent one). unique : bool, optional Remove duplicate feature vectors. random_state : int, optional If provided, this parameter is passed to `get_classifier` to set the random state and allow consistent results across tests. max_num_epochs : int, optional Do not train for longer than this (this argument *may* override the `min_num_samples` argument). 
min_num_epochs : int, optional Train for no fewer than this number of epochs. max_num_samples : int, optional Train for no more than this number of samples. classifier : string, optional Any valid classifier descriptor. See ``gala.classify.get_classifier()`` active_function : function (feat. map, classifier) -> function, optional Use this to create the next priority function after an epoch. mpf : function (Rag, node, node) -> float A merge priority function to use when ``priority_mode`` is ``'custom'``. Returns ------- data : list of array Four arrays containing: - the feature vectors, shape ``(n_samples, n_features)``. - the labels, shape ``(n_samples, 3)``. A value of `-1` means "should merge", while `1` means "should not merge". The columns correspond to the three labeling methods: assignment, VI sign, or RI sign. - the VI and RI change of each merge, ``(n_edges, 2)``. - the list of merged edges ``(n_edges, 2)``. alldata : list of list of array A list of lists like `data` above: one list for each epoch. Notes ----- The gala algorithm [1] uses the default parameters. For the LASH algorithm [2], use: - `learning_mode`: ``'loose'`` - `labeling_mode`: ``'rand-sign'`` - `memory`: ``False`` References ---------- .. [1] Nunez-Iglesias et al, Machine learning of hierarchical clustering to segment 2D and 3D images, PLOS ONE, 2013. .. [2] Jain et al, Learning to agglomerate superpixel hierarchies, NIPS, 2011. 
See Also -------- Rag """ learning_mode = learning_mode.lower() labeling_mode = labeling_mode.lower() priority_mode = priority_mode.lower() if priority_mode == 'mean' and unique: max_num_epochs = 2 if learn_flat else 1 if priority_mode in ['random', 'mean'] and not memory: max_num_epochs = 1 label_type_keys = {'assignment':0, 'vi-sign':1, 'rand-sign':2} if type(gts) != list: gts = [gts] # allow using single ground truth as input master_ctables = [merge_contingency_table(self.get_segmentation(), gt) for gt in gts] alldata = [] data = [[],[],[],[]] for num_epochs in range(max_num_epochs): ctables = deepcopy(master_ctables) if len(data[0]) > min_num_samples and num_epochs >= min_num_epochs: break if learn_flat and num_epochs == 0: alldata.append(self.learn_flat(gts, feature_map)) data = unique_learning_data_elements(alldata) if memory \ else alldata[-1] continue g = self.copy() if priority_mode == 'mean': g.merge_priority_function = boundary_mean elif num_epochs > 0 and priority_mode == 'active' or \ num_epochs % 2 == 1 and priority_mode == 'mixed': cl = get_classifier(classifier, random_state=random_state) feat, lab = classify.sample_training_data( data[0], data[1][:, label_type_keys[labeling_mode]], max_num_samples) cl = cl.fit(feat, lab) g.merge_priority_function = active_function(feature_map, cl) elif priority_mode == 'random' or \ (priority_mode == 'active' and num_epochs == 0): g.merge_priority_function = random_priority elif priority_mode == 'custom': g.merge_priority_function = mpf g.show_progress = False # bug in MergeQueue usage causes # progressbar crash. 
g.rebuild_merge_queue() alldata.append(g.learn_epoch(ctables, feature_map, learning_mode=learning_mode, labeling_mode=labeling_mode)) if memory: if unique: data = unique_learning_data_elements(alldata) else: data = concatenate_data_elements(alldata) else: data = alldata[-1] logging.debug('data size %d at epoch %d'%(len(data[0]), num_epochs)) return data, alldata def learn_flat(self, gts, feature_map): """Learn all edges on the graph, but don't agglomerate. Parameters ---------- gts : array of int or list thereof The ground truth volume(s) corresponding to the current probability map. feature_map : function (Rag, node, node) -> array of float The map from node pairs to a feature vector. This must consist either of uncached features or of the cache used when building the graph. Returns ------- data : list of array Four arrays containing: - the feature vectors, shape ``(n_samples, n_features)``. - the labels, shape ``(n_samples, 3)``. A value of `-1` means "should merge", while `1` means "should not merge". The columns correspond to the three labeling methods: assignment, VI sign, or RI sign. - the VI and RI change of each merge, ``(n_edges, 2)``. - the list of merged edges ``(n_edges, 2)``. See Also -------- learn_agglomerate """ if type(gts) != list: gts = [gts] # allow using single ground truth as input ctables = [merge_contingency_table(self.get_segmentation(), gt) for gt in gts] assignments = [ev.assignment_table(ct) for ct in ctables] return list(map(array, zip(*[ self.learn_edge(e, ctables, assignments, feature_map) for e in self.real_edges()]))) def learn_edge(self, edge, ctables, assignments, feature_map): """Determine whether an edge should be merged based on ground truth. Parameters ---------- edge : (int, int) tuple An edge in the graph. ctables : list of array A list of contingency tables determining overlap between the current segmentation and the ground truth. 
assignments : list of array Similar to the contingency tables, but each row is thresholded so each segment corresponds to exactly one ground truth segment. feature_map : function (Rag, node, node) -> array of float The map from node pairs to a feature vector. Returns ------- features : 1D array of float The feature vector for that edge. labels : 1D array of float, length 3 The labels determining whether the edge should be merged. A value of `-1` means "should merge", while `1` means "should not merge". The columns correspond to the three labeling methods: assignment, VI sign, or RI sign. weights : 1D array of float, length 2 The VI and RI change of the merge. nodes : tuple of int The given edge. """ n1, n2 = edge features = feature_map(self, n1, n2).ravel() # Calculate weights for weighting data points s1, s2 = [self.node[n]['size'] for n in [n1, n2]] weights = \ compute_local_vi_change(s1, s2, self.volume_size), \ compute_local_rand_change(s1, s2, self.volume_size) # Get the fraction of times that n1 and n2 assigned to # same segment in the ground truths cont_labels = [ [(-1)**(a[n1]==a[n2]).toarray().all() for a in assignments], [compute_true_delta_vi(ctable, n1, n2) for ctable in ctables], [-compute_true_delta_rand(ctable, n1, n2, self.volume_size) for ctable in ctables] ] labels = [np.sign(mean(cont_label)) for cont_label in cont_labels] if any(map(isnan, labels)) or any([label == 0 for label in labels]): logging.debug('NaN or 0 labels found. ' + ' '.join(map(str, [labels, (n1, n2)]))) labels = [1 if i==0 or isnan(i) or n1 in self.frozen_nodes or n2 in self.frozen_nodes or (n1, n2) in self.frozen_edges else i for i in labels] return features, labels, weights, (n1, n2) def learn_epoch(self, ctables, feature_map, learning_mode='permissive', labeling_mode='assignment'): """Learn the agglomeration process using various strategies. 
Parameters ---------- ctables : array of float or list thereof One or more contingency tables between own segments and gold standard segmentations feature_map : function (Rag, node, node) -> array of float The map from node pairs to a feature vector. This must consist either of uncached features or of the cache used when building the graph. learning_mode : {'strict', 'permissive'}, optional If ``'strict'``, don't proceed with a merge when it goes against the ground truth. For historical reasons, 'loose' is allowed as a synonym for 'strict'. labeling_mode : {'assignment', 'vi-sign', 'rand-sign'}, optional Which label to use for `learning_mode`. Note that all labels are saved in the end. Returns ------- data : list of array Four arrays containing: - the feature vectors, shape ``(n_samples, n_features)``. - the labels, shape ``(n_samples, 3)``. A value of `-1` means "should merge", while `1` means "should not merge". The columns correspond to the three labeling methods: assignment, VI sign, or RI sign. - the VI and RI change of each merge, ``(n_edges, 2)``. - the list of merged edges ``(n_edges, 2)``. 
""" label_type_keys = {'assignment':0, 'vi-sign':1, 'rand-sign':2} assignments = [ev.csrRowExpandableCSR(asst) for asst in map(ev.assignment_table, ctables)] g = self data = [] while len(g.merge_queue) > 0: merge_priority, valid, n1, n2 = g.merge_queue.pop() if g.boundary_body in (n1, n2): continue dat = g.learn_edge((n1,n2), ctables, assignments, feature_map) data.append(dat) label = dat[1][label_type_keys[labeling_mode]] if learning_mode != 'strict' or label < 0: node_id = g.merge_nodes(n1, n2, merge_priority) for ctable, assignment in zip(ctables, assignments): ctable[node_id] = ctable[n1] + ctable[n2] ctable[n1] = 0 ctable[n2] = 0 assignment[node_id] = (ctable[node_id] == ctable[node_id].max()) assignment[n1] = 0 assignment[n2] = 0 return list(map(array, zip(*data))) def replay_merge_history(self, merge_seq, labels=None, num_errors=1): """Agglomerate according to a merge sequence, optionally labeled. Parameters ---------- merge_seq : iterable of pair of int The sequence of node IDs to be merged. labels : iterable of int in {-1, 0, 1}, optional A sequence matching `merge_seq` specifying whether a merge should take place or not. -1 or 0 mean "should merge", 1 otherwise. Returns ------- n : int Number of elements consumed from `merge_seq` e : (int, int) Last merge pair observed. Notes ----- The merge sequence and labels *must* be generators if you don't want to manually keep track of how much has been consumed. The merging continues until `num_errors` false merges have been encountered, or until the sequence is fully consumed. 
""" if labels is None: labels1 = it.repeat(False) labels2 = it.repeat(False) else: labels1 = (label > 0 for label in labels) labels2 = (label > 0 for label in labels) counter = it.count() errors_remaining = conditional_countdown(labels2, num_errors) nodes = None for nodes, label, errs, count in \ zip(merge_seq, labels1, errors_remaining, counter): n1, n2 = nodes if not label: self.merge_nodes(n1, n2) elif errs == 0: break return next(counter), nodes def rename_node(self, old, new): """Rename node `old` to `new`, updating edges and weights. Parameters ---------- old : int The node being renamed. new : int The new node id. """ self.add_node(new, attr_dict=self.node[old]) self.add_edges_from( [(new, v, self[old][v]) for v in self.neighbors(old)]) for v in self.neighbors(new): qitem = self[new][v].get('qlink', None) if qitem is not None: if qitem[2] == old: qitem[2] = new else: qitem[3] = new self.remove_node(old) def merge_nodes(self, n1, n2, merge_priority=0.0): """Merge two nodes, while updating the necessary edges. Parameters ---------- n1, n2 : int Nodes determining the edge for which to update the UCM. merge_priority : float, optional The merge priority of the merge. Returns ------- node_id : int The id of the node resulting from the merge. Notes ----- Additionally, the RIG (region intersection graph), the contingency matrix to the ground truth (if provided) is updated. 
""" if len(self.node[n1]['exclusions'] & self.node[n2]['exclusions']) > 0: return else: self.node[n1]['exclusions'].update(self.node[n2]['exclusions']) w = self[n1][n2].get('weight', merge_priority) self.node[n1]['size'] += self.node[n2]['size'] self.node[n1]['fragments'].update(self.node[n2]['fragments']) self.feature_manager.update_node_cache(self, n1, n2, self.node[n1]['feature-cache'], self.node[n2]['feature-cache']) new_neighbors = [n for n in self.neighbors(n2) if n != n1] for n in new_neighbors: self.merge_edge_properties((n2, n), (n1, n)) try: self.merge_queue.invalidate(self[n1][n2]['qlink']) except KeyError: pass node_id = self.tree.merge(n1, n2, w) self.remove_node(n2) self.rename_node(n1, node_id) self.rig[node_id] = self.rig[n1] + self.rig[n2] self.rig[n1] = 0 self.rig[n2] = 0 return node_id def merge_subgraph(self, subgraph=None, source=None): """Merge a (typically) connected set of nodes together. Parameters ---------- subgraph : agglo.Rag, networkx.Graph, or list of int (node id) A subgraph to merge. source : int (node id), optional Merge the subgraph to this node. """ if type(subgraph) not in [Rag, Graph]: # input is node list subgraph = self.subgraph(subgraph) if len(subgraph) == 0: return for subsubgraph in nx.connected_component_subgraphs(subgraph): node_dfs = list(dfs_preorder_nodes(subsubgraph, source)) # dfs_preorder_nodes returns iter, convert to list source_node, other_nodes = node_dfs[0], node_dfs[1:] for current_node in other_nodes: source_node = self.merge_nodes(source_node, current_node) def split_node(self, u, n=2, **kwargs): """Use normalized cuts [1] to split a node/segment. Parameters ---------- u : int (node id) Which node to split. n : int, optional How many segments to split it into. Returns ------- None References ---------- .. [1] Shi, J., and Malik, J. (2000). Normalized cuts and image segmentation. Pattern Analysis and Machine Intelligence. 
""" node_extent = self.extent(u) labels = unique(self.watershed_r[node_extent]) self.remove_node(u) self.build_graph_from_watershed(idxs=node_extent) self.ncut(num_clusters=n, nodes=labels, **kwargs) def separate_fragments(self, f0, f1): """Ensure fragments (watersheds) f0 and f1 are in different nodes. If f0 and f1 are the same segment, split that segment at the lowest common ancestor of f0 and f1 in the merge tree, then add an exclusion. Otherwise, simply add an exclusion. Parameters ---------- f0, f1 : int The fragments to be separated. Returns ------- s0, s1 : int The separated segments resulting from the break. If the fragments were already in separate segments, return the highest ancestor of each fragment on the merge tree. """ lca = tree.lowest_common_ancestor(self.tree, f0, f1) if lca is not None: s0, s1 = self.tree.children(lca) self.delete_merge(lca) else: s0 = self.tree.highest_ancestor(f0) s1 = self.tree.highest_ancestor(f1) return s0, s1 def delete_merge(self, tree_node): """Delete the merge represented by `tree_node`. Parameters ---------- tree_node : int A node that may not be currently in the graph, but was at some point in its history. """ highest = self.tree.highest_ancestor(tree_node) if highest != tree_node: leaves = self.tree.leaves(tree_node) # the graph doesn't keep nodes in the history, only the # most recent nodes. So, we only need to find that one and # update its fragment list. self.node[highest]['fragments'].difference_update(leaves) self.tree.remove_node(tree_node) def merge_edge_properties(self, src, dst): """Merge the properties of edge src into edge dst. Parameters ---------- src, dst : (int, int) Edges being merged. 
Returns ------- None """ u, v = dst w, x = src if not self.has_edge(u,v): self.add_edge(u, v, attr_dict=self[w][x]) else: self[u][v]['boundary'].extend(self[w][x]['boundary']) self.feature_manager.update_edge_cache(self, (u, v), (w, x), self[u][v]['feature-cache'], self[w][x]['feature-cache']) try: self.merge_queue.invalidate(self[w][x]['qlink']) except KeyError: pass self.update_merge_queue(u, v) def update_merge_queue(self, u, v): """Update the merge queue item for edge (u, v). Add new by default. Parameters ---------- u, v : int (node id) Edge being updated. Returns ------- None """ if self.boundary_body in [u, v]: return if 'qlink' in self[u][v]: self.merge_queue.invalidate(self[u][v]['qlink']) if not self.merge_queue.is_null_queue: w = self.merge_priority_function(self,u,v) new_qitem = [w, True, u, v] self[u][v]['qlink'] = new_qitem self[u][v]['weight'] = w self.merge_queue.push(new_qitem) def get_segmentation(self, threshold=None): """Return the unpadded segmentation represented by the graph. Remember that the segmentation volume is padded with an "artificial" segment that envelops the volume. This function simply removes the wrapping and returns a segmented volume. Parameters ---------- threshold : float, optional Get the segmentation at the given threshold. If no threshold is given, return the segmentation at the current level of agglomeration. Returns ------- seg : array of int The segmentation of the volume presently represented by the graph. """ if threshold is None: # a threshold of np.inf is the same as no threshold on the # tree when getting the map (see below). Thus, using a # threshold of `None` (the default), we get the segmentation # implied by the current merge tree. threshold = np.inf elif threshold > self.max_merge_score: # If a higher threshold is required than has been merged, we # continue the agglomeration until that threshold is hit. 
self.agglomerate(threshold) m = self.tree.get_map(threshold) seg = m[self.watershed] if self.pad_thickness > 1: # volume has zero-boundaries seg = morpho.remove_merged_boundaries(seg, self.connectivity) return morpho.juicy_center(seg, self.pad_thickness) def build_volume(self, nbunch=None): """Return the segmentation induced by the graph. Parameters ---------- nbunch : iterable of int (node id), optional A list of nodes for which to build the volume. All nodes are used if this is not provided. Returns ------- seg : array of int The segmentation implied by the graph. Notes ----- This function is very similar to ``get_segmentation``, but it builds the segmentation from the bottom up, rather than using the currently-stored segmentation. """ v = zeros_like(self.watershed) vr = v.ravel() if nbunch is None: nbunch = self.nodes() for n in nbunch: vr[self.extent(n)] = n return morpho.juicy_center(v,self.pad_thickness) def build_boundary_map(self, ebunch=None): """Return a map of the current merge priority. Parameters ---------- ebunch : iterable of (int, int), optional The list of edges for which to build a map. Use all edges if not provided. Returns ------- bm : array of float The image of the edge weights. """ if len(self.merge_queue) == 0: self.rebuild_merge_queue() m = zeros(self.watershed.shape, 'float') mr = m.ravel() if ebunch is None: ebunch = self.real_edges_iter() ebunch = sorted([(self[u][v]['weight'], u, v) for u, v in ebunch]) for w, u, v in ebunch: b = self.boundary(u, v) mr[b] = w if hasattr(self, 'ignored_boundary'): m[self.ignored_boundary] = inf return morpho.juicy_center(m, self.pad_thickness) def remove_obvious_inclusions(self): """Merge any nodes with only one edge to their neighbors.""" for n in self.nodes(): if self.degree(n) == 1: self.merge_nodes(self.neighbors(n)[0], n) def remove_inclusions(self): """Merge any segments fully contained within other segments. 
In 3D EM images, inclusions are not biologically plausible, so this function can be used to remove them. Parameters ---------- None Returns ------- None """ bcc = list(biconnected_components(self)) if len(bcc) > 1: container = [i for i, s in enumerate(bcc) if self.boundary_body in s][0] del bcc[container] # remove the main graph bcc = list(map(list, bcc)) for cc in bcc: cc.sort(key=lambda x: self.node[x]['size'], reverse=True) bcc.sort(key=lambda x: self.node[x[0]]['size']) for cc in bcc: self.merge_subgraph(cc, cc[0]) def orphans(self): """List all the nodes that do not touch the volume boundary. Parameters ---------- None Returns ------- orphans : list of int (node id) A list of node ids. Notes ----- "Orphans" are not biologically plausible in EM data, so we can flag them with this function for further scrutiny. """ return [n for n in self.nodes() if not self.at_volume_boundary(n)] def compute_orphans(self): """Find all the segments that do not touch the volume boundary. Parameters ---------- None Returns ------- orphans : list of int (node id) A list of node ids. Notes ----- This function differs from ``orphans`` in that it does not use the graph, but rather computes orphans directly from the segmentation. """ return morpho.orphans(self.get_segmentation()) def is_traversed_by_node(self, n): """Determine whether a body traverses the volume. This is defined as touching the volume boundary at two distinct locations. Parameters ---------- n : int (node id) The node being inspected. Returns ------- tr : bool Whether the segment "traverses" the volume being segmented. 
""" if not self.at_volume_boundary(n) or n == self.boundary_body: return False v = zeros(self.watershed.shape, 'uint8') v.ravel()[self.boundary(n, self.boundary_body)] = 1 _, n = label(v, ones([3]*v.ndim)) return n > 1 def traversing_bodies(self): """List all bodies that traverse the volume.""" return [n for n in self.nodes() if self.is_traversed_by_node(n)] def non_traversing_bodies(self): """List bodies that are not orphans and do not traverse the volume.""" return [n for n in self.nodes() if self.at_volume_boundary(n) and not self.is_traversed_by_node(n) and n != self.boundary_body] def raveler_body_annotations(self, traverse=False): """Return JSON-compatible dict formatted for Raveler annotations.""" orphans = self.compute_orphans() non_traversing_bodies = self.compute_non_traversing_bodies() \ if traverse else [] data = \ [{'status':'not sure', 'comment':'orphan', 'body ID':int(o)} for o in orphans] +\ [{'status':'not sure', 'comment':'does not traverse', 'body ID':int(n)} for n in non_traversing_bodies] metadata = {'description':'body annotations', 'file version':2} return {'data':data, 'metadata':metadata} def at_volume_boundary(self, n): """Return True if node n touches the volume boundary.""" return self.has_edge(n, self.boundary_body) or n == self.boundary_body def should_merge(self, n1, n2): return self.rig[n1].argmax() == self.rig[n2].argmax() def get_pixel_label(self, n1, n2): boundary = self.boundary(n1, n2) min_idx = boundary[self.probabilities_r[boundary,0].argmin()] if self.should_merge(n1, n2): return min_idx, 2 else: return min_idx, 1 def pixel_labels_array(self, false_splits_only=False): ar = zeros_like(self.watershed_r) labels = [self.get_pixel_label(*e) for e in self.real_edges()] if false_splits_only: labels = [l for l in labels if l[1] == 2] ids, ls = list(map(array,zip(*labels))) ar[ids] = ls.astype(ar.dtype) return ar.reshape(self.watershed.shape) def split_vi(self, gt=None): if self.gt is None and gt is None: return array([0,0]) elif 
self.gt is not None: return split_vi(self.rig) else: return split_vi(self.get_segmentation(), gt, [0], [0]) def get_edge_coordinates(self, n1, n2, arbitrary=False): """Find where in the segmentation the edge (n1, n2) is most visible.""" return get_edge_coordinates(self, n1, n2, arbitrary) def write(self, fout, output_format='GraphML'): if output_format == 'Plaza JSON': self.write_plaza_json(fout) else: raise ValueError('Unsupported output format for agglo.Rag: %s' % output_format) def write_plaza_json(self, fout, synapsejson=None, offsetz=0): """Write graph to Steve Plaza's JSON spec.""" json_vals = {} if synapsejson is not None: synapse_file = open(synapsejson) json_vals1 = json.load(synapse_file) body_count = {} for item in json_vals1["data"]: bodyid = ((item["T-bar"])["body ID"]) if bodyid in body_count: body_count[bodyid] += 1 else: body_count[bodyid] = 1 for psd in item["partners"]: bodyid = psd["body ID"] if bodyid in body_count: body_count[bodyid] += 1 else: body_count[bodyid] = 1 json_vals["synapse_bodies"] = [] for body, count in body_count.items(): temp = [body, count] json_vals["synapse_bodies"].append(temp) edge_list = [ {'location': list(map(int, self.get_edge_coordinates(i, j)[-1::-1])), 'node1': int(i), 'node2': int(j), 'edge_size': len(self.boundary(i, j)), 'size1': self.node[i]['size'], 'size2': self.node[j]['size'], 'weight': float(self[i][j]['weight'])} for i, j in self.real_edges() ] json_vals['edge_list'] = edge_list with open(fout, 'w') as f: json.dump(json_vals, f, indent=4) def ncut(self, num_clusters=10, kmeans_iters=5, sigma=255.0*20, nodes=None, **kwargs): """Run normalized cuts on the current set of superpixels. 
Keyword arguments: num_clusters -- number of clusters to compute kmeans_iters -- # iterations to run kmeans when clustering sigma -- sigma value when setting up weight matrix Return value: None """ if nodes is None: nodes = self.nodes() # Compute weight matrix W = self.compute_W(self.merge_priority_function, nodes=nodes) # Run normalized cut labels, eigvec, eigval = ncutW(W, num_clusters, kmeans_iters, **kwargs) # Merge nodes that are in same cluster self.cluster_by_labels(labels, nodes) def cluster_by_labels(self, labels, nodes=None): """Merge all superpixels with the same label (1 label per 1 sp)""" if nodes is None: nodes = array(self.nodes()) if not (len(labels) == len(nodes)): raise ValueError('Number of labels should be %d but is %d.', self.number_of_nodes(), len(labels)) for l in unique(labels): inds = nonzero(labels==l)[0] nodes_to_merge = nodes[inds] node1 = nodes_to_merge[0] for node in nodes_to_merge[1:]: self.merge_nodes(node1, node) def compute_W(self, merge_priority_function, sigma=255.0*20, nodes=None): """ Computes the weight matrix for clustering""" if nodes is None: nodes = array(self.nodes()) n = len(nodes) nodes2ind = dict(zip(nodes, range(n))) W = lil_matrix((n,n)) for u, v in self.real_edges(nodes): try: i, j = nodes2ind[u], nodes2ind[v] except KeyError: continue w = merge_priority_function(self,u,v) W[i,j] = W[j,i] = np.exp(-w**2/sigma) return W def update_frozen_sets(self, n1, n2): self.frozen_nodes.discard(n1) self.frozen_nodes.discard(n2) for x, y in self.frozen_edges.copy(): if n2 in [x, y]: self.frozen_edges.discard((x, y)) if x == n2: self.frozen_edges.add((n1, y)) if y == n2: self.frozen_edges.add((x, n1)) def get_edge_coordinates(g, n1, n2, arbitrary=False): """Find where in the segmentation the edge (n1, n2) is most visible.""" boundary = g.boundary(n1, n2) if arbitrary: # quickly get an arbitrary point on the boundary idx = boundary.pop(); boundary.append(idx) coords = unravel_index(idx, g.watershed.shape) else: boundary_idxs = 
unravel_index(boundary, g.watershed.shape) coords = [bincount(dimcoords).argmax() for dimcoords in boundary_idxs] return array(coords) - g.pad_thickness def is_mito_boundary(g, n1, n2, channel=2, threshold=0.5): return max(np.mean(g.probabilities_r[g.boundary(n1, n2), c]) for c in channel) > threshold def is_mito(g, n, channel=2, threshold=0.5): return max(np.mean(g.probabilities_r[g.extent(n), c]) for c in channel) > threshold def best_possible_segmentation(ws, gt): """Build the best possible segmentation given a superpixel map.""" ws = Rag(ws) assignment = ev.assignment_table(ws.get_segmentation(), gt).tocsc() for gt_node in range(assignment.shape[1]): i, j = assignment.indptr[gt_node : gt_node+2] ws.merge_subgraph(assignment.indices[i:j]) return ws.get_segmentation()
#!/usr/bin/env python """ author: Stefano Gariazzo <gariazzo@ific.uv.es> Read the .tex files in a folder and creates a .bib file for the compilation of the .tex document. You should use INSPIRES keys for the bibtex entries in order to have a good behaviour of the code. Usage: python bibtexImporter.py "folder/where/tex/files/are/" "name_of_output.bib" assuming that you want to compile some tex file(s): "folder/where/tex/files/are/*.tex" and that your .bib file should be called: "folder/where/tex/files/are/name_of_output.bib". The code reads the texs to detect which entries must be saved in the output .bib file, checks the .bib files in the main local database (path configured in the script) to copy the entries that are already stored there or fetches the missing ones in INSPIRES. The downloaded information is saved both in the local database and in the output file. """ import sys,re,os import urllib2 #configuration: folder and main file where to save the downloaded entries bibfolder = '/home/gariazzo/Latex/bib/' saveInFile = "tmp.bib" ################################################################ # unicode characters replacements. # INSPIRES has the bad habit of using non-ascii characters that can't be saved properly in non-unicode files. # just convert the bad characters into standard ones. 
unicode_to_latex = { u"\u00C0": "A", u"\u00C1": "A", u"\u00C2": "A", u"\u00C3": "A", u"\u00C4": "A", u"\u00C5": "AA", u"\u00C6": "AE", u"\u00C7": "C", u"\u00C8": "E", u"\u00C9": "E", u"\u00CA": "E", u"\u00CB": "E", u"\u00CC": "I", u"\u00CD": "I", u"\u00CE": "I", u"\u00CF": "I", u"\u00D0": "DH", u"\u00D1": "N", u"\u00D2": "O", u"\u00D3": "O", u"\u00D4": "O", u"\u00D5": "O", u"\u00D6": "O", u"\u00D8": "O", u"\u00D9": "U", u"\u00DA": "U", u"\u00DB": "U", u"\u00DC": "U", u"\u00DD": "Y", u"\u00DE": "TH", u"\u00DF": "ss", u"\u00E0": "a", u"\u00E1": "a", u"\u00E2": "a", u"\u00E3": "a", u"\u00E4": "a", u"\u00E5": "aa", u"\u00E6": "ae", u"\u00E7": "c", u"\u00E8": "e", u"\u00E9": "e", u"\u00EA": "e", u"\u00EB": "e", u"\u00EC": "i", u"\u00ED": "i", u"\u00EE": "i", u"\u00EF": "i", u"\u00F0": "dh", u"\u00F1": "n", u"\u00F2": "o", u"\u00F3": "o", u"\u00F4": "o", u"\u00F5": "o", u"\u00F6": "o", u"\u00F8": "o", u"\u00F9": "u", u"\u00FA": "u", u"\u00FB": "u", u"\u00FC": "u", u"\u00FD": "y", u"\u00FE": "th", u"\u00FF": "y", u"\u0100": "A", u"\u0101": "a", u"\u0102": "A", u"\u0103": "a", u"\u0104": "A", u"\u0105": "a", u"\u0410": "A", u"\u0106": "C", u"\u0107": "c", u"\u0108": "C", u"\u0109": "c", u"\u010A": "C", u"\u010B": "c", u"\u010C": "C", u"\u010D": "c", u"\u010E": "D", u"\u010F": "d", u"\u0110": "DJ", u"\u0111": "dj", u"\u0112": "E", u"\u0113": "e", u"\u0114": "E", u"\u0115": "e", u"\u0116": "E", u"\u0117": "e", u"\u0118": "E", u"\u0119": "e", u"\u011A": "E", u"\u011B": "e", u"\u011C": "G", u"\u011D": "g", u"\u011E": "G", u"\u011F": "g", u"\u0120": "G", u"\u0121": "g", u"\u0122": "G", u"\u0123": "g", u"\u0124": "H", u"\u0125": "h", u"\u0128": "I", u"\u0129": "i", u"\u012A": "I", u"\u012B": "i", u"\u012C": "I", u"\u012D": "i", u"\u012E": "I", u"\u012F": "i", u"\u0130": "I", u"\u0131": "i", u"\u0132": "IJ", u"\u0133": "ij", u"\u0134": "J", u"\u0135": "j", u"\u0136": "K", u"\u0137": "k", u"\u0139": "L", u"\u013A": "l", u"\u013B": "L", u"\u013C": "l", u"\u013D": "L", u"\u013E": 
"l", u"\u0141": "L", u"\u0142": "l", u"\u0143": "N", u"\u0144": "n", u"\u0145": "N", u"\u0146": "n", u"\u0147": "N", u"\u0148": "n", u"\u0149": "n", u"\u014A": "NG", u"\u014B": "ng", u"\u014C": "O", u"\u014D": "o", u"\u014E": "O", u"\u014F": "o", u"\u0150": "O", u"\u0151": "o", u"\u0152": "OE", u"\u0153": "oe", u"\u0154": "R", u"\u0155": "r", u"\u0156": "R", u"\u0157": "r", u"\u0158": "R", u"\u0159": "r", u"\u015A": "S", u"\u015B": "s", u"\u015C": "S", u"\u015D": "s", u"\u015E": "S", u"\u015F": "s", u"\u0160": "S", u"\u0161": "s", u"\u0162": "T", u"\u0163": "t", u"\u0164": "T", u"\u0165": "t", u"\u0168": "U", u"\u0169": "u", u"\u016A": "U", u"\u016B": "u", u"\u016C": "U", u"\u016D": "u", u"\u016E": "U", u"\u016F": "u", u"\u0170": "U", u"\u0171": "u", u"\u0172": "U", u"\u0173": "u", u"\u0174": "W", u"\u0175": "w", u"\u0176": "Y", u"\u0177": "y", u"\u0178": "Y", u"\u0179": "Z", u"\u017A": "z", u"\u017B": "Z", u"\u017C": "z", u"\u017D": "Z", u"\u017E": "z", u"\u01F5": "g", u"\u03CC": "o", u"\u2013": "-", u"\u207b": "-", u"\u2014": "--", u"\u2015": "---", u"\u2018": "'", u"\u2019": "'", u"\xa0": " ", } translation_table = dict([(ord(k), unicode(v)) for k, v in unicode_to_latex.items()]) def parse_accents_str(string): """needed to remove bad unicode characters that cannot printed well""" if string is not None and string is not "": string = string.translate(translation_table) return string def writeToFile(text, stream, k): """write to file with try/except to avoid problems with unicode""" try: stream.write(text) return True except UnicodeEncodeError: print "the current entry '%s' cannot be saved since it contains a bad unicode character!"%m return False def retrieveurl(bibkey): """search Inspires for the missing entries""" url="http://inspirehep.net/search?p=" + bibkey + "&sf=&so=d&rm=&rg=1000&sc=0&of=hx&em=B"; print "looking for '%s' in %s"%(bibkey, url) response = urllib2.urlopen(url) data = response.read() # a `bytes` object text = data.decode('utf-8') 
i1=text.find("<pre>") i2=text.find("</pre>") if i1>0 and i2>0: text=text[i1+5:i2] else: text="" return text #read which bib files are in the main folder l=os.listdir(bibfolder) bibs=[] for e in l: if e.find('.bib')>0 and e.find('.bak')<0: bibs.append(e) #read the folder name, scan it for the existing .tex files to be compiled keysfold=sys.argv[1] l=os.listdir(keysfold) texs=[] for e in l: if e.find('.tex')>0 and e.find('.bac')<0 and e.find('~')<0: texs.append(e) #name of the output .bib file to be saved outfile=sys.argv[2] #print some information print "reading keys from "+keysfold+" folder:" print " "+" ".join(texs) print "bib entries from "+bibfolder+" directory:" print " "+" ".join(bibs) print "saving in "+keysfold+outfile+"\n" if not os.path.isfile(keysfold+outfile): with open(keysfold+outfile,'a'): os.utime(keysfold+outfile,None) #save content of tex files in a string: keyscont="" for t in texs: with open(keysfold+t) as r: keyscont += r.read() #if existing, read output file (to detect which bibtex entries are already there:) with open(keysfold+outfile) as r: outcont = r.read() #read the local bibtex database in the specified folder allbib="" for b in bibs: with open(bibfolder+b) as r: allbib += r.read() #regular expression utilities cite=re.compile('\\\\cite\{([A-Za-z]*:[0-9]*[a-z]*[,]?[\n ]*|[A-Za-z0-9\-][,]?[\n ]*)*\}',re.MULTILINE) #find \cite{...} bibel=re.compile('@[a-zA-Z]*\{([A-Za-z]*:[0-9]*[a-z]*)?,',re.MULTILINE|re.DOTALL) #find the @Article(or other)...}, entry for the key "m" bibty=re.compile('@[a-zA-Z]*\{',re.MULTILINE|re.DOTALL) #find the @Article(or other) entry for the key "m" #You can add here more fields that you have in your local bib file but that are not necessary for compilation: unw1=re.compile('[ ]*(Owner|Timestamp|__markedentry|File)+[ ]*=.*?,[\n]*') #remove unwanted fields unw2=re.compile('[ ]*(Owner|Timestamp|__markedentry|File)+[ ]*=.*?[\n ]*\}') #remove unwanted fields unw3=re.compile('[ ]*Abstract[ ]*=[ 
]*[{]+(.*?)[}]+,',re.MULTILINE) #remove Abstract field #use regex to detect "\cite{...}" commands in the tex files: citaz=[m for m in cite.finditer(keyscont)] strs=[] #for each "\cite{...}", extract the cited bibtex keys for c in citaz: b=c.group().replace(r'\cite{','') d=b.replace(' ','') b=d.replace('\n','') d=b.replace(r'}','') a=d.split(',') for e in a: if e not in strs: strs.append(e) print "keys found: %d"%len(strs) missing=[] warnings=0 #read the list of bibtex needed keys and check which ones are already in the main bibliography files for s in strs: if s not in outcont: missing.append(s) print "missing: %d"%len(missing) notfound="" keychange="" #enters the main loop. If entries exist locally, they are just copied, else INSPIRES will be searched for them for m in missing: art=re.compile('@[a-zA-Z]*\{'+m+',.*?@',re.MULTILINE|re.DOTALL) #find the @Article(or other) entry for the key "m" t=[j for j in art.finditer(allbib)] #local bibtexs match: remove unwanted stuff and copy to output file if len(t)>0: a=t[0].group() bib='@'+a.replace('@','') for u in unw1.finditer(bib): bib=bib.replace(u.group(),'') for u in unw2.finditer(bib): bib=bib.replace(u.group(),'') for u in unw3.finditer(bib): bib=bib.replace(u.group(),'') bibf = '\n'.join([line for line in bib.split('\n') if line.strip() ]) with open(keysfold+outfile,"a") as o: if writeToFile(bibf+"\n", o, m): print "- %s inserted!"%m #entry missing in local database: search inspires else: new=parse_accents_str(retrieveurl(m))#open search url if len(new): #sometimes inspires changes the bibtex keys after some time. #save the entry in the output .bib file and give a warning if it happened for the current entry with open(keysfold+outfile,"a") as o: if not new.find(m)>0: warnings+=1 t=[j.group() for j in bibel.finditer(new)] t1=[] for s in t: for u in bibty.finditer(s): s=s.replace(u.group(),'') s=s.replace(',','') t1.append(s) keychange+= "--> WARNING! 
%s has a new key: %s\n"%(m,t1[0]) #first, save the bibtex as it is in the main database or temporary file: for s in t1: if m not in allbib and str(s) not in allbib: with open(bibfolder+saveInFile,"a") as o: if writeToFile(new+"\n",o,m): print "'%s' (new key '%s') retrieved by InspireHEP and inserted into %s file - %d bytes"%(m,s,saveInFile,len(new)) if str(s) not in outcont: if writeToFile(new+"\n",o,m): print "... and it was inserted in the .bib file" else: notfound+="-- warning: missing entry for %s\n"%m warnings+=1 #print resume of what has been done: keys that don't exist in INSPIRES, entries whose key has been changed, total number of warnings print "finished!\n" print notfound print keychange print "--> %d warning(s) occurred!"%warnings python3 compatibility #!/usr/bin/env python """ author: Stefano Gariazzo <gariazzo@ific.uv.es> Read the .tex files in a folder and creates a .bib file for the compilation of the .tex document. You should use INSPIRES keys for the bibtex entries in order to have a good behaviour of the code. Usage: python bibtexImporter.py "folder/where/tex/files/are/" "name_of_output.bib" assuming that you want to compile some tex file(s): "folder/where/tex/files/are/*.tex" and that your .bib file should be called: "folder/where/tex/files/are/name_of_output.bib". The code reads the texs to detect which entries must be saved in the output .bib file, checks the .bib files in the main local database (path configured in the script) to copy the entries that are already stored there or fetches the missing ones in INSPIRES. The downloaded information is saved both in the local database and in the output file. 
""" import sys,re,os try: # For Python 3.0 and later from urllib.request import urlopen except ImportError: # Fall back to Python 2's urllib2 from urllib2 import urlopen #configuration: folder and main file where to save the downloaded entries bibfolder = '/home/gariazzo/Latex/bib/' saveInFile = "tmp.bib" ################################################################ # unicode characters replacements. # INSPIRES has the bad habit of using non-ascii characters that can't be saved properly in non-unicode files. # just convert the bad characters into standard ones. unicode_to_latex = { u"\u00C0": "A", u"\u00C1": "A", u"\u00C2": "A", u"\u00C3": "A", u"\u00C4": "A", u"\u00C5": "AA", u"\u00C6": "AE", u"\u00C7": "C", u"\u00C8": "E", u"\u00C9": "E", u"\u00CA": "E", u"\u00CB": "E", u"\u00CC": "I", u"\u00CD": "I", u"\u00CE": "I", u"\u00CF": "I", u"\u00D0": "DH", u"\u00D1": "N", u"\u00D2": "O", u"\u00D3": "O", u"\u00D4": "O", u"\u00D5": "O", u"\u00D6": "O", u"\u00D8": "O", u"\u00D9": "U", u"\u00DA": "U", u"\u00DB": "U", u"\u00DC": "U", u"\u00DD": "Y", u"\u00DE": "TH", u"\u00DF": "ss", u"\u00E0": "a", u"\u00E1": "a", u"\u00E2": "a", u"\u00E3": "a", u"\u00E4": "a", u"\u00E5": "aa", u"\u00E6": "ae", u"\u00E7": "c", u"\u00E8": "e", u"\u00E9": "e", u"\u00EA": "e", u"\u00EB": "e", u"\u00EC": "i", u"\u00ED": "i", u"\u00EE": "i", u"\u00EF": "i", u"\u00F0": "dh", u"\u00F1": "n", u"\u00F2": "o", u"\u00F3": "o", u"\u00F4": "o", u"\u00F5": "o", u"\u00F6": "o", u"\u00F8": "o", u"\u00F9": "u", u"\u00FA": "u", u"\u00FB": "u", u"\u00FC": "u", u"\u00FD": "y", u"\u00FE": "th", u"\u00FF": "y", u"\u0100": "A", u"\u0101": "a", u"\u0102": "A", u"\u0103": "a", u"\u0104": "A", u"\u0105": "a", u"\u0410": "A", u"\u0106": "C", u"\u0107": "c", u"\u0108": "C", u"\u0109": "c", u"\u010A": "C", u"\u010B": "c", u"\u010C": "C", u"\u010D": "c", u"\u010E": "D", u"\u010F": "d", u"\u0110": "DJ", u"\u0111": "dj", u"\u0112": "E", u"\u0113": "e", u"\u0114": "E", u"\u0115": "e", u"\u0116": "E", u"\u0117": "e", 
u"\u0118": "E", u"\u0119": "e", u"\u011A": "E", u"\u011B": "e", u"\u011C": "G", u"\u011D": "g", u"\u011E": "G", u"\u011F": "g", u"\u0120": "G", u"\u0121": "g", u"\u0122": "G", u"\u0123": "g", u"\u0124": "H", u"\u0125": "h", u"\u0128": "I", u"\u0129": "i", u"\u012A": "I", u"\u012B": "i", u"\u012C": "I", u"\u012D": "i", u"\u012E": "I", u"\u012F": "i", u"\u0130": "I", u"\u0131": "i", u"\u0132": "IJ", u"\u0133": "ij", u"\u0134": "J", u"\u0135": "j", u"\u0136": "K", u"\u0137": "k", u"\u0139": "L", u"\u013A": "l", u"\u013B": "L", u"\u013C": "l", u"\u013D": "L", u"\u013E": "l", u"\u0141": "L", u"\u0142": "l", u"\u0143": "N", u"\u0144": "n", u"\u0145": "N", u"\u0146": "n", u"\u0147": "N", u"\u0148": "n", u"\u0149": "n", u"\u014A": "NG", u"\u014B": "ng", u"\u014C": "O", u"\u014D": "o", u"\u014E": "O", u"\u014F": "o", u"\u0150": "O", u"\u0151": "o", u"\u0152": "OE", u"\u0153": "oe", u"\u0154": "R", u"\u0155": "r", u"\u0156": "R", u"\u0157": "r", u"\u0158": "R", u"\u0159": "r", u"\u015A": "S", u"\u015B": "s", u"\u015C": "S", u"\u015D": "s", u"\u015E": "S", u"\u015F": "s", u"\u0160": "S", u"\u0161": "s", u"\u0162": "T", u"\u0163": "t", u"\u0164": "T", u"\u0165": "t", u"\u0168": "U", u"\u0169": "u", u"\u016A": "U", u"\u016B": "u", u"\u016C": "U", u"\u016D": "u", u"\u016E": "U", u"\u016F": "u", u"\u0170": "U", u"\u0171": "u", u"\u0172": "U", u"\u0173": "u", u"\u0174": "W", u"\u0175": "w", u"\u0176": "Y", u"\u0177": "y", u"\u0178": "Y", u"\u0179": "Z", u"\u017A": "z", u"\u017B": "Z", u"\u017C": "z", u"\u017D": "Z", u"\u017E": "z", u"\u01F5": "g", u"\u03CC": "o", u"\u2013": "-", u"\u207b": "-", u"\u2014": "--", u"\u2015": "---", u"\u2018": "'", u"\u2019": "'", u"\xa0": " ", } try: translation_table = dict([(ord(k), unicode(v)) for k, v in unicode_to_latex.items()]) except NameError: translation_table = dict([(ord(k), str(v)) for k, v in unicode_to_latex.items()]) def parse_accents_str(string): """needed to remove bad unicode characters that cannot printed well""" if string is not 
None and string is not "": string = string.translate(translation_table) return string def writeToFile(text, filename, k): """write to file with try/except to avoid problems with unicode""" try: with open(filename,"a") as stream: stream.write(text) return True except UnicodeEncodeError: print("the current entry '%s' cannot be saved since it contains a bad unicode character!"%m) return False def retrieveurl(bibkey): """search Inspires for the missing entries""" url="http://inspirehep.net/search?p=" + bibkey + "&sf=&so=d&rm=&rg=1000&sc=0&of=hx&em=B"; print("looking for '%s' in %s"%(bibkey, url)) response = urlopen(url) data = response.read() # a `bytes` object text = data.decode('utf-8') i1=text.find("<pre>") i2=text.find("</pre>") if i1>0 and i2>0: text=text[i1+5:i2] else: text="" return text #read which bib files are in the main folder l=os.listdir(bibfolder) bibs=[] for e in l: if e.find('.bib')>0 and e.find('.bak')<0: bibs.append(e) #read the folder name, scan it for the existing .tex files to be compiled keysfold=sys.argv[1] l=os.listdir(keysfold) texs=[] for e in l: if e.find('.tex')>0 and e.find('.bac')<0 and e.find('~')<0: texs.append(e) #name of the output .bib file to be saved outfile=sys.argv[2] #print some information print("reading keys from "+keysfold+" folder:") print(" "+" ".join(texs)) print("bib entries from "+bibfolder+" directory:") print(" "+" ".join(bibs)) print("saving in "+keysfold+outfile+"\n") if not os.path.isfile(keysfold+outfile): with open(keysfold+outfile,'a'): os.utime(keysfold+outfile,None) #save content of tex files in a string: keyscont="" for t in texs: with open(keysfold+t) as r: keyscont += r.read() #if existing, read output file (to detect which bibtex entries are already there:) with open(keysfold+outfile) as r: outcont = r.read() #read the local bibtex database in the specified folder allbib="" for b in bibs: with open(bibfolder+b) as r: allbib += r.read() #regular expression utilities 
cite=re.compile('\\\\cite\{([A-Za-z]*:[0-9]*[a-z]*[,]?[\n ]*|[A-Za-z0-9\-][,]?[\n ]*)*\}',re.MULTILINE) #find \cite{...} bibel=re.compile('@[a-zA-Z]*\{([A-Za-z]*:[0-9]*[a-z]*)?,',re.MULTILINE|re.DOTALL) #find the @Article(or other)...}, entry for the key "m" bibty=re.compile('@[a-zA-Z]*\{',re.MULTILINE|re.DOTALL) #find the @Article(or other) entry for the key "m" #You can add here more fields that you have in your local bib file but that are not necessary for compilation: unw1=re.compile('[ ]*(Owner|Timestamp|__markedentry|File)+[ ]*=.*?,[\n]*') #remove unwanted fields unw2=re.compile('[ ]*(Owner|Timestamp|__markedentry|File)+[ ]*=.*?[\n ]*\}') #remove unwanted fields unw3=re.compile('[ ]*Abstract[ ]*=[ ]*[{]+(.*?)[}]+,',re.MULTILINE) #remove Abstract field #use regex to detect "\cite{...}" commands in the tex files: citaz=[m for m in cite.finditer(keyscont)] strs=[] #for each "\cite{...}", extract the cited bibtex keys for c in citaz: b=c.group().replace(r'\cite{','') d=b.replace(' ','') b=d.replace('\n','') d=b.replace(r'}','') a=d.split(',') for e in a: if e not in strs: strs.append(e) print("keys found: %d"%len(strs)) missing=[] warnings=0 #read the list of bibtex needed keys and check which ones are already in the main bibliography files for s in strs: if s not in outcont: missing.append(s) print("missing: %d"%len(missing)) notfound="" keychange="" #enters the main loop. 
If entries exist locally, they are just copied, else INSPIRES will be searched for them for m in missing: art=re.compile('@[a-zA-Z]*\{'+m+',.*?@',re.MULTILINE|re.DOTALL) #find the @Article(or other) entry for the key "m" t=[j for j in art.finditer(allbib)] #local bibtexs match: remove unwanted stuff and copy to output file if len(t)>0: a=t[0].group() bib='@'+a.replace('@','') for u in unw1.finditer(bib): bib=bib.replace(u.group(),'') for u in unw2.finditer(bib): bib=bib.replace(u.group(),'') for u in unw3.finditer(bib): bib=bib.replace(u.group(),'') bibf = '\n'.join([line for line in bib.split('\n') if line.strip() ]) if writeToFile(bibf+"\n", keysfold+outfile, m): print("- %s inserted!"%m) #entry missing in local database: search inspires else: new=parse_accents_str(retrieveurl(m))#open search url if len(new): #sometimes inspires changes the bibtex keys after some time. #save the entry in the output .bib file and give a warning if it happened for the current entry if not new.find(m)>0: warnings+=1 t=[j.group() for j in bibel.finditer(new)] t1=[] for s in t: for u in bibty.finditer(s): s=s.replace(u.group(),'') s=s.replace(',','') t1.append(s) keychange+= "--> WARNING! %s has a new key: %s\n"%(m,t1[0]) #first, save the bibtex as it is in the main database or temporary file: for s in t1: if m not in allbib and str(s) not in allbib: if writeToFile(new+"\n",bibfolder+saveInFile,m): print("'%s' (new key '%s') retrieved by InspireHEP and inserted into %s file - %d bytes"%(m,s,saveInFile,len(new))) if str(s) not in outcont: if writeToFile(new+"\n",keysfold+outfile,m): print("... and it was inserted in the .bib file") else: notfound+="-- warning: missing entry for %s\n"%m warnings+=1 #print resume of what has been done: keys that don't exist in INSPIRES, entries whose key has been changed, total number of warnings print("finished!\n") print(notfound) print(keychange) print("--> %d warning(s) occurred!"%warnings)
from django.db import models from django import forms from wheelcms_axle.content import Content from wheelcms_axle.node import Node from wheelcms_spokes.page import PageBase, PageType, PageForm from wheelcms_axle.content import type_registry from wheelcms_axle.impexp import WheelSerializer class CategorySerializer(WheelSerializer): extra = WheelSerializer.extra + ('items', ) def serialize_extra_items(self, field, o): """ serialize 'items'. Since it's a m2m, field will be a string """ res = [] for i in o.items.all(): res.append(dict(name="item", value=i.node.path)) return dict(name="items", value=res) def deserialize_extra_items(self, extra, tree, model): items = [] for item in tree.findall("items/item"): items.append(item.text) def delay_items(): for i in items: ## make absolute path relative to basenode. Relative path ## may be "" / None which means the root, or (relatively) ## self.basenode if not i or i == '/': n = self.basenode else: n = self.basenode.child(i.lstrip('/')) # import pytest; pytest.set_trace() model.items.add(n.content()) return delay_items class Category(PageBase): items = models.ManyToManyField(Content, related_name="categories") ## manytomany to content def __unicode__(self): return self.title class CategoryForm(PageForm): ## exclude categories? class Meta(PageForm.Meta): model = Category items = forms.ModelMultipleChoiceField(queryset=Content.objects.all(), required=False) class CategoryType(PageType): model = Category title = "A category" form = CategoryForm serializer = CategorySerializer @property def icon(self): ## assume that if this category contains children, they're ## categories themselves and this is more a collection of ## categories. 
if self.instance.node.children().exists(): return "categories.png" return "category.png" @classmethod def extend_form(cls, f, *args, **kwargs): f.fields['categories'] = forms.ModelMultipleChoiceField( queryset=Category.objects.all(), required=False) if 'instance' in kwargs: f.fields['categories'].initial = kwargs['instance'].categories.all() f.advanced_fields += ["categories"] @classmethod def extend_save(cls, form, instance, commit=True): old_save_m2m = form.save_m2m def save_m2m(): old_save_m2m() instance.categories.clear() for cat in form.cleaned_data['categories']: instance.categories.add(cat) form.save_m2m = save_m2m type_registry.register(CategoryType, extends=Content) exclude from search. Fixes #636 from django.db import models from django import forms from wheelcms_axle.content import Content from wheelcms_axle.node import Node from wheelcms_spokes.page import PageBase, PageType, PageForm from wheelcms_axle.content import type_registry from wheelcms_axle.impexp import WheelSerializer class CategorySerializer(WheelSerializer): extra = WheelSerializer.extra + ('items', ) def serialize_extra_items(self, field, o): """ serialize 'items'. Since it's a m2m, field will be a string """ res = [] for i in o.items.all(): res.append(dict(name="item", value=i.node.path)) return dict(name="items", value=res) def deserialize_extra_items(self, extra, tree, model): items = [] for item in tree.findall("items/item"): items.append(item.text) def delay_items(): for i in items: ## make absolute path relative to basenode. 
                ## Relative path may be "" / None, which means the root;
                ## otherwise resolve it relative to self.basenode.
                if not i or i == '/':
                    n = self.basenode
                else:
                    n = self.basenode.child(i.lstrip('/'))
                model.items.add(n.content())
        return delay_items


class Category(PageBase):
    """ Page-like content holding a m2m `items` relation to Content. """
    # manytomany to content; reverse accessor is `content.categories`
    items = models.ManyToManyField(Content, related_name="categories")

    def __unicode__(self):
        return self.title


class CategoryForm(PageForm):
    ## exclude categories?
    class Meta(PageForm.Meta):
        model = Category

    # the content items contained in this category
    items = forms.ModelMultipleChoiceField(queryset=Content.objects.all(),
                                           required=False)


class CategoryType(PageType):
    """ Spoke registration for Category content. """
    model = Category
    title = "A category"
    form = CategoryForm
    serializer = CategorySerializer
    # Keep categories out of the search index (fixes #636).
    add_to_index = False

    @property
    def icon(self):
        ## assume that if this category contains children, they're
        ## categories themselves and this is more a collection of
        ## categories.
        if self.instance.node.children().exists():
            return "categories.png"
        return "category.png"

    @classmethod
    def extend_form(cls, f, *args, **kwargs):
        # Add a "categories" multi-select to every content form, pre-filled
        # with the instance's current categories when editing.
        f.fields['categories'] = forms.ModelMultipleChoiceField(
            queryset=Category.objects.all(),
            required=False)
        if 'instance' in kwargs:
            f.fields['categories'].initial = kwargs['instance'].categories.all()
        f.advanced_fields += ["categories"]

    @classmethod
    def extend_save(cls, form, instance, commit=True):
        # Wrap save_m2m so the selected categories are re-applied after the
        # ordinary m2m data has been saved.
        old_save_m2m = form.save_m2m

        def save_m2m():
            old_save_m2m()
            instance.categories.clear()
            for cat in form.cleaned_data['categories']:
                instance.categories.add(cat)
        form.save_m2m = save_m2m


type_registry.register(CategoryType, extends=Content)
""" Part of this code is based on a similar implementation present in FireWorks (https://pypi.python.org/pypi/FireWorks). Work done by D. Waroquiers, A. Jain, and M. Kocher. The main difference wrt the Fireworks implementation is that the QueueAdapter objects provide a programmatic interface for setting important attributes such as the number of MPI nodes, the number of OMP threads and the memory requirements. This programmatic interface is used by the `TaskManager` for optimizing the parameters of the run before submitting the job (Abinit provides the autoparal option that allows one to get a list of parallel configuration and their expected efficiency). """ from __future__ import print_function, division import os import abc import string import copy import getpass from subprocess import Popen, PIPE from pymatgen.io.abinitio.launcher import ScriptEditor from pymatgen.util.string_utils import is_string import logging logger = logging.getLogger(__name__) __all__ = [ "MpiRunner", "qadapter_class", ] class Command(object): """ From https://gist.github.com/kirpit/1306188 Enables to run subprocess commands in a different thread with TIMEOUT option. Based on jcollado's solution: http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933 """ command = None process = None status = None output, error = '', '' def __init__(self, command): if is_string(command): import shlex command = shlex.split(command) self.command = command def run(self, timeout=None, **kwargs): """ Run a command then return: (status, output, error). 
""" def target(**kwargs): try: self.process = Popen(self.command, **kwargs) self.output, self.error = self.process.communicate() self.status = self.process.returncode except: import traceback self.error = traceback.format_exc() self.status = -1 # default stdout and stderr if 'stdout' not in kwargs: kwargs['stdout'] = PIPE if 'stderr' not in kwargs: kwargs['stderr'] = PIPE # thread import threading thread = threading.Thread(target=target, kwargs=kwargs) thread.start() thread.join(timeout) if thread.is_alive(): self.process.terminate() thread.join() return self.status, self.output, self.error class MpiRunner(object): """ This object provides an abstraction for the mpirunner provided by the different MPI libraries. It's main task is handling the different syntax and options supported by the different mpirunners. """ def __init__(self, name, type=None, options=""): self.name = name self.type = None self.options = options def string_to_run(self, executable, mpi_ncpus, stdin=None, stdout=None, stderr=None): stdin = "< " + stdin if stdin is not None else "" stdout = "> " + stdout if stdout is not None else "" stderr = "2> " + stderr if stderr is not None else "" if self.has_mpirun: if self.type is None: # TODO: better treatment of mpirun syntax. 
#se.add_line('$MPIRUN -n $MPI_NCPUS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR') num_opt = "-n " + str(mpi_ncpus) cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr]) else: raise NotImplementedError("type %s is not supported!") else: #assert mpi_ncpus == 1 cmd = " ".join([executable, stdin, stdout, stderr]) return cmd @property def has_mpirun(self): """True if we are running via mpirun, mpiexec ...""" return self.name is not None def qadapter_class(qtype): """Return the concrete `Adapter` class from a string.""" return {"shell": ShellAdapter, "slurm": SlurmAdapter, "pbs": PbsAdapter, "sge": SGEAdapter, }[qtype.lower()] class QueueAdapterError(Exception): """Error class for exceptions raise by QueueAdapter.""" class AbstractQueueAdapter(object): """ The QueueAdapter is responsible for all interactions with a specific queue management system. This includes handling all details of queue script format as well as queue submission and management. This is the Abstract base class defining the methods that must be implemented by the concrete classes. A user should extend this class with implementations that work on specific queue systems. """ __metaclass__ = abc.ABCMeta Error = QueueAdapterError def __init__(self, qparams=None, setup=None, modules=None, shell_env=None, omp_env=None, pre_run=None, post_run=None, mpi_runner=None): """ Args: setup: String or list of commands to execute during the initial setup. modules: String or list of modules to load before running the application. shell_env: Dictionary with the environment variables to export before running the application. omp_env: Dictionary with the OpenMP variables. pre_run: String or list of commands to execute before launching the calculation. post_run: String or list of commands to execute once the calculation is completed. mpi_runner: Path to the MPI runner or `MpiRunner` instance. None if not used """ # Make defensive copies so that we can change the values at runtime. 
self.qparams = qparams.copy() if qparams is not None else {} if is_string(setup): setup = [setup] self.setup = setup[:] if setup is not None else [] self.omp_env = omp_env.copy() if omp_env is not None else {} if is_string(modules): modules = [modules] self.modules = modules[:] if modules is not None else [] self.shell_env = shell_env.copy() if shell_env is not None else {} self.mpi_runner = mpi_runner if not isinstance(mpi_runner, MpiRunner): self.mpi_runner = MpiRunner(mpi_runner) if is_string(pre_run): pre_run = [pre_run] self.pre_run = pre_run[:] if pre_run is not None else [] if is_string(post_run): post_run = [post_run] self.post_run = post_run[:] if post_run is not None else [] # Parse the template so that we know the list of supported options. cls = self.__class__ if hasattr(cls, "QTEMPLATE"): # Consistency check. err_msg = "" for param in self.qparams: if param not in self.supported_qparams: err_msg += "Unsupported QUEUE parameter name %s\n" % param if err_msg: raise ValueError(err_msg) def copy(self): return copy.copy(self) def deepcopy(self): return copy.deepcopy(self) @property def supported_qparams(self): """ Dictionary with the supported parameters that can be passed to the queue manager (obtained by parsing QTEMPLATE). 
""" try: return self._supported_qparams except AttributeError: import re self._supported_qparams = re.findall("\$\$\{(\w+)\}", self.QTEMPLATE) return self._supported_qparams @property def has_mpirun(self): """True if we are using a mpirunner""" return bool(self.mpi_runner) @property def has_omp(self): """True if we are using OpenMP threads""" return hasattr(self,"omp_env") and bool(getattr(self, "omp_env")) @property def tot_ncpus(self): """Total number of CPUs employed""" return self.mpi_ncpus * self.omp_ncpus @property def omp_ncpus(self): """Number of OpenMP threads.""" if self.has_omp: return self.omp_env["OMP_NUM_THREADS"] else: return 1 @abc.abstractproperty def mpi_ncpus(self): """Number of CPUs used for MPI.""" @abc.abstractmethod def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" #@abc.abstractproperty #def queue_walltime(self): # """Returns the walltime in seconds.""" #@abc.abstractmethod #def set_queue_walltime(self): # """Set the walltime in seconds.""" #@abc.abstractproperty #def mem_per_cpu(self): # """The memory per CPU in Megabytes.""" @abc.abstractmethod def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" #@property #def tot_mem(self): # """Total memory required by the job n Megabytes.""" # return self.mem_per_cpu * self.mpi_ncpus @abc.abstractmethod def cancel(self, job_id): """ Cancel the job. Args: job_id: (in) Job identifier. Returns: Exit status. """ def _make_qheader(self, job_name, qout_path, qerr_path): """Return a string with the options that are passed to the resource manager.""" qtemplate = QScriptTemplate(self.QTEMPLATE) # set substitution dict for replacements into the template and clean null values subs_dict = {k: v for k,v in self.qparams.items() if v is not None} # Set job_name and the names for the stderr and stdout of the # queue manager (note the use of the extensions .qout and .qerr # so that we can easily locate this file. 
subs_dict['job_name'] = job_name.replace('/','_') subs_dict['_qout_path'] = qout_path subs_dict['_qerr_path'] = qerr_path # might contain unused parameters as leftover $$. unclean_template = qtemplate.safe_substitute(subs_dict) # Remove lines with leftover $$. clean_template = [] for line in unclean_template.split('\n'): if '$$' not in line: clean_template.append(line) return '\n'.join(clean_template) def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path, stdin=None, stdout=None, stderr=None): """ Returns a (multi-line) String representing the queue script, e.g. PBS script. Uses the template_file along with internal parameters to create the script. Args: launch_dir: (str) The directory the job will be launched in. qout_path Path of the Queue manager output file. qerr_path: Path of the Queue manager error file. """ # Construct the header for the Queue Manager. qheader = self._make_qheader(job_name, qout_path, qerr_path) # Add the bash section. se = ScriptEditor() if self.setup: se.add_comment("Setup section") se.add_lines(self.setup) if self.modules: se.add_comment("Load Modules") se.add_line("module purge") se.load_modules(self.modules) if self.has_omp: se.add_comment("OpenMp Environment") se.declare_vars(self.omp_env) if self.shell_env: se.add_comment("Shell Environment") se.declare_vars(self.shell_env) # Cd to launch_dir se.add_line("cd " + os.path.abspath(launch_dir)) if self.pre_run: se.add_comment("Commands before execution") se.add_lines(self.pre_run) # Construct the string to run the executable with MPI and mpi_ncpus. 
mpi_ncpus = self.mpi_ncpus line = self.mpi_runner.string_to_run(executable, mpi_ncpus, stdin=stdin, stdout=stdout, stderr=stderr) se.add_line(line) if self.post_run: se.add_comment("Commands after execution") se.add_lines(self.post_run) shell_text = se.get_script_str() return qheader + shell_text @abc.abstractmethod def submit_to_queue(self, script_file): """ Submits the job to the queue, probably using subprocess or shutil Args: script_file: (str) name of the script file to use (String) Returns: process, queue_id """ @abc.abstractmethod def get_njobs_in_queue(self, username=None): """ returns the number of jobs in the queue, probably using subprocess or shutil to call a command like 'qstat'. returns None when the number of jobs cannot be determined. Args: username: (str) the username of the jobs to count (default is to autodetect) """ #################### # Concrete classes # #################### class ShellAdapter(AbstractQueueAdapter): QTYPE = "shell" QTEMPLATE = """\ #!/bin/bash export MPI_NCPUS=$${MPI_NCPUS} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("MPI_NCPUS", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["MPI_NCPUS"] = mpi_ncpus def set_mem_per_cpu(self, mem_mb): """mem_per_cpu is not available in ShellAdapter.""" def cancel(self, job_id): return os.system("kill -9 %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: process = Popen(("/bin/bash", script_file), stderr=PIPE) queue_id = process.pid return process, queue_id except: # random error raise self.Error("Random Error ...!") def get_njobs_in_queue(self, username=None): return None class SlurmAdapter(AbstractQueueAdapter): QTYPE = "slurm" QTEMPLATE = """\ #!/bin/bash #SBATCH --ntasks=$${ntasks} #SBATCH --ntasks-per-node=$${ntasks_per_node} #SBATCH 
--cpus-per-task=$${cpus_per_task} #SBATCH --time=$${time} #SBATCH --partition=$${partition} #SBATCH --account=$${account} #SBATCH --job-name=$${job_name} #SBATCH --nodes=$${nodes} #SBATCH --mem=$${mem} #SBATCH --mem-per-cpu=$${mem_per_cpu} #SBATCH --mail-user=$${mail_user} #SBATCH --mail-type=$${mail_type} #SBATCH --constraint=$${constraint} #SBATCH --gres=$${gres} #SBATCH --requeue=$${requeue} #SBATCH --nodelist=$${nodelist} #SBATCH --propagate=$${propagate} #SBATCH --output=$${_qout_path} #SBATCH --error=$${_qerr_path} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ntasks", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ntasks"] = mpi_ncpus def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["mem_per_cpu"] = int(mem_mb) # Remove mem if it's defined. self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("scancel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['sbatch', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. SLURM returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split()[3]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code queue_id = None logger.warning('Could not parse job id following slurm...') finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... 
err_msg = ("Error in job submission with SLURM file {f} and cmd {c}\n".format(f=script_file, c=cmd) + "The error response reads: {}".format(process.stderr.read())) raise self.Error(err_msg) except: # random error, e.g. no qsub on machine! raise self.Error('Running sbatch caused an error...') def get_njobs_in_queue(self, username=None): if username is None: username = getpass.getuser() cmd = ['squeue', '-o "%u"', '-u', username] process = Popen(cmd, shell=False, stdout=PIPE) process.wait() # parse the result if process.returncode == 0: # lines should have this form # username # count lines that include the username in it outs = process.stdout.readlines() njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to squeue server? err_msg = ('Error trying to get the number of jobs in the queue using squeue service' + 'The error response reads: {}'.format(process.stderr.read())) logger.critical(err_msg) return None class PbsAdapter(AbstractQueueAdapter): QTYPE = "pbs" QTEMPLATE = """\ #!/bin/bash #PBS -A $${account} #PBS -l walltime=$${walltime} #PBS -q $${queue} #PBS -l mppwidth=$${mppwidth} #PBS -l nodes=$${nodes}:ppn=$${ppn} #PBS -N $${job_name} #PBS -o $${_qout_path} #PBS -e $${_qerr_path} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("nodes", 1) * self.qparams.get("ppn", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" if "ppn" not in self.qparams: self.qparams["ppn"] = 1 ppnode = self.qparams.get("ppn") self.qparams["nodes"] = mpi_ncpus // ppnode def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" raise NotImplementedError("") #self.qparams["mem_per_cpu"] = mem_mb ## Remove mem if it's defined. 
#self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. PBS returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split('.')[0]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-a', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should have this form # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09' # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? 
err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) logger.critical(err_msg) return None class SGEAdapter(AbstractQueueAdapter): """ Adapter for Sun Grid Engine (SGE) task submission software. """ QTYPE = "sge" QTEMPLATE = """\ #!/bin/bash #$ -A $${account} #$ -N $${job_name} #$ -l h rt=$${walltime} #$ -pe $${queue} $${ncpus} #$ -cwd #$ -j y #$ -m n #$ -e $${_qerr_path} #$ -o $${_qout_path} #$ -S /bin/bash """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ncpus", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ncpus"] = mpi_ncpus def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" raise NotImplementedError("") #self.qparams["mem_per_cpu"] = mem_mb ## Remove mem if it's defined. #self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. PBS returns 0 if the job was successful if process.returncode == 0: try: # output should of the form # Your job 1659048 ("NAME_OF_JOB") has been submitted queue_id = int(process.stdout.read().split(' ')[2]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... 
msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-a', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should have this form # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09' # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) logger.critical(err_msg) return None class QScriptTemplate(string.Template): delimiter = '$$' Modified code that probes queue in SGE. This modification implements the correct way to apply qstat with SGE. """ Part of this code is based on a similar implementation present in FireWorks (https://pypi.python.org/pypi/FireWorks). Work done by D. Waroquiers, A. Jain, and M. Kocher. The main difference wrt the Fireworks implementation is that the QueueAdapter objects provide a programmatic interface for setting important attributes such as the number of MPI nodes, the number of OMP threads and the memory requirements. This programmatic interface is used by the `TaskManager` for optimizing the parameters of the run before submitting the job (Abinit provides the autoparal option that allows one to get a list of parallel configuration and their expected efficiency). 
""" from __future__ import print_function, division import os import abc import string import copy import getpass from subprocess import Popen, PIPE from pymatgen.io.abinitio.launcher import ScriptEditor from pymatgen.util.string_utils import is_string import logging logger = logging.getLogger(__name__) __all__ = [ "MpiRunner", "qadapter_class", ] class Command(object): """ From https://gist.github.com/kirpit/1306188 Enables to run subprocess commands in a different thread with TIMEOUT option. Based on jcollado's solution: http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933 """ command = None process = None status = None output, error = '', '' def __init__(self, command): if is_string(command): import shlex command = shlex.split(command) self.command = command def run(self, timeout=None, **kwargs): """ Run a command then return: (status, output, error). """ def target(**kwargs): try: self.process = Popen(self.command, **kwargs) self.output, self.error = self.process.communicate() self.status = self.process.returncode except: import traceback self.error = traceback.format_exc() self.status = -1 # default stdout and stderr if 'stdout' not in kwargs: kwargs['stdout'] = PIPE if 'stderr' not in kwargs: kwargs['stderr'] = PIPE # thread import threading thread = threading.Thread(target=target, kwargs=kwargs) thread.start() thread.join(timeout) if thread.is_alive(): self.process.terminate() thread.join() return self.status, self.output, self.error class MpiRunner(object): """ This object provides an abstraction for the mpirunner provided by the different MPI libraries. It's main task is handling the different syntax and options supported by the different mpirunners. 
""" def __init__(self, name, type=None, options=""): self.name = name self.type = None self.options = options def string_to_run(self, executable, mpi_ncpus, stdin=None, stdout=None, stderr=None): stdin = "< " + stdin if stdin is not None else "" stdout = "> " + stdout if stdout is not None else "" stderr = "2> " + stderr if stderr is not None else "" if self.has_mpirun: if self.type is None: # TODO: better treatment of mpirun syntax. #se.add_line('$MPIRUN -n $MPI_NCPUS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR') num_opt = "-n " + str(mpi_ncpus) cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr]) else: raise NotImplementedError("type %s is not supported!") else: #assert mpi_ncpus == 1 cmd = " ".join([executable, stdin, stdout, stderr]) return cmd @property def has_mpirun(self): """True if we are running via mpirun, mpiexec ...""" return self.name is not None def qadapter_class(qtype): """Return the concrete `Adapter` class from a string.""" return {"shell": ShellAdapter, "slurm": SlurmAdapter, "pbs": PbsAdapter, "sge": SGEAdapter, }[qtype.lower()] class QueueAdapterError(Exception): """Error class for exceptions raise by QueueAdapter.""" class AbstractQueueAdapter(object): """ The QueueAdapter is responsible for all interactions with a specific queue management system. This includes handling all details of queue script format as well as queue submission and management. This is the Abstract base class defining the methods that must be implemented by the concrete classes. A user should extend this class with implementations that work on specific queue systems. """ __metaclass__ = abc.ABCMeta Error = QueueAdapterError def __init__(self, qparams=None, setup=None, modules=None, shell_env=None, omp_env=None, pre_run=None, post_run=None, mpi_runner=None): """ Args: setup: String or list of commands to execute during the initial setup. modules: String or list of modules to load before running the application. 
shell_env: Dictionary with the environment variables to export before running the application. omp_env: Dictionary with the OpenMP variables. pre_run: String or list of commands to execute before launching the calculation. post_run: String or list of commands to execute once the calculation is completed. mpi_runner: Path to the MPI runner or `MpiRunner` instance. None if not used """ # Make defensive copies so that we can change the values at runtime. self.qparams = qparams.copy() if qparams is not None else {} if is_string(setup): setup = [setup] self.setup = setup[:] if setup is not None else [] self.omp_env = omp_env.copy() if omp_env is not None else {} if is_string(modules): modules = [modules] self.modules = modules[:] if modules is not None else [] self.shell_env = shell_env.copy() if shell_env is not None else {} self.mpi_runner = mpi_runner if not isinstance(mpi_runner, MpiRunner): self.mpi_runner = MpiRunner(mpi_runner) if is_string(pre_run): pre_run = [pre_run] self.pre_run = pre_run[:] if pre_run is not None else [] if is_string(post_run): post_run = [post_run] self.post_run = post_run[:] if post_run is not None else [] # Parse the template so that we know the list of supported options. cls = self.__class__ if hasattr(cls, "QTEMPLATE"): # Consistency check. err_msg = "" for param in self.qparams: if param not in self.supported_qparams: err_msg += "Unsupported QUEUE parameter name %s\n" % param if err_msg: raise ValueError(err_msg) def copy(self): return copy.copy(self) def deepcopy(self): return copy.deepcopy(self) @property def supported_qparams(self): """ Dictionary with the supported parameters that can be passed to the queue manager (obtained by parsing QTEMPLATE). 
""" try: return self._supported_qparams except AttributeError: import re self._supported_qparams = re.findall("\$\$\{(\w+)\}", self.QTEMPLATE) return self._supported_qparams @property def has_mpirun(self): """True if we are using a mpirunner""" return bool(self.mpi_runner) @property def has_omp(self): """True if we are using OpenMP threads""" return hasattr(self,"omp_env") and bool(getattr(self, "omp_env")) @property def tot_ncpus(self): """Total number of CPUs employed""" return self.mpi_ncpus * self.omp_ncpus @property def omp_ncpus(self): """Number of OpenMP threads.""" if self.has_omp: return self.omp_env["OMP_NUM_THREADS"] else: return 1 @abc.abstractproperty def mpi_ncpus(self): """Number of CPUs used for MPI.""" @abc.abstractmethod def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" #@abc.abstractproperty #def queue_walltime(self): # """Returns the walltime in seconds.""" #@abc.abstractmethod #def set_queue_walltime(self): # """Set the walltime in seconds.""" #@abc.abstractproperty #def mem_per_cpu(self): # """The memory per CPU in Megabytes.""" @abc.abstractmethod def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" #@property #def tot_mem(self): # """Total memory required by the job n Megabytes.""" # return self.mem_per_cpu * self.mpi_ncpus @abc.abstractmethod def cancel(self, job_id): """ Cancel the job. Args: job_id: (in) Job identifier. Returns: Exit status. """ def _make_qheader(self, job_name, qout_path, qerr_path): """Return a string with the options that are passed to the resource manager.""" qtemplate = QScriptTemplate(self.QTEMPLATE) # set substitution dict for replacements into the template and clean null values subs_dict = {k: v for k,v in self.qparams.items() if v is not None} # Set job_name and the names for the stderr and stdout of the # queue manager (note the use of the extensions .qout and .qerr # so that we can easily locate this file. 
subs_dict['job_name'] = job_name.replace('/','_') subs_dict['_qout_path'] = qout_path subs_dict['_qerr_path'] = qerr_path # might contain unused parameters as leftover $$. unclean_template = qtemplate.safe_substitute(subs_dict) # Remove lines with leftover $$. clean_template = [] for line in unclean_template.split('\n'): if '$$' not in line: clean_template.append(line) return '\n'.join(clean_template) def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path, stdin=None, stdout=None, stderr=None): """ Returns a (multi-line) String representing the queue script, e.g. PBS script. Uses the template_file along with internal parameters to create the script. Args: launch_dir: (str) The directory the job will be launched in. qout_path Path of the Queue manager output file. qerr_path: Path of the Queue manager error file. """ # Construct the header for the Queue Manager. qheader = self._make_qheader(job_name, qout_path, qerr_path) # Add the bash section. se = ScriptEditor() if self.setup: se.add_comment("Setup section") se.add_lines(self.setup) if self.modules: se.add_comment("Load Modules") se.add_line("module purge") se.load_modules(self.modules) if self.has_omp: se.add_comment("OpenMp Environment") se.declare_vars(self.omp_env) if self.shell_env: se.add_comment("Shell Environment") se.declare_vars(self.shell_env) # Cd to launch_dir se.add_line("cd " + os.path.abspath(launch_dir)) if self.pre_run: se.add_comment("Commands before execution") se.add_lines(self.pre_run) # Construct the string to run the executable with MPI and mpi_ncpus. 
mpi_ncpus = self.mpi_ncpus line = self.mpi_runner.string_to_run(executable, mpi_ncpus, stdin=stdin, stdout=stdout, stderr=stderr) se.add_line(line) if self.post_run: se.add_comment("Commands after execution") se.add_lines(self.post_run) shell_text = se.get_script_str() return qheader + shell_text @abc.abstractmethod def submit_to_queue(self, script_file): """ Submits the job to the queue, probably using subprocess or shutil Args: script_file: (str) name of the script file to use (String) Returns: process, queue_id """ @abc.abstractmethod def get_njobs_in_queue(self, username=None): """ returns the number of jobs in the queue, probably using subprocess or shutil to call a command like 'qstat'. returns None when the number of jobs cannot be determined. Args: username: (str) the username of the jobs to count (default is to autodetect) """ #################### # Concrete classes # #################### class ShellAdapter(AbstractQueueAdapter): QTYPE = "shell" QTEMPLATE = """\ #!/bin/bash export MPI_NCPUS=$${MPI_NCPUS} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("MPI_NCPUS", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["MPI_NCPUS"] = mpi_ncpus def set_mem_per_cpu(self, mem_mb): """mem_per_cpu is not available in ShellAdapter.""" def cancel(self, job_id): return os.system("kill -9 %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: process = Popen(("/bin/bash", script_file), stderr=PIPE) queue_id = process.pid return process, queue_id except: # random error raise self.Error("Random Error ...!") def get_njobs_in_queue(self, username=None): return None class SlurmAdapter(AbstractQueueAdapter): QTYPE = "slurm" QTEMPLATE = """\ #!/bin/bash #SBATCH --ntasks=$${ntasks} #SBATCH --ntasks-per-node=$${ntasks_per_node} #SBATCH 
--cpus-per-task=$${cpus_per_task} #SBATCH --time=$${time} #SBATCH --partition=$${partition} #SBATCH --account=$${account} #SBATCH --job-name=$${job_name} #SBATCH --nodes=$${nodes} #SBATCH --mem=$${mem} #SBATCH --mem-per-cpu=$${mem_per_cpu} #SBATCH --mail-user=$${mail_user} #SBATCH --mail-type=$${mail_type} #SBATCH --constraint=$${constraint} #SBATCH --gres=$${gres} #SBATCH --requeue=$${requeue} #SBATCH --nodelist=$${nodelist} #SBATCH --propagate=$${propagate} #SBATCH --output=$${_qout_path} #SBATCH --error=$${_qerr_path} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ntasks", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ntasks"] = mpi_ncpus def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["mem_per_cpu"] = int(mem_mb) # Remove mem if it's defined. self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("scancel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['sbatch', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. SLURM returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split()[3]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code queue_id = None logger.warning('Could not parse job id following slurm...') finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... 
err_msg = ("Error in job submission with SLURM file {f} and cmd {c}\n".format(f=script_file, c=cmd) + "The error response reads: {}".format(process.stderr.read())) raise self.Error(err_msg) except: # random error, e.g. no qsub on machine! raise self.Error('Running sbatch caused an error...') def get_njobs_in_queue(self, username=None): if username is None: username = getpass.getuser() cmd = ['squeue', '-o "%u"', '-u', username] process = Popen(cmd, shell=False, stdout=PIPE) process.wait() # parse the result if process.returncode == 0: # lines should have this form # username # count lines that include the username in it outs = process.stdout.readlines() njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to squeue server? err_msg = ('Error trying to get the number of jobs in the queue using squeue service' + 'The error response reads: {}'.format(process.stderr.read())) logger.critical(err_msg) return None class PbsAdapter(AbstractQueueAdapter): QTYPE = "pbs" QTEMPLATE = """\ #!/bin/bash #PBS -A $${account} #PBS -l walltime=$${walltime} #PBS -q $${queue} #PBS -l mppwidth=$${mppwidth} #PBS -l nodes=$${nodes}:ppn=$${ppn} #PBS -N $${job_name} #PBS -o $${_qout_path} #PBS -e $${_qerr_path} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("nodes", 1) * self.qparams.get("ppn", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" if "ppn" not in self.qparams: self.qparams["ppn"] = 1 ppnode = self.qparams.get("ppn") self.qparams["nodes"] = mpi_ncpus // ppnode def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" raise NotImplementedError("") #self.qparams["mem_per_cpu"] = mem_mb ## Remove mem if it's defined. 
#self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. PBS returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split('.')[0]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-a', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should have this form # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09' # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? 
err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) logger.critical(err_msg) return None class SGEAdapter(AbstractQueueAdapter): """ Adapter for Sun Grid Engine (SGE) task submission software. """ QTYPE = "sge" QTEMPLATE = """\ #!/bin/bash #$ -A $${account} #$ -N $${job_name} #$ -l h rt=$${walltime} #$ -pe $${queue} $${ncpus} #$ -cwd #$ -j y #$ -m n #$ -e $${_qerr_path} #$ -o $${_qout_path} #$ -S /bin/bash """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ncpus", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ncpus"] = mpi_ncpus def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" raise NotImplementedError("") #self.qparams["mem_per_cpu"] = mem_mb ## Remove mem if it's defined. #self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. SGE returns 0 if the job was successful if process.returncode == 0: try: # output should of the form # Your job 1659048 ("NAME_OF_JOB") has been submitted queue_id = int(process.stdout.read().split(' ')[2]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... 
msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should contain username # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) logger.critical(err_msg) return None class QScriptTemplate(string.Template): delimiter = '$$'
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.

from UM.Controller import Controller
from UM.PluginRegistry import PluginRegistry
from UM.Mesh.MeshFileHandler import MeshFileHandler
from UM.Resources import Resources
from UM.Operations.OperationStack import OperationStack
from UM.Event import CallFunctionEvent
from UM.Signal import Signal, SignalEmitter
from UM.Logger import Logger
from UM.Preferences import Preferences
from UM.OutputDevice.OutputDeviceManager import OutputDeviceManager
from UM.i18n import i18nCatalog
from UM.Settings.ContainerStack import ContainerStack

import threading
import argparse
import os
import sys


##  Central object responsible for running the main event loop and creating other central objects.
#
#   The Application object is a central object for accessing other important objects. It is also
#   responsible for starting the main event loop. It is passed on to plugins so it can be easily
#   used to access objects required for those plugins.
class Application(SignalEmitter):
    ##  Init method
    #
    #   \param name \type{string} The name of the application.
    #   \param version \type{string} Version, formatted as major.minor.rev
    def __init__(self, name, version, **kwargs):
        # Identity comparison with None (PEP 8) instead of "!= None".
        if Application._instance is not None:
            raise ValueError("Duplicate singleton creation")

        # If the constructor is called and there is no instance, set the instance to self.
        # This is done because we can't make constructor private
        Application._instance = self

        self._application_name = name
        self._version = version

        os.putenv("UBUNTU_MENUPROXY", "0")  # For Ubuntu Unity this makes Qt use its own menu bar rather than pass it on to Unity.

        Signal._app = self
        Resources.ApplicationIdentifier = name
        i18nCatalog.setApplication(self)

        # Resource search paths: next to the executable, the install prefix
        # (both Unix "share" and macOS "Resources" layouts), and — when not
        # frozen into an executable — the source checkout itself.
        Resources.addSearchPath(os.path.join(os.path.dirname(sys.executable), "resources"))
        Resources.addSearchPath(os.path.join(Application.getInstallPrefix(), "share", "uranium", "resources"))
        Resources.addSearchPath(os.path.join(Application.getInstallPrefix(), "Resources", "uranium", "resources"))
        Resources.addSearchPath(os.path.join(Application.getInstallPrefix(), "Resources", self.getApplicationName(), "resources"))
        if not hasattr(sys, "frozen"):
            Resources.addSearchPath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "resources"))

        self._main_thread = threading.current_thread()

        super().__init__(**kwargs)  # Call super to make multiple inheritence work.

        self._renderer = None

        PluginRegistry.addType("backend", self.setBackend)
        PluginRegistry.addType("logger", Logger.addLogger)
        PluginRegistry.addType("extension", self.addExtension)

        preferences = Preferences.getInstance()
        preferences.addPreference("general/language", "en")
        try:
            preferences.readFromFile(Resources.getPath(Resources.Preferences, self._application_name + ".cfg"))
        except FileNotFoundError:
            # First run: no saved preferences yet, defaults apply.
            pass

        self._controller = Controller(self)
        self._mesh_file_handler = MeshFileHandler()
        self._extensions = []
        self._backend = None
        self._output_device_manager = OutputDeviceManager()

        self._required_plugins = []

        self._operation_stack = OperationStack()

        self._plugin_registry = PluginRegistry.getInstance()

        self._plugin_registry.addPluginLocation(os.path.join(Application.getInstallPrefix(), "lib", "uranium"))
        self._plugin_registry.addPluginLocation(os.path.join(os.path.dirname(sys.executable), "plugins"))
        self._plugin_registry.addPluginLocation(os.path.join(Application.getInstallPrefix(), "Resources", "uranium", "plugins"))
        self._plugin_registry.addPluginLocation(os.path.join(Application.getInstallPrefix(), "Resources", self.getApplicationName(), "plugins"))
        # Locally installed plugins
        self._plugin_registry.addPluginLocation(os.path.join(Resources.getStoragePath(Resources.Resources), "plugins"))
        if not hasattr(sys, "frozen"):
            self._plugin_registry.addPluginLocation(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "plugins"))
        self._plugin_registry.setApplication(self)

        self._parsed_command_line = None
        self.parseCommandLine()

        self._visible_messages = []
        self._message_lock = threading.Lock()
        self.showMessageSignal.connect(self.showMessage)
        self.hideMessageSignal.connect(self.hideMessage)

        self._active_container_stack = ContainerStack("empty")

    ##  Emitted when the application window was closed and we need to shut down the application
    applicationShuttingDown = Signal()

    showMessageSignal = Signal()

    hideMessageSignal = Signal()

    activeContainerStackChanged = Signal()

    ##  Set the active container stack and notify listeners of the change.
    #   \param stack \type{ContainerStack} The stack to make active.
    def setActiveContainerStack(self, stack):
        self._active_container_stack = stack
        self.activeContainerStackChanged.emit()

    ##  Get the currently active container stack.
    #   \returns \type{ContainerStack}
    def getActiveContainerStack(self):
        return self._active_container_stack

    ##  Hide a message. Must be reimplemented by subclasses that can display messages.
    def hideMessage(self, message):
        raise NotImplementedError

    ##  Show a message. Must be reimplemented by subclasses that can display messages.
    def showMessage(self, message):
        raise NotImplementedError

    ##  Get the version of the application
    #   \returns version \type{string}
    def getVersion(self):
        return self._version

    ##  Emitted when a message was added to the visible message list.
    #   \sa Message::show()
    visibleMessageAdded = Signal()

    ##  Emitted when a message was removed from the visible message list.
    #   \sa Message::hide()
    visibleMessageRemoved = Signal()

    ##  Hide message by ID (as provided by built-in id function)
    #   \param message_id \type{long}
    def hideMessageById(self, message_id):
        found_message = None
        with self._message_lock:
            for message in self._visible_messages:
                if id(message) == message_id:
                    found_message = message
        # Emit outside the lock so connected slots may touch the message list.
        if found_message is not None:
            self.hideMessageSignal.emit(found_message)

    ##  Get list of all visible messages
    #   \returns visible_messages \type{list}
    def getVisibleMessages(self):
        with self._message_lock:
            return self._visible_messages

    ##  Function that needs to be overriden by child classes with a list of plugin it needs.
    def _loadPlugins(self):
        pass

    ##  Get the value of a parsed command line option, parsing the command line first if needed.
    #   \param name \type{string} Name of the option.
    #   \param default Value returned when the option is not present.
    def getCommandLineOption(self, name, default = None):
        if not self._parsed_command_line:
            self.parseCommandLine()
            Logger.log("d", "Command line options: %s", str(self._parsed_command_line))
        return self._parsed_command_line.get(name, default)

    ##  Get name of the application.
    #   \returns application_name \type{string}
    def getApplicationName(self):
        return self._application_name

    ##  Set name of the application.
    #   \param application_name \type{string}
    def setApplicationName(self, application_name):
        self._application_name = application_name

    ##  Determine the language to use, in order of precedence:
    #   URANIUM_LANGUAGE environment variable, saved preference, the
    #   LANGUAGE environment variable, and finally English.
    def getApplicationLanguage(self):
        override_lang = os.getenv("URANIUM_LANGUAGE")
        if override_lang:
            return override_lang

        preflang = Preferences.getInstance().getValue("general/language")
        if preflang:
            return preflang

        env_lang = os.getenv("LANGUAGE")
        if env_lang:
            return env_lang

        return "en"

    ##  Application has a list of plugins that it *must* have. If it does not have these, it cannot function.
    #   These plugins can not be disabled in any way.
    #   \returns required_plugins \type{list}
    def getRequiredPlugins(self):
        return self._required_plugins

    ##  Set the plugins that the application *must* have in order to function.
    #   \param plugin_names \type{list} List of strings with the names of the required plugins
    def setRequiredPlugins(self, plugin_names):
        self._required_plugins = plugin_names

    ##  Set the backend of the application (the program that does the heavy lifting).
    #   \param backend \type{Backend}
    def setBackend(self, backend):
        self._backend = backend

    ##  Get the backend of the application (the program that does the heavy lifting).
    #   \returns Backend \type{Backend}
    def getBackend(self):
        return self._backend

    ##  Get the PluginRegistry of this application.
    #   \returns PluginRegistry \type{PluginRegistry}
    def getPluginRegistry(self):
        return self._plugin_registry

    ##  Get the Controller of this application.
    #   \returns Controller \type{Controller}
    def getController(self):
        return self._controller

    ##  Get the MeshFileHandler of this application.
    #   \returns MeshFileHandler \type{MeshFileHandler}
    def getMeshFileHandler(self):
        return self._mesh_file_handler

    ##  Get the stack of undoable operations.
    #   \returns \type{OperationStack}
    def getOperationStack(self):
        return self._operation_stack

    ##  Get the manager of output devices.
    #   \returns \type{OutputDeviceManager}
    def getOutputDeviceManager(self):
        return self._output_device_manager

    ##  Run the main eventloop.
    #   This method should be reimplemented by subclasses to start the main event loop.
    #   \exception NotImplementedError
    def run(self):
        raise NotImplementedError("Run must be implemented by application")

    ##  Return an application-specific Renderer object.
    #   \exception NotImplementedError
    def getRenderer(self):
        raise NotImplementedError("getRenderer must be implemented by subclasses.")

    ##  Post a function event onto the event loop.
    #
    #   This takes a CallFunctionEvent object and puts it into the actual event loop.
    #   \exception NotImplementedError
    def functionEvent(self, event):
        raise NotImplementedError("functionEvent must be implemented by subclasses.")

    ##  Call a function the next time the event loop runs.
    #
    #   \param function The function to call.
    #   \param args The positional arguments to pass to the function.
    #   \param kwargs The keyword arguments to pass to the function.
    def callLater(self, function, *args, **kwargs):
        event = CallFunctionEvent(function, args, kwargs)
        self.functionEvent(event)

    ##  Get the application's main thread.
    def getMainThread(self):
        return self._main_thread

    ##  Return the singleton instance of the application object
    @classmethod
    def getInstance(cls):
        # Note: Explicit use of class name to prevent issues with inheritance.
        if Application._instance is None:
            Application._instance = cls()
        return Application._instance

    ##  Parse the command line into self._parsed_command_line (a plain dict).
    def parseCommandLine(self):
        parser = argparse.ArgumentParser(prog = self.getApplicationName())
        parser.add_argument("--version",
                            action="version",
                            version="%(prog)s {0}".format(self.getVersion()))
        parser.add_argument("--external-backend",
                            dest="external-backend",
                            action="store_true", default=False,
                            help="Use an externally started backend instead of starting it automatically.")
        self.addCommandLineOptions(parser)

        self._parsed_command_line = vars(parser.parse_args())

    ##  Can be overridden to add additional command line options to the parser.
    #
    #   \param parser \type{argparse.ArgumentParser} The parser that will parse the command line.
    def addCommandLineOptions(self, parser):
        pass

    ##  Register an extension plugin object with the application.
    def addExtension(self, extension):
        self._extensions.append(extension)

    ##  Get the list of registered extension plugin objects.
    def getExtensions(self):
        return self._extensions

    ##  Return the installation prefix, derived from the running executable.
    #   When running from a Python interpreter, use the script location instead.
    @staticmethod
    def getInstallPrefix():
        if "python" in os.path.basename(sys.executable):
            return os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
        else:
            return os.path.abspath(os.path.join(os.path.dirname(sys.executable), ".."))

    # Singleton instance; set by __init__ or getInstance().
    _instance = None
from UM.Controller import Controller
from UM.PluginRegistry import PluginRegistry
from UM.Mesh.MeshFileHandler import MeshFileHandler
from UM.Resources import Resources
from UM.Operations.OperationStack import OperationStack
from UM.Event import CallFunctionEvent
from UM.Signal import Signal, SignalEmitter
from UM.Logger import Logger
from UM.Preferences import Preferences
from UM.OutputDevice.OutputDeviceManager import OutputDeviceManager
from UM.i18n import i18nCatalog
from UM.Settings.ContainerStack import ContainerStack

import threading
import argparse
import os
import sys


##  Central object responsible for running the main event loop and creating other central objects.
#
#   The Application object is a central object for accessing other important objects. It is also
#   responsible for starting the main event loop. It is passed on to plugins so it can be easily
#   used to access objects required for those plugins.
class Application(SignalEmitter):
    ##  Init method
    #
    #   \param name \type{string} The name of the application.
    #   \param version \type{string} Version, formatted as major.minor.rev
    def __init__(self, name, version, **kwargs):
        # Identity comparison with None (PEP 8) instead of "!= None".
        if Application._instance is not None:
            raise ValueError("Duplicate singleton creation")

        # If the constructor is called and there is no instance, set the instance to self.
        # This is done because we can't make constructor private
        Application._instance = self

        self._application_name = name
        self._version = version

        os.putenv("UBUNTU_MENUPROXY", "0")  # For Ubuntu Unity this makes Qt use its own menu bar rather than pass it on to Unity.

        Signal._app = self
        Resources.ApplicationIdentifier = name
        i18nCatalog.setApplication(self)

        # Resource search paths: next to the executable, the install prefix
        # (both Unix "share" and macOS "Resources" layouts), and — when not
        # frozen into an executable — the source checkout itself.
        Resources.addSearchPath(os.path.join(os.path.dirname(sys.executable), "resources"))
        Resources.addSearchPath(os.path.join(Application.getInstallPrefix(), "share", "uranium", "resources"))
        Resources.addSearchPath(os.path.join(Application.getInstallPrefix(), "Resources", "uranium", "resources"))
        Resources.addSearchPath(os.path.join(Application.getInstallPrefix(), "Resources", self.getApplicationName(), "resources"))
        if not hasattr(sys, "frozen"):
            Resources.addSearchPath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "resources"))

        self._main_thread = threading.current_thread()

        super().__init__(**kwargs)  # Call super to make multiple inheritence work.

        self._renderer = None

        PluginRegistry.addType("backend", self.setBackend)
        PluginRegistry.addType("logger", Logger.addLogger)
        PluginRegistry.addType("extension", self.addExtension)

        preferences = Preferences.getInstance()
        preferences.addPreference("general/language", "en")
        try:
            preferences.readFromFile(Resources.getPath(Resources.Preferences, self._application_name + ".cfg"))
        except FileNotFoundError:
            # First run: no saved preferences yet, defaults apply.
            pass

        self._controller = Controller(self)
        self._mesh_file_handler = MeshFileHandler()
        self._extensions = []
        self._backend = None
        self._output_device_manager = OutputDeviceManager()

        self._required_plugins = []

        self._operation_stack = OperationStack()

        self._plugin_registry = PluginRegistry.getInstance()

        self._plugin_registry.addPluginLocation(os.path.join(Application.getInstallPrefix(), "lib", "uranium"))
        self._plugin_registry.addPluginLocation(os.path.join(os.path.dirname(sys.executable), "plugins"))
        self._plugin_registry.addPluginLocation(os.path.join(Application.getInstallPrefix(), "Resources", "uranium", "plugins"))
        self._plugin_registry.addPluginLocation(os.path.join(Application.getInstallPrefix(), "Resources", self.getApplicationName(), "plugins"))
        # Locally installed plugins
        self._plugin_registry.addPluginLocation(os.path.join(Resources.getStoragePath(Resources.Resources), "plugins"))
        if not hasattr(sys, "frozen"):
            self._plugin_registry.addPluginLocation(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "plugins"))
        self._plugin_registry.setApplication(self)

        self._parsed_command_line = None
        self.parseCommandLine()

        self._visible_messages = []
        self._message_lock = threading.Lock()
        self.showMessageSignal.connect(self.showMessage)
        self.hideMessageSignal.connect(self.hideMessage)

        self._global_container_stack = ContainerStack("empty")

    ##  Emitted when the application window was closed and we need to shut down the application
    applicationShuttingDown = Signal()

    showMessageSignal = Signal()

    hideMessageSignal = Signal()

    globalContainerStackChanged = Signal()

    ##  Set the global container stack and notify listeners of the change.
    #   \param stack \type{ContainerStack} The stack to make the global one.
    def setGlobalContainerStack(self, stack):
        self._global_container_stack = stack
        self.globalContainerStackChanged.emit()

    ##  Get the current global container stack.
    #   \returns \type{ContainerStack}
    def getGlobalContainerStack(self):
        return self._global_container_stack

    ##  Hide a message. Must be reimplemented by subclasses that can display messages.
    def hideMessage(self, message):
        raise NotImplementedError

    ##  Show a message. Must be reimplemented by subclasses that can display messages.
    def showMessage(self, message):
        raise NotImplementedError

    ##  Get the version of the application
    #   \returns version \type{string}
    def getVersion(self):
        return self._version

    ##  Emitted when a message was added to the visible message list.
    #   \sa Message::show()
    visibleMessageAdded = Signal()

    ##  Emitted when a message was removed from the visible message list.
    #   \sa Message::hide()
    visibleMessageRemoved = Signal()

    ##  Hide message by ID (as provided by built-in id function)
    #   \param message_id \type{long}
    def hideMessageById(self, message_id):
        found_message = None
        with self._message_lock:
            for message in self._visible_messages:
                if id(message) == message_id:
                    found_message = message
        # Emit outside the lock so connected slots may touch the message list.
        if found_message is not None:
            self.hideMessageSignal.emit(found_message)

    ##  Get list of all visible messages
    #   \returns visible_messages \type{list}
    def getVisibleMessages(self):
        with self._message_lock:
            return self._visible_messages

    ##  Function that needs to be overriden by child classes with a list of plugin it needs.
    def _loadPlugins(self):
        pass

    ##  Get the value of a parsed command line option, parsing the command line first if needed.
    #   \param name \type{string} Name of the option.
    #   \param default Value returned when the option is not present.
    def getCommandLineOption(self, name, default = None):
        if not self._parsed_command_line:
            self.parseCommandLine()
            Logger.log("d", "Command line options: %s", str(self._parsed_command_line))
        return self._parsed_command_line.get(name, default)

    ##  Get name of the application.
    #   \returns application_name \type{string}
    def getApplicationName(self):
        return self._application_name

    ##  Set name of the application.
    #   \param application_name \type{string}
    def setApplicationName(self, application_name):
        self._application_name = application_name

    ##  Determine the language to use, in order of precedence:
    #   URANIUM_LANGUAGE environment variable, saved preference, the
    #   LANGUAGE environment variable, and finally English.
    def getApplicationLanguage(self):
        override_lang = os.getenv("URANIUM_LANGUAGE")
        if override_lang:
            return override_lang

        preflang = Preferences.getInstance().getValue("general/language")
        if preflang:
            return preflang

        env_lang = os.getenv("LANGUAGE")
        if env_lang:
            return env_lang

        return "en"

    ##  Application has a list of plugins that it *must* have. If it does not have these, it cannot function.
    #   These plugins can not be disabled in any way.
    #   \returns required_plugins \type{list}
    def getRequiredPlugins(self):
        return self._required_plugins

    ##  Set the plugins that the application *must* have in order to function.
    #   \param plugin_names \type{list} List of strings with the names of the required plugins
    def setRequiredPlugins(self, plugin_names):
        self._required_plugins = plugin_names

    ##  Set the backend of the application (the program that does the heavy lifting).
    #   \param backend \type{Backend}
    def setBackend(self, backend):
        self._backend = backend

    ##  Get the backend of the application (the program that does the heavy lifting).
    #   \returns Backend \type{Backend}
    def getBackend(self):
        return self._backend

    ##  Get the PluginRegistry of this application.
    #   \returns PluginRegistry \type{PluginRegistry}
    def getPluginRegistry(self):
        return self._plugin_registry

    ##  Get the Controller of this application.
    #   \returns Controller \type{Controller}
    def getController(self):
        return self._controller

    ##  Get the MeshFileHandler of this application.
    #   \returns MeshFileHandler \type{MeshFileHandler}
    def getMeshFileHandler(self):
        return self._mesh_file_handler

    ##  Get the stack of undoable operations.
    #   \returns \type{OperationStack}
    def getOperationStack(self):
        return self._operation_stack

    ##  Get the manager of output devices.
    #   \returns \type{OutputDeviceManager}
    def getOutputDeviceManager(self):
        return self._output_device_manager

    ##  Run the main eventloop.
    #   This method should be reimplemented by subclasses to start the main event loop.
    #   \exception NotImplementedError
    def run(self):
        raise NotImplementedError("Run must be implemented by application")

    ##  Return an application-specific Renderer object.
    #   \exception NotImplementedError
    def getRenderer(self):
        raise NotImplementedError("getRenderer must be implemented by subclasses.")

    ##  Post a function event onto the event loop.
    #
    #   This takes a CallFunctionEvent object and puts it into the actual event loop.
    #   \exception NotImplementedError
    def functionEvent(self, event):
        raise NotImplementedError("functionEvent must be implemented by subclasses.")

    ##  Call a function the next time the event loop runs.
    #
    #   \param function The function to call.
    #   \param args The positional arguments to pass to the function.
    #   \param kwargs The keyword arguments to pass to the function.
    def callLater(self, function, *args, **kwargs):
        event = CallFunctionEvent(function, args, kwargs)
        self.functionEvent(event)

    ##  Get the application's main thread.
    def getMainThread(self):
        return self._main_thread

    ##  Return the singleton instance of the application object
    @classmethod
    def getInstance(cls):
        # Note: Explicit use of class name to prevent issues with inheritance.
        if Application._instance is None:
            Application._instance = cls()
        return Application._instance

    ##  Parse the command line into self._parsed_command_line (a plain dict).
    def parseCommandLine(self):
        parser = argparse.ArgumentParser(prog = self.getApplicationName())
        parser.add_argument("--version",
                            action="version",
                            version="%(prog)s {0}".format(self.getVersion()))
        parser.add_argument("--external-backend",
                            dest="external-backend",
                            action="store_true", default=False,
                            help="Use an externally started backend instead of starting it automatically.")
        self.addCommandLineOptions(parser)

        self._parsed_command_line = vars(parser.parse_args())

    ##  Can be overridden to add additional command line options to the parser.
    #
    #   \param parser \type{argparse.ArgumentParser} The parser that will parse the command line.
    def addCommandLineOptions(self, parser):
        pass

    ##  Register an extension plugin object with the application.
    def addExtension(self, extension):
        self._extensions.append(extension)

    ##  Get the list of registered extension plugin objects.
    def getExtensions(self):
        return self._extensions

    ##  Return the installation prefix, derived from the running executable.
    #   When running from a Python interpreter, use the script location instead.
    @staticmethod
    def getInstallPrefix():
        if "python" in os.path.basename(sys.executable):
            return os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
        else:
            return os.path.abspath(os.path.join(os.path.dirname(sys.executable), ".."))

    # Singleton instance; set by __init__ or getInstance().
    _instance = None
plot: Update queue delay plot
"""Read Minigo game examples from a Bigtable. """ import collections import operator import os import re import struct import time import numpy as np from google.cloud import bigtable from google.cloud.bigtable import row_filters import tensorflow as tf import utils # Constants ROW_PREFIX = 'g_{:0>10}_' ROWCOUNT_PREFIX = 'ct_{:0>10}_' ## Column family and qualifier constants. #### Column Families METADATA = 'metadata' #### Column Qualifiers #### Note that in CBT, families are strings and qualifiers are bytes. TABLE_STATE = b'table_state' WAIT_CELL = b'wait_for_game_number' MOVE_COUNT = b'move_count' # Patterns _game_row_key = re.compile(r'g_(\d+)_m_(\d+)') def cbt_intvalue(value): """Decode a big-endian uint64. Cloud Bigtable stores integers as big-endian uint64, and performs this translation when integers are being set. But when being read, the values need to be decoded. """ return int(struct.unpack('>q', value)[0]) def make_single_array(ds, batch_size=8*1024): """Create a single numpy array from a dataset. Args: ds: a TF Dataset. batch_size: how many elements to read per pass Returns: a single numpy array. """ batches = [] with tf.Session() as sess: ds = ds.batch(batch_size) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() try: while True: batches.append(sess.run(get_next)) except tf.errors.OutOfRangeError: pass return np.concatenate(batches) def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024): """Given dataset of key names, return histogram of moves/game. Move counts are written by the game players, so this is mostly useful for repair or backfill. Args: sess: TF session ds: TF dataset containing game move keys. 
batch_size: performance tuning parameter """ ds = ds.batch(batch_size) # Turns 'g_0000001234_m_133' into 'g_0000001234' ds = ds.map(lambda x: tf.strings.substr(x, 0, 12)) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() h = collections.Counter() try: while True: h.update(sess.run(get_next)) except tf.errors.OutOfRangeError: pass # NOTE: Cannot be truly sure the count is right till the end. return h def _game_keys_as_array(ds): """Turn keys of a Bigtable dataset into an array. Take g_GGG_m_MMM and create GGG.MMM numbers. Valuable when visualizing the distribution of a given dataset in the game keyspace. """ ds = ds.map(lambda row_key, cell: row_key) # want 'g_0000001234_m_133' is '0000001234.133' and so forth ds = ds.map(lambda x: tf.strings.to_number(tf.strings.substr(x, 2, 10) + '.' + tf.strings.substr(x, 15, 3), out_type=tf.float64)) return make_single_array(ds) class GameQueue: """Queue of games stored in a Cloud Bigtable. The state of the table is stored in the `table_state` row, which includes the columns `metadata:game_counter`. """ def __init__(self, project_name, instance_name, table_name): """Constructor. Args: project_name: string name of GCP project having table. instance_name: string name of CBT instance in project. table_name: string name of CBT table in instance. """ self.bt_table = bigtable.Client( project_name).instance( instance_name).table( table_name) self.tf_table = tf.contrib.cloud.BigtableClient( project_name, instance_name).table( table_name) def latest_game_number(self): """Return the number of the next game to be written.""" game_counter = b'game_counter' table_state = self.bt_table.read_row( TABLE_STATE, filter_=row_filters.ColumnRangeFilter( METADATA, game_counter, game_counter)) return cbt_intvalue(table_state.cell_value(METADATA, game_counter)) def bleakest_moves(self, start_game, end_game): """Given a range of games, return the bleakest moves. 
Returns a list of (game, move, q) sorted by q. """ bleak = b'bleakest_q' rows = self.bt_table.read_rows( ROW_PREFIX.format(start_game), ROW_PREFIX.format(end_game), filter_=row_filters.ColumnRangeFilter( METADATA, bleak, bleak)) def parse(r): rk = str(r.row_key, 'utf-8') g, m = _game_row_key.match(rk).groups() q = r.cell_value(METADATA, bleak) return int(g), int(m), float(q) return sorted([parse(r) for r in rows], key=operator.itemgetter(2)) def require_fresh_games(self, number_fresh): """Require a given number of fresh games to be played. Args: number_fresh: integer, number of new fresh games needed Increments the cell `table_state=metadata:wait_for_game_number` by the given number of games. This will cause `self.wait_for_fresh_games()` to block until the game counter has reached this number. """ latest = self.latest_game_number() table_state = self.bt_table.row(TABLE_STATE) table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh)) table_state.commit() def wait_for_fresh_games(self, poll_interval=15.0): """Block caller until required new games have been played. Args: poll_interval: number of seconds to wait between checks If the cell `table_state=metadata:wait_for_game_number` exists, then block the caller, checking every `poll_interval` seconds, until `table_state=metadata:game_counter is at least the value in that cell. """ table_state = self.bt_table.read_row( TABLE_STATE, filter_=row_filters.ColumnRangeFilter( METADATA, WAIT_CELL, WAIT_CELL)) if table_state is None: utils.dbg('No waiting for new games needed; ' 'wait_for_game_number column not in table_state') return value = table_state.cell_value(METADATA, WAIT_CELL) if not value: utils.dbg('No waiting for new games needed; ' 'no value in wait_for_game_number cell ' 'in table_state') return wait_until_game = cbt_intvalue(value) latest_game = self.latest_game_number() while latest_game < wait_until_game: utils.dbg('Latest game {} not yet at required game {}'. 
format(latest_game, wait_until_game)) time.sleep(poll_interval) latest_game = self.latest_game_number() def count_moves_in_game_range(self, game_begin, game_end): """Count the total moves in a game range. Args: game_begin: integer, starting game game_end: integer, ending game Uses the `ct_` keyspace for rapid move summary. """ rows = self.bt_table.read_rows( ROWCOUNT_PREFIX.format(game_begin), ROWCOUNT_PREFIX.format(game_end), filter_=row_filters.ColumnRangeFilter( METADATA, MOVE_COUNT, MOVE_COUNT)) return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows]) def moves_from_games(self, start_game, end_game, moves, shuffle, column_family, column): """Dataset of samples and/or shuffled moves from game range. Args: n: an integer indicating how many past games should be sourced. moves: an integer indicating how many moves should be sampled from those N games. column_family: name of the column family containing move examples. column: name of the column containing move examples. shuffle: if True, shuffle the selected move examples. Returns: A dataset containing no more than `moves` examples, sampled randomly from the last `n` games in the table. """ start_row = ROW_PREFIX.format(start_game) end_row = ROW_PREFIX.format(end_game) # NOTE: Choose a probability high enough to guarantee at least the # required number of moves, by using a slightly lower estimate # of the total moves, then trimming the result. 
total_moves = self.count_moves_in_game_range(start_game, end_game) probability = moves / (total_moves * 0.99) utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f' % ( start_row, end_row, total_moves, probability)) shards = 8 ds = self.tf_table.parallel_scan_range(start_row, end_row, probability=probability, num_parallel_scans=shards, columns=[(column_family, column)]) if shuffle: rds = tf.data.Dataset.from_tensor_slices( tf.random_shuffle(tf.range(0, shards, dtype=tf.int64))) ds = rds.apply( tf.contrib.data.parallel_interleave( lambda x: ds.shard(shards, x), cycle_length=shards, block_length=1024)) ds = ds.shuffle(shards * 1024 * 2) ds = ds.take(moves) return ds def _write_move_counts(self, sess, h): """Add move counts from the given histogram to the table. Used to update the move counts in an existing table. Should not be needed except for backfill or repair. Args: sess: TF session to use for doing a Bigtable write. tf_table: TF Cloud Bigtable to use for writing. h: a dictionary keyed by game row prefix ("g_0023561") whose values are the move counts for each game. """ def gen(): for k, v in h.items(): # The keys in the histogram may be of type 'bytes' k = str(k, 'utf-8') vs = str(v) yield (k.replace('g_', 'ct_') + '_%d' % v, vs) yield (k + '_m_000', vs) mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string)) wr_op = self.tf_table.write(mc, column_families=[METADATA], columns=[MOVE_COUNT]) sess.run(wr_op) def update_move_counts(self, start_game, end_game, interval=1000): """Used to update the move_count cell for older games. Should not be needed except for backfill or repair. move_count cells will be updated in both g_<game_id>_m_000 rows and ct_<game_id>_<move_count> rows. 
""" for g in range(start_game, end_game, interval): with tf.Session() as sess: start_row = ROW_PREFIX.format(g) end_row = ROW_PREFIX.format(g + interval) print('Range:', start_row, end_row) start_time = time.time() ds = self.tf_table.keys_by_range_dataset(start_row, end_row) h = _histogram_move_keys_by_game(sess, ds) self._write_move_counts(sess, h) end_time = time.time() elapsed = end_time - start_time print(' games/sec:', len(h)/elapsed) # TODO(president.jackson): document these _games = GameQueue(os.environ['PROJECT'], os.environ['CBT_INSTANCE'], os.environ['CBT_TABLE']) def get_unparsed_moves_from_last_n_games(n, moves=2**21, shuffle=True, column_family='tfexample', column='example'): """Get a dataset of serialized TFExamples from the last N games. Args: n: an integer indicating how many past games should be sourced. moves: an integer indicating how many moves should be sampled from those N games. column_family: name of the column family containing move examples. column: name of the column containing move examples. shuffle: if True, shuffle the selected move examples. Returns: A dataset containing no more than `moves` examples, sampled randomly from the last `n` games in the table. """ _games.wait_for_fresh_games() latest_game = int(_games.latest_game_number()) utils.dbg('Latest game: %s' % latest_game) if latest_game == 0: raise ValueError('Cannot find a latest game in the table') start = int(max(0, latest_game - n)) ds = _games.moves_from_games(start, latest_game, moves, shuffle, column_family, column) return ds.map(lambda row_name, s: s) def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8): """Count and return all the elements in the given dataset. Debugging function. The elements in a dataset cannot be counted without enumerating all of them. By counting in batch and in parallel, this method allows rapid traversal of the dataset. Args: ds: The dataset whose elements should be counted. batch_size: the number of elements to count a a time. 
parallel_batch: how many batches to count in parallel. Returns: The number of elements in the dataset. """ with tf.Session() as sess: dsc = ds.apply(tf.contrib.data.enumerate_dataset()) dsc = dsc.apply( tf.contrib.data.map_and_batch(lambda c, v: c, batch_size, num_parallel_batches=parallel_batch)) iterator = dsc.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() counted = 0 try: while True: # The numbers in the tensors are 0-based indicies, # so add 1 to get the number counted. counted = sess.run(tf.reduce_max(get_next)) + 1 utils.dbg('Counted so far: %d' % counted) except tf.errors.OutOfRangeError: pass utils.dbg('Counted total: %d' % counted) return counted Document GameQueue environment variables """Read Minigo game examples from a Bigtable. """ import collections import operator import os import re import struct import time import numpy as np from google.cloud import bigtable from google.cloud.bigtable import row_filters import tensorflow as tf import utils # Constants ROW_PREFIX = 'g_{:0>10}_' ROWCOUNT_PREFIX = 'ct_{:0>10}_' ## Column family and qualifier constants. #### Column Families METADATA = 'metadata' #### Column Qualifiers #### Note that in CBT, families are strings and qualifiers are bytes. TABLE_STATE = b'table_state' WAIT_CELL = b'wait_for_game_number' MOVE_COUNT = b'move_count' # Patterns _game_row_key = re.compile(r'g_(\d+)_m_(\d+)') def cbt_intvalue(value): """Decode a big-endian uint64. Cloud Bigtable stores integers as big-endian uint64, and performs this translation when integers are being set. But when being read, the values need to be decoded. """ return int(struct.unpack('>q', value)[0]) def make_single_array(ds, batch_size=8*1024): """Create a single numpy array from a dataset. Args: ds: a TF Dataset. batch_size: how many elements to read per pass Returns: a single numpy array. 
""" batches = [] with tf.Session() as sess: ds = ds.batch(batch_size) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() try: while True: batches.append(sess.run(get_next)) except tf.errors.OutOfRangeError: pass return np.concatenate(batches) def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024): """Given dataset of key names, return histogram of moves/game. Move counts are written by the game players, so this is mostly useful for repair or backfill. Args: sess: TF session ds: TF dataset containing game move keys. batch_size: performance tuning parameter """ ds = ds.batch(batch_size) # Turns 'g_0000001234_m_133' into 'g_0000001234' ds = ds.map(lambda x: tf.strings.substr(x, 0, 12)) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() h = collections.Counter() try: while True: h.update(sess.run(get_next)) except tf.errors.OutOfRangeError: pass # NOTE: Cannot be truly sure the count is right till the end. return h def _game_keys_as_array(ds): """Turn keys of a Bigtable dataset into an array. Take g_GGG_m_MMM and create GGG.MMM numbers. Valuable when visualizing the distribution of a given dataset in the game keyspace. """ ds = ds.map(lambda row_key, cell: row_key) # want 'g_0000001234_m_133' is '0000001234.133' and so forth ds = ds.map(lambda x: tf.strings.to_number(tf.strings.substr(x, 2, 10) + '.' + tf.strings.substr(x, 15, 3), out_type=tf.float64)) return make_single_array(ds) class GameQueue: """Queue of games stored in a Cloud Bigtable. The state of the table is stored in the `table_state` row, which includes the columns `metadata:game_counter`. """ def __init__(self, project_name, instance_name, table_name): """Constructor. Args: project_name: string name of GCP project having table. instance_name: string name of CBT instance in project. table_name: string name of CBT table in instance. 
""" self.bt_table = bigtable.Client( project_name).instance( instance_name).table( table_name) self.tf_table = tf.contrib.cloud.BigtableClient( project_name, instance_name).table( table_name) def latest_game_number(self): """Return the number of the next game to be written.""" game_counter = b'game_counter' table_state = self.bt_table.read_row( TABLE_STATE, filter_=row_filters.ColumnRangeFilter( METADATA, game_counter, game_counter)) return cbt_intvalue(table_state.cell_value(METADATA, game_counter)) def bleakest_moves(self, start_game, end_game): """Given a range of games, return the bleakest moves. Returns a list of (game, move, q) sorted by q. """ bleak = b'bleakest_q' rows = self.bt_table.read_rows( ROW_PREFIX.format(start_game), ROW_PREFIX.format(end_game), filter_=row_filters.ColumnRangeFilter( METADATA, bleak, bleak)) def parse(r): rk = str(r.row_key, 'utf-8') g, m = _game_row_key.match(rk).groups() q = r.cell_value(METADATA, bleak) return int(g), int(m), float(q) return sorted([parse(r) for r in rows], key=operator.itemgetter(2)) def require_fresh_games(self, number_fresh): """Require a given number of fresh games to be played. Args: number_fresh: integer, number of new fresh games needed Increments the cell `table_state=metadata:wait_for_game_number` by the given number of games. This will cause `self.wait_for_fresh_games()` to block until the game counter has reached this number. """ latest = self.latest_game_number() table_state = self.bt_table.row(TABLE_STATE) table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh)) table_state.commit() def wait_for_fresh_games(self, poll_interval=15.0): """Block caller until required new games have been played. Args: poll_interval: number of seconds to wait between checks If the cell `table_state=metadata:wait_for_game_number` exists, then block the caller, checking every `poll_interval` seconds, until `table_state=metadata:game_counter is at least the value in that cell. 
""" table_state = self.bt_table.read_row( TABLE_STATE, filter_=row_filters.ColumnRangeFilter( METADATA, WAIT_CELL, WAIT_CELL)) if table_state is None: utils.dbg('No waiting for new games needed; ' 'wait_for_game_number column not in table_state') return value = table_state.cell_value(METADATA, WAIT_CELL) if not value: utils.dbg('No waiting for new games needed; ' 'no value in wait_for_game_number cell ' 'in table_state') return wait_until_game = cbt_intvalue(value) latest_game = self.latest_game_number() while latest_game < wait_until_game: utils.dbg('Latest game {} not yet at required game {}'. format(latest_game, wait_until_game)) time.sleep(poll_interval) latest_game = self.latest_game_number() def count_moves_in_game_range(self, game_begin, game_end): """Count the total moves in a game range. Args: game_begin: integer, starting game game_end: integer, ending game Uses the `ct_` keyspace for rapid move summary. """ rows = self.bt_table.read_rows( ROWCOUNT_PREFIX.format(game_begin), ROWCOUNT_PREFIX.format(game_end), filter_=row_filters.ColumnRangeFilter( METADATA, MOVE_COUNT, MOVE_COUNT)) return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows]) def moves_from_games(self, start_game, end_game, moves, shuffle, column_family, column): """Dataset of samples and/or shuffled moves from game range. Args: n: an integer indicating how many past games should be sourced. moves: an integer indicating how many moves should be sampled from those N games. column_family: name of the column family containing move examples. column: name of the column containing move examples. shuffle: if True, shuffle the selected move examples. Returns: A dataset containing no more than `moves` examples, sampled randomly from the last `n` games in the table. 
""" start_row = ROW_PREFIX.format(start_game) end_row = ROW_PREFIX.format(end_game) # NOTE: Choose a probability high enough to guarantee at least the # required number of moves, by using a slightly lower estimate # of the total moves, then trimming the result. total_moves = self.count_moves_in_game_range(start_game, end_game) probability = moves / (total_moves * 0.99) utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f' % ( start_row, end_row, total_moves, probability)) shards = 8 ds = self.tf_table.parallel_scan_range(start_row, end_row, probability=probability, num_parallel_scans=shards, columns=[(column_family, column)]) if shuffle: rds = tf.data.Dataset.from_tensor_slices( tf.random_shuffle(tf.range(0, shards, dtype=tf.int64))) ds = rds.apply( tf.contrib.data.parallel_interleave( lambda x: ds.shard(shards, x), cycle_length=shards, block_length=1024)) ds = ds.shuffle(shards * 1024 * 2) ds = ds.take(moves) return ds def _write_move_counts(self, sess, h): """Add move counts from the given histogram to the table. Used to update the move counts in an existing table. Should not be needed except for backfill or repair. Args: sess: TF session to use for doing a Bigtable write. tf_table: TF Cloud Bigtable to use for writing. h: a dictionary keyed by game row prefix ("g_0023561") whose values are the move counts for each game. """ def gen(): for k, v in h.items(): # The keys in the histogram may be of type 'bytes' k = str(k, 'utf-8') vs = str(v) yield (k.replace('g_', 'ct_') + '_%d' % v, vs) yield (k + '_m_000', vs) mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string)) wr_op = self.tf_table.write(mc, column_families=[METADATA], columns=[MOVE_COUNT]) sess.run(wr_op) def update_move_counts(self, start_game, end_game, interval=1000): """Used to update the move_count cell for older games. Should not be needed except for backfill or repair. move_count cells will be updated in both g_<game_id>_m_000 rows and ct_<game_id>_<move_count> rows. 
""" for g in range(start_game, end_game, interval): with tf.Session() as sess: start_row = ROW_PREFIX.format(g) end_row = ROW_PREFIX.format(g + interval) print('Range:', start_row, end_row) start_time = time.time() ds = self.tf_table.keys_by_range_dataset(start_row, end_row) h = _histogram_move_keys_by_game(sess, ds) self._write_move_counts(sess, h) end_time = time.time() elapsed = end_time - start_time print(' games/sec:', len(h)/elapsed) # PROJECT: the GCP project in which the Cloud Bigtable is located. # CBT_INSTANCE: identifier of Cloud Bigtable instance in PROJECT. # CBT_TABLE: identifier of Cloud Bigtable table in CBT_INSTANCE. _games = GameQueue(os.environ['PROJECT'], os.environ['CBT_INSTANCE'], os.environ['CBT_TABLE']) def get_unparsed_moves_from_last_n_games(n, moves=2**21, shuffle=True, column_family='tfexample', column='example'): """Get a dataset of serialized TFExamples from the last N games. Args: n: an integer indicating how many past games should be sourced. moves: an integer indicating how many moves should be sampled from those N games. column_family: name of the column family containing move examples. column: name of the column containing move examples. shuffle: if True, shuffle the selected move examples. Returns: A dataset containing no more than `moves` examples, sampled randomly from the last `n` games in the table. """ _games.wait_for_fresh_games() latest_game = int(_games.latest_game_number()) utils.dbg('Latest game: %s' % latest_game) if latest_game == 0: raise ValueError('Cannot find a latest game in the table') start = int(max(0, latest_game - n)) ds = _games.moves_from_games(start, latest_game, moves, shuffle, column_family, column) return ds.map(lambda row_name, s: s) def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8): """Count and return all the elements in the given dataset. Debugging function. The elements in a dataset cannot be counted without enumerating all of them. 
By counting in batch and in parallel, this method allows rapid traversal of the dataset. Args: ds: The dataset whose elements should be counted. batch_size: the number of elements to count a a time. parallel_batch: how many batches to count in parallel. Returns: The number of elements in the dataset. """ with tf.Session() as sess: dsc = ds.apply(tf.contrib.data.enumerate_dataset()) dsc = dsc.apply( tf.contrib.data.map_and_batch(lambda c, v: c, batch_size, num_parallel_batches=parallel_batch)) iterator = dsc.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() counted = 0 try: while True: # The numbers in the tensors are 0-based indicies, # so add 1 to get the number counted. counted = sess.run(tf.reduce_max(get_next)) + 1 utils.dbg('Counted so far: %d' % counted) except tf.errors.OutOfRangeError: pass utils.dbg('Counted total: %d' % counted) return counted
from django.contrib import admin from django.conf.urls import patterns from django.http import HttpResponseRedirect from game.tasks import validate_bot import itertools # Register your models here. from game.models import ( Bot, Challenge, UserProfile, FinalChallenge, ) from game.tasks import run_match class UserProfileAdmin(admin.ModelAdmin): list_display = ('user', 'score', 'current_bot', 'code_update_date') list_filter = ('current_bot') class BotAdmin(admin.ModelAdmin): actions = ['validate_bot'] list_display = ('creation_date', 'modification_date', 'owner', 'valid') list_filter = ('valid') def validate_bot(self, request, queryset): for bot in queryset: validate_bot.delay(bot.id, bot.code) class ChallengeAdmin(admin.ModelAdmin): list_display = ('creation_date', 'requested_by', 'challenger_bot', 'challenged_bot', 'played', 'winner_player', 'canceled') list_filter = ('canceled', 'final_challenge') class FinalChallengeAdmin(admin.ModelAdmin): def get_urls(self): urls = super(FinalChallengeAdmin, self).get_urls() my_urls = patterns('', (r'^final_challenge/$', self.final_challenge) ) return my_urls + urls actions = ['final_challenge'] def final_challenge(self, request, queryset): profiles = UserProfile.objects.filter(user__is_superuser=False, user__is_active=True).all() if not queryset: return HttpResponseRedirect('/admin') final_challenge = queryset[0] for up_player1, up_player2 in itertools.product(profiles, repeat=2): if (up_player1 == up_player2 or not up_player1.current_bot or not up_player2.current_bot): continue challenge = Challenge() challenge.requested_by = up_player1 challenge.challenger_bot = up_player1.current_bot challenge.challenged_bot = up_player2.current_bot challenge.final_challenge = final_challenge challenge.save() final_challenge.challenge_set.add(challenge) # dispatch the new task players = {up_player1.user.username: up_player1.current_bot.code, up_player2.user.username: up_player2.current_bot.code, } run_match.delay(challenge.id, players) 
final_challenge.save() return HttpResponseRedirect('/admin') admin.site.register(FinalChallenge, FinalChallengeAdmin) admin.site.register(UserProfile, UserProfileAdmin) admin.site.register(Challenge, ChallengeAdmin) admin.site.register(Bot, BotAdmin) added missing commas to tuple from django.contrib import admin from django.conf.urls import patterns from django.http import HttpResponseRedirect from game.tasks import validate_bot import itertools # Register your models here. from game.models import ( Bot, Challenge, UserProfile, FinalChallenge, ) from game.tasks import run_match class UserProfileAdmin(admin.ModelAdmin): list_display = ('user', 'score', 'current_bot', 'code_update_date') list_filter = ('current_bot',) class BotAdmin(admin.ModelAdmin): actions = ['validate_bot'] list_display = ('creation_date', 'modification_date', 'owner', 'valid') list_filter = ('valid',) def validate_bot(self, request, queryset): for bot in queryset: validate_bot.delay(bot.id, bot.code) class ChallengeAdmin(admin.ModelAdmin): list_display = ('creation_date', 'requested_by', 'challenger_bot', 'challenged_bot', 'played', 'winner_player', 'canceled') list_filter = ('canceled', 'final_challenge') class FinalChallengeAdmin(admin.ModelAdmin): def get_urls(self): urls = super(FinalChallengeAdmin, self).get_urls() my_urls = patterns('', (r'^final_challenge/$', self.final_challenge) ) return my_urls + urls actions = ['final_challenge'] def final_challenge(self, request, queryset): profiles = UserProfile.objects.filter(user__is_superuser=False, user__is_active=True).all() if not queryset: return HttpResponseRedirect('/admin') final_challenge = queryset[0] for up_player1, up_player2 in itertools.product(profiles, repeat=2): if (up_player1 == up_player2 or not up_player1.current_bot or not up_player2.current_bot): continue challenge = Challenge() challenge.requested_by = up_player1 challenge.challenger_bot = up_player1.current_bot challenge.challenged_bot = up_player2.current_bot 
challenge.final_challenge = final_challenge challenge.save() final_challenge.challenge_set.add(challenge) # dispatch the new task players = {up_player1.user.username: up_player1.current_bot.code, up_player2.user.username: up_player2.current_bot.code, } run_match.delay(challenge.id, players) final_challenge.save() return HttpResponseRedirect('/admin') admin.site.register(FinalChallenge, FinalChallengeAdmin) admin.site.register(UserProfile, UserProfileAdmin) admin.site.register(Challenge, ChallengeAdmin) admin.site.register(Bot, BotAdmin)
""" Part of this code is based on a similar implementation present in FireWorks (https://pypi.python.org/pypi/FireWorks). Work done by D. Waroquiers, A. Jain, and M. Kocher. The main difference wrt the Fireworks implementation is that the QueueAdapter objects provide a programmatic interface for setting important attributes such as the number of MPI nodes, the number of OMP threads and the memory requirements. This programmatic interface is used by the `TaskManager` for optimizing the parameters of the run before submitting the job (Abinit provides the autoparal option that allows one to get a list of parallel configuration and their expected efficiency). """ from __future__ import print_function, division import os import abc import string import copy import getpass import warnings from subprocess import Popen, PIPE from pymatgen.io.abinitio.launcher import ScriptEditor from pymatgen.util.string_utils import is_string import logging logger = logging.getLogger(__name__) __all__ = [ "MpiRunner", "qadapter_class", ] class Command(object): """ From https://gist.github.com/kirpit/1306188 Enables to run subprocess commands in a different thread with TIMEOUT option. Based on jcollado's solution: http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933 """ command = None process = None status = None output, error = '', '' def __init__(self, command): if is_string(command): import shlex command = shlex.split(command) self.command = command def run(self, timeout=None, **kwargs): """ Run a command then return: (status, output, error). 
""" def target(**kwargs): try: self.process = Popen(self.command, **kwargs) self.output, self.error = self.process.communicate() self.status = self.process.returncode except: import traceback self.error = traceback.format_exc() self.status = -1 # default stdout and stderr if 'stdout' not in kwargs: kwargs['stdout'] = PIPE if 'stderr' not in kwargs: kwargs['stderr'] = PIPE # thread import threading thread = threading.Thread(target=target, kwargs=kwargs) thread.start() thread.join(timeout) if thread.is_alive(): self.process.terminate() thread.join() return self.status, self.output, self.error class MpiRunner(object): """ This object provides an abstraction for the mpirunner provided by the different MPI libraries. It's main task is handling the different syntax and options supported by the different mpirunners. """ def __init__(self, name, type=None, options=""): self.name = name self.type = None self.options = options def string_to_run(self, executable, mpi_ncpus, stdin=None, stdout=None, stderr=None): stdin = "< " + stdin if stdin is not None else "" stdout = "> " + stdout if stdout is not None else "" stderr = "2> " + stderr if stderr is not None else "" if self.has_mpirun: if self.type is None: # TODO: better treatment of mpirun syntax. 
#se.add_line('$MPIRUN -n $MPI_NCPUS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR') num_opt = "-n " + str(mpi_ncpus) cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr]) else: raise NotImplementedError("type %s is not supported!") else: #assert mpi_ncpus == 1 cmd = " ".join([executable, stdin, stdout, stderr]) return cmd @property def has_mpirun(self): """True if we are running via mpirun, mpiexec ...""" return self.name is not None def qadapter_class(qtype): """Return the concrete `Adapter` class from a string.""" return {"shell": ShellAdapter, "slurm": SlurmAdapter, "pbs": PbsAdapter, "sge": SGEAdapter, }[qtype.lower()] class QueueAdapterError(Exception): """Error class for exceptions raise by QueueAdapter.""" class AbstractQueueAdapter(object): """ The QueueAdapter is responsible for all interactions with a specific queue management system. This includes handling all details of queue script format as well as queue submission and management. This is the Abstract base class defining the methods that must be implemented by the concrete classes. A user should extend this class with implementations that work on specific queue systems. """ __metaclass__ = abc.ABCMeta Error = QueueAdapterError def __init__(self, qparams=None, setup=None, modules=None, shell_env=None, omp_env=None, pre_run=None, post_run=None, mpi_runner=None): """ Args: setup: String or list of commands to execute during the initial setup. modules: String or list of modules to load before running the application. shell_env: Dictionary with the environment variables to export before running the application. omp_env: Dictionary with the OpenMP variables. pre_run: String or list of commands to execute before launching the calculation. post_run: String or list of commands to execute once the calculation is completed. mpi_runner: Path to the MPI runner or `MpiRunner` instance. None if not used """ # Make defensive copies so that we can change the values at runtime. 
self.qparams = qparams.copy() if qparams is not None else {} self._verbatim = [] if is_string(setup): setup = [setup] self.setup = setup[:] if setup is not None else [] self.omp_env = omp_env.copy() if omp_env is not None else {} if is_string(modules): modules = [modules] self.modules = modules[:] if modules is not None else [] self.shell_env = shell_env.copy() if shell_env is not None else {} self.mpi_runner = mpi_runner if not isinstance(mpi_runner, MpiRunner): self.mpi_runner = MpiRunner(mpi_runner) if is_string(pre_run): pre_run = [pre_run] self.pre_run = pre_run[:] if pre_run is not None else [] if is_string(post_run): post_run = [post_run] self.post_run = post_run[:] if post_run is not None else [] # Parse the template so that we know the list of supported options. cls = self.__class__ if hasattr(cls, "QTEMPLATE"): # Consistency check. err_msg = "" for param in self.qparams: if param not in self.supported_qparams: err_msg += "Unsupported QUEUE parameter name %s\n" % param err_msg += "Supported are: \n" for param_sup in self.supported_qparams: err_msg += " %s \n" % param_sup if err_msg: raise ValueError(err_msg) def copy(self): return copy.copy(self) def deepcopy(self): return copy.deepcopy(self) @property def supported_qparams(self): """ Dictionary with the supported parameters that can be passed to the queue manager (obtained by parsing QTEMPLATE). 
""" try: return self._supported_qparams except AttributeError: import re self._supported_qparams = re.findall("\$\$\{(\w+)\}", self.QTEMPLATE) return self._supported_qparams @property def has_mpirun(self): """True if we are using a mpirunner""" return bool(self.mpi_runner) @property def has_omp(self): """True if we are using OpenMP threads""" return hasattr(self, "omp_env") and bool(getattr(self, "omp_env")) @property def tot_ncpus(self): """Total number of CPUs employed""" return self.mpi_ncpus * self.omp_ncpus @property def omp_ncpus(self): """Number of OpenMP threads.""" if self.has_omp: return self.omp_env["OMP_NUM_THREADS"] else: return 1 @abc.abstractmethod def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" @abc.abstractproperty def mpi_ncpus(self): """Number of CPUs used for MPI.""" @abc.abstractmethod def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" #@abc.abstractproperty #def queue_walltime(self): # """Returns the walltime in seconds.""" #@abc.abstractmethod #def set_queue_walltime(self): # """Set the walltime in seconds.""" #@abc.abstractproperty #def mem_per_cpu(self): # """The memory per CPU in Megabytes.""" @abc.abstractmethod def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" #@property #def tot_mem(self): # """Total memory required by the job n Megabytes.""" # return self.mem_per_cpu * self.mpi_ncpus @abc.abstractmethod def cancel(self, job_id): """ Cancel the job. Args: job_id: (in) Job identifier. Returns: Exit status. """ def add_verbatim(self, lines): """ Add a list of lines or just a string to the header. 
No programmatic interface to change these options is provided """ if is_string(lines): lines = [lines] self._verbatim.extend(lines) def _make_qheader(self, job_name, qout_path, qerr_path): """Return a string with the options that are passed to the resource manager.""" qtemplate = QScriptTemplate(self.QTEMPLATE) # set substitution dict for replacements into the template and clean null values subs_dict = {k: v for k, v in self.qparams.items() if v is not None} # Set job_name and the names for the stderr and stdout of the # queue manager (note the use of the extensions .qout and .qerr # so that we can easily locate this file. subs_dict['job_name'] = job_name.replace('/', '_') subs_dict['_qout_path'] = qout_path subs_dict['_qerr_path'] = qerr_path # might contain unused parameters as leftover $$. unclean_template = qtemplate.safe_substitute(subs_dict) # Remove lines with leftover $$. clean_template = [] for line in unclean_template.split('\n'): if '$$' not in line: clean_template.append(line) # Add verbatim lines if self._verbatim: clean_template.extend(self._verbatim) return '\n'.join(clean_template) def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path, stdin=None, stdout=None, stderr=None): """ Returns a (multi-line) String representing the queue script, e.g. PBS script. Uses the template_file along with internal parameters to create the script. Args: job_name: Name of the job. launch_dir: (str) The directory the job will be launched in. qout_path Path of the Queue manager output file. qerr_path: Path of the Queue manager error file. """ # PBS does not accept job_names longer than 15 chars. if len(job_name) > 14 and isinstance(self, PbsAdapter): job_name = job_name[:14] # Construct the header for the Queue Manager. qheader = self._make_qheader(job_name, qout_path, qerr_path) # Add the bash section. 
se = ScriptEditor() if self.setup: se.add_comment("Setup section") se.add_lines(self.setup) se.add_emptyline() if self.modules: se.add_comment("Load Modules") se.add_line("module purge") se.load_modules(self.modules) se.add_emptyline() if self.has_omp: se.add_comment("OpenMp Environment") se.declare_vars(self.omp_env) se.add_emptyline() if self.shell_env: se.add_comment("Shell Environment") se.declare_vars(self.shell_env) se.add_emptyline() # Cd to launch_dir se.add_line("cd " + os.path.abspath(launch_dir)) if self.pre_run: se.add_comment("Commands before execution") se.add_lines(self.pre_run) se.add_emptyline() # Construct the string to run the executable with MPI and mpi_ncpus. mpi_ncpus = self.mpi_ncpus line = self.mpi_runner.string_to_run(executable, mpi_ncpus, stdin=stdin, stdout=stdout, stderr=stderr) se.add_line(line) if self.post_run: se.add_emptyline() se.add_comment("Commands after execution") se.add_lines(self.post_run) shell_text = se.get_script_str() return qheader + shell_text + "\n" @abc.abstractmethod def submit_to_queue(self, script_file): """ Submits the job to the queue, probably using subprocess or shutil Args: script_file: (str) name of the script file to use (String) Returns: process, queue_id """ @abc.abstractmethod def get_njobs_in_queue(self, username=None): """ returns the number of jobs in the queue, probably using subprocess or shutil to call a command like 'qstat'. returns None when the number of jobs cannot be determined. Args: username: (str) the username of the jobs to count (default is to autodetect) """ #some method to fix problems @abc.abstractmethod def exclude_nodes(self, nodes): """ Method to exclude nodes in the calculation """ @abc.abstractmethod def increase_mem(self, factor): """ Method to increase the amount of memory asked for, by factor. """ @abc.abstractmethod def increase_time(self, factor): """ Method to increase the available wall time asked for, by factor. 
""" @abc.abstractmethod def increase_cpus(self, factor): """ Method to increase the number of cpus asked for. """ # @abc.abstractmethod def increase_resources(self): """ Method to generally increase resources. """ return False #################### # Concrete classes # #################### class ShellAdapter(AbstractQueueAdapter): QTYPE = "shell" QTEMPLATE = """\ #!/bin/bash export MPI_NCPUS=$${MPI_NCPUS} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("MPI_NCPUS", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["MPI_NCPUS"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus def set_mem_per_cpu(self, mem_mb): """mem_per_cpu is not available in ShellAdapter.""" def cancel(self, job_id): return os.system("kill -9 %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: process = Popen(("/bin/bash", script_file), stderr=PIPE) queue_id = process.pid return process, queue_id except: # random error raise self.Error("Random Error ...!") def get_njobs_in_queue(self, username=None): return None def exclude_nodes(self, nodes): return False def increase_mem(self, factor): return False def increase_time(self, factor): return False def increase_cpus(self, factor): return False class SlurmAdapter(AbstractQueueAdapter): QTYPE = "slurm" QTEMPLATE = """\ #!/bin/bash #SBATCH --ntasks=$${ntasks} #SBATCH --ntasks-per-node=$${ntasks_per_node} #SBATCH --cpus-per-task=$${cpus_per_task} #SBATCH --time=$${time} #SBATCH --partition=$${partition} #SBATCH --account=$${account} #SBATCH --job-name=$${job_name} #SBATCH --nodes=$${nodes} #SBATCH --exclude=$${exclude_nodes} #SBATCH --mem=$${mem} #SBATCH --mem-per-cpu=$${mem_per_cpu} #SBATCH --mail-user=$${mail_user} #SBATCH 
--mail-type=$${mail_type} #SBATCH --constraint=$${constraint} #SBATCH --gres=$${gres} #SBATCH --requeue=$${requeue} #SBATCH --nodelist=$${nodelist} #SBATCH --propagate=$${propagate} #SBATCH --output=$${_qout_path} #SBATCH --error=$${_qerr_path} """ @property def limits(self): """ the limits for certain parameters set on the cluster. currently hard coded, should be read at init the increase functions will not increase beyond thise limits """ return {'max_total_tasks': 544, 'max_cpus_per_node': 16, 'mem': 6400000, 'mem_per_cpu': 64000, 'time': 2880} @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ntasks", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ntasks"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus warnings.warn("set_omp_ncpus not availabe for %s" % self.__class__.__name__) def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["mem_per_cpu"] = int(mem_mb) # Remove mem if it's defined. self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("scancel %d" % job_id) def submit_to_queue(self, script_file, submit_err_file="sbatch.err"): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) submit_err_file = os.path.join(os.path.dirname(script_file), submit_err_file) # submit the job try: cmd = ['sbatch', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) # write the err output to file, a error parser may read it and a fixer may know what to do ... with open(submit_err_file, mode='w') as f: f.write('sbatch submit process stderr:') f.write(str(process.stderr.read())) f.write('qparams:') f.write(str(self.qparams)) process.wait() # grab the returncode. 
SLURM returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split()[3]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code queue_id = None logger.warning('Could not parse job id following slurm...') finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... err_msg = ("Error in job submission with SLURM file {f} and cmd {c}\n".format(f=script_file, c=cmd) + "The error response reads: {c}".format(c=process.stderr.read())) raise self.Error(err_msg) except Exception as details: msg = 'Error while submitting job:\n' + str(details) logger.critical(msg) with open(submit_err_file, mode='a') as f: f.write(msg) try: print('sometimes we land here, no idea what is happening ... Michiel') print(details) print(cmd) print(process.returncode) except: pass # random error, e.g. no qsub on machine! 
raise self.Error('Running sbatch caused an error...') def exclude_nodes(self, nodes): try: if 'exclude_nodes' not in self.qparams.keys(): self.qparams.update({'exclude_nodes': 'node'+nodes[0]}) print('excluded node %s' % nodes[0]) for node in nodes[1:]: self.qparams['exclude_nodes'] += ',node'+node print('excluded node %s' % node) print(self.qparams) return True except (KeyError, IndexError): return False def increase_cpus(self, factor=1.5): print('increasing cpus') try: if self.qparams['ntasks'] > 1: # mpi parallel n = int(self.qparams['ntasks'] * factor) if n < self.limits['max_total_tasks']: self.qparams['ntasks'] = n print('increased ntasks to %s' % n) return True else: raise QueueAdapterError elif self.qparams['ntasks'] == 1 and self.qparams['cpus_per_task'] > 1: # open mp parallel n = int(self.qparams['cpus_per_task'] * factor) if n < self.limits['max_cpus_per_node']: self.qparams['cpus_per_task'] = n return True else: raise QueueAdapterError else: raise QueueAdapterError except (KeyError, QueueAdapterError): return False def increase_mem(self, factor=1.5): print('increasing memory') try: if 'mem' in self.qparams.keys(): n = int(self.qparams['mem'] * factor) if n < self.limits['mem']: self.qparams['mem'] = n print('increased mem to %s' % n) return True else: raise QueueAdapterError elif 'mem_per_cpu' in self.qparams.keys(): n = int(self.qparams['mem_per_cpu'] * factor) if n < self.limits['mem_per_cpu']: self.qparams['mem'] = n print('increased mem_per_cpu to %s' % n) return True else: raise QueueAdapterError else: raise QueueAdapterError except (KeyError, IndexError, QueueAdapterError): return False def increase_time(self, factor=1.5): print('increasing time') days, hours, minutes = 0, 0, 0 try: # a slurm time parser ;-) forgetting about seconds # feel free to pull this out and mak time in minutes always if '-' not in self.qparams['time']: # "minutes", # "minutes:seconds", # "hours:minutes:seconds", if ':' not in self.qparams['time']: minutes = 
int(float(self.qparams['time'])) elif self.qparams['time'].count(':') == 1: minutes = int(float(self.qparams['time'].split(':')[0])) else: minutes = int(float(self.qparams['time'].split(':')[1])) hours = int(float(self.qparams['time'].split(':')[0])) else: # "days-hours", # "days-hours:minutes", # "days-hours:minutes:seconds". days = int(float(self.qparams['time'].split('-')[0])) hours = int(float(self.qparams['time'].split('-')[1].split(':')[0])) try: minutes = int(float(self.qparams['time'].split('-')[1].split(':')[1])) except IndexError: pass time = (days * 24 + hours) * 60 + minutes time *= factor if time < self.limits['time']: self.qparams['time'] = time print('increased time to %s' % time) return True else: raise QueueAdapterError except (KeyError, QueueAdapterError): return False def get_njobs_in_queue(self, username=None): if username is None: username = getpass.getuser() cmd = ['squeue', '-o "%u"', '-u', username] process = Popen(cmd, shell=False, stdout=PIPE) process.wait() # parse the result if process.returncode == 0: # lines should have this form # username # count lines that include the username in it outs = process.stdout.readlines() njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to squeue server? 
err_msg = ('Error trying to get the number of jobs in the queue using squeue service' + 'The error response reads: {}'.format(process.stderr.read())) logger.critical(err_msg) return None class PbsAdapter(AbstractQueueAdapter): QTYPE = "pbs" QTEMPLATE = """\ #!/bin/bash #PBS -A $${account} #PBS -N $${job_name} #PBS -l walltime=$${walltime} #PBS -q $${queue} #PBS -l model=$${model} #PBS -l place=$${place} #PBS -W group_list=$${group_list} #PBS -l pvmem=$${pvmem}mb #PBS -l nodes=$${nodes}:ppn=$${ppn} # OLD SYNTAX #PBS -o $${_qout_path} #PBS -e $${_qerr_path} """ @property def limits(self): """ the limits for certain parameters set on the cluster. currently hard coded, should be read at init the increase functions will not increase beyond thise limits """ return {'max_total_tasks': 3888, 'time': 48, 'max_select': 120} @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("select", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["select"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus self.qparams["ompthreads"] = omp_ncpus def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["pvmem"] = mem_mb self.qparams["vmem"] = mem_mb def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the return code. 
PBS returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split('.')[0]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) raise self.Error(msg) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-a', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should have this form # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09' # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? 
print(' ** ') print(process[1].split('\n')) err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) print(' ** ') logger.critical(err_msg) return None # no need to raise an error, if False is returned the fixer may try something else, we don't need to kill the # scheduler just yet def do(self): return 'this is not FORTRAN' def exclude_nodes(self, nodes): logger.warning('exluding nodes, not implemented yet pbs') return False def increase_mem(self, factor): logger.warning('increasing mem, not implemented yet pbs') return False def increase_time(self, factor=1.5): days, hours, minutes = 0, 0, 0 try: # a pbe time parser [HH:MM]:SS # feel free to pull this out and mak time in minutes always n = str(self.qparams['time']).count(':') if n == 0: hours = int(float(self.qparams['time'])) elif n > 1: hours = int(float(self.qparams['time'].split(':')[0])) minutes = int(float(self.qparams['time'].split(':')[1])) time = hours * 60 + minutes time *= factor if time < self.limits['time']: self.qparams['time'] = str(int(time / 60)) + ':' + str(int(time - 60 * int(time / 60))) + ':00' print('increased time to %s minutes' % time) return True else: raise QueueAdapterError except (KeyError, QueueAdapterError): return False def increase_cpus(self, factor): base_increase = 12 new_cpus = self.qparams['select'] + factor * base_increase if new_cpus < self.limits['max_select']: self.qparams['select'] = new_cpus return True else: logger.warning('increasing cpus reached the limit') return False def increase_resources(self): """ Method to generally increase resources. 
On typical large machines we only increas cpu's since we use all mem per cpu per core """ if self.increase_cpus(1): return True else: return False class PbsOldAdapter(PbsAdapter): QTYPE = "pbsold" QTEMPLATE = """\ #!/bin/bash #PBS -A $${account} #PBS -N $${job_name} #PBS -l walltime=$${walltime} #PBS -q $${queue} #PBS -l model=$${model} #PBS -l place=$${place} #PBS -W group_list=$${group_list} ####PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads} # New syntax ####PBS -l pvmem=$${pvmem}mb #PBS -l pmem=$${pmem}mb ####PBS -l mppwidth=$${mppwidth} #PBS -l nodes=$${nodes}:ppn=$${ppn} # OLD SYNTAX #PBS -M $${mail_user} #PBS -m $${mail_type} # Submission environment #PBS -V #PBS -o $${_qout_path} #PBS -e $${_qerr_path} """ def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["pmem"] = mem_mb self.qparams["mem"] = mem_mb class SGEAdapter(AbstractQueueAdapter): """ Adapter for Sun Grid Engine (SGE) task submission software. """ QTYPE = "sge" QTEMPLATE = """\ #!/bin/bash #$ -A $${account} #$ -N $${job_name} #$ -l h rt=$${walltime} #$ -pe $${queue} $${ncpus} #$ -cwd #$ -j y #$ -m n #$ -e $${_qerr_path} #$ -o $${_qout_path} #$ -S /bin/bash """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ncpus", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ncpus"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus warnings.warn("set_omp_ncpus not availabe for %s" % self.__class__.__name__) def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" raise NotImplementedError("") #self.qparams["mem_per_cpu"] = mem_mb ## Remove mem if it's defined. 
#self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. SGE returns 0 if the job was successful if process.returncode == 0: try: # output should of the form # Your job 1659048 ("NAME_OF_JOB") has been submitted queue_id = int(process.stdout.read().split(' ')[2]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) raise self.Error(msg) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should contain username # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? 
err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) logger.critical(err_msg) return None def exclude_nodes(self, nodes): """ Method to exclude nodes in the calculation """ raise NotImplementedError("exclude_nodes") def increase_mem(self, factor): """ Method to increase the amount of memory asked for, by factor. """ raise NotImplementedError("increase_mem") def increase_time(self, factor): """ Method to increase the available wall time asked for, by factor. """ raise NotImplementedError("increase_time") def increase_cpus(self, factor): raise NotImplementedError("increase_cpus") class QScriptTemplate(string.Template): delimiter = '$$' Add PbsAdapter in list of adapters. """ Part of this code is based on a similar implementation present in FireWorks (https://pypi.python.org/pypi/FireWorks). Work done by D. Waroquiers, A. Jain, and M. Kocher. The main difference wrt the Fireworks implementation is that the QueueAdapter objects provide a programmatic interface for setting important attributes such as the number of MPI nodes, the number of OMP threads and the memory requirements. This programmatic interface is used by the `TaskManager` for optimizing the parameters of the run before submitting the job (Abinit provides the autoparal option that allows one to get a list of parallel configuration and their expected efficiency). """ from __future__ import print_function, division import os import abc import string import copy import getpass import warnings from subprocess import Popen, PIPE from pymatgen.io.abinitio.launcher import ScriptEditor from pymatgen.util.string_utils import is_string import logging logger = logging.getLogger(__name__) __all__ = [ "MpiRunner", "qadapter_class", ] class Command(object): """ From https://gist.github.com/kirpit/1306188 Enables to run subprocess commands in a different thread with TIMEOUT option. 
Based on jcollado's solution: http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933 """ command = None process = None status = None output, error = '', '' def __init__(self, command): if is_string(command): import shlex command = shlex.split(command) self.command = command def run(self, timeout=None, **kwargs): """ Run a command then return: (status, output, error). """ def target(**kwargs): try: self.process = Popen(self.command, **kwargs) self.output, self.error = self.process.communicate() self.status = self.process.returncode except: import traceback self.error = traceback.format_exc() self.status = -1 # default stdout and stderr if 'stdout' not in kwargs: kwargs['stdout'] = PIPE if 'stderr' not in kwargs: kwargs['stderr'] = PIPE # thread import threading thread = threading.Thread(target=target, kwargs=kwargs) thread.start() thread.join(timeout) if thread.is_alive(): self.process.terminate() thread.join() return self.status, self.output, self.error class MpiRunner(object): """ This object provides an abstraction for the mpirunner provided by the different MPI libraries. It's main task is handling the different syntax and options supported by the different mpirunners. """ def __init__(self, name, type=None, options=""): self.name = name self.type = None self.options = options def string_to_run(self, executable, mpi_ncpus, stdin=None, stdout=None, stderr=None): stdin = "< " + stdin if stdin is not None else "" stdout = "> " + stdout if stdout is not None else "" stderr = "2> " + stderr if stderr is not None else "" if self.has_mpirun: if self.type is None: # TODO: better treatment of mpirun syntax. 
#se.add_line('$MPIRUN -n $MPI_NCPUS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR') num_opt = "-n " + str(mpi_ncpus) cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr]) else: raise NotImplementedError("type %s is not supported!") else: #assert mpi_ncpus == 1 cmd = " ".join([executable, stdin, stdout, stderr]) return cmd @property def has_mpirun(self): """True if we are running via mpirun, mpiexec ...""" return self.name is not None def qadapter_class(qtype): """Return the concrete `Adapter` class from a string.""" return {"shell": ShellAdapter, "slurm": SlurmAdapter, "pbs": PbsAdapter, "pbsold": PbsOldAdapter, "sge": SGEAdapter, }[qtype.lower()] class QueueAdapterError(Exception): """Error class for exceptions raise by QueueAdapter.""" class AbstractQueueAdapter(object): """ The QueueAdapter is responsible for all interactions with a specific queue management system. This includes handling all details of queue script format as well as queue submission and management. This is the Abstract base class defining the methods that must be implemented by the concrete classes. A user should extend this class with implementations that work on specific queue systems. """ __metaclass__ = abc.ABCMeta Error = QueueAdapterError def __init__(self, qparams=None, setup=None, modules=None, shell_env=None, omp_env=None, pre_run=None, post_run=None, mpi_runner=None): """ Args: setup: String or list of commands to execute during the initial setup. modules: String or list of modules to load before running the application. shell_env: Dictionary with the environment variables to export before running the application. omp_env: Dictionary with the OpenMP variables. pre_run: String or list of commands to execute before launching the calculation. post_run: String or list of commands to execute once the calculation is completed. mpi_runner: Path to the MPI runner or `MpiRunner` instance. None if not used """ # Make defensive copies so that we can change the values at runtime. 
self.qparams = qparams.copy() if qparams is not None else {} self._verbatim = [] if is_string(setup): setup = [setup] self.setup = setup[:] if setup is not None else [] self.omp_env = omp_env.copy() if omp_env is not None else {} if is_string(modules): modules = [modules] self.modules = modules[:] if modules is not None else [] self.shell_env = shell_env.copy() if shell_env is not None else {} self.mpi_runner = mpi_runner if not isinstance(mpi_runner, MpiRunner): self.mpi_runner = MpiRunner(mpi_runner) if is_string(pre_run): pre_run = [pre_run] self.pre_run = pre_run[:] if pre_run is not None else [] if is_string(post_run): post_run = [post_run] self.post_run = post_run[:] if post_run is not None else [] # Parse the template so that we know the list of supported options. cls = self.__class__ if hasattr(cls, "QTEMPLATE"): # Consistency check. err_msg = "" for param in self.qparams: if param not in self.supported_qparams: err_msg += "Unsupported QUEUE parameter name %s\n" % param err_msg += "Supported are: \n" for param_sup in self.supported_qparams: err_msg += " %s \n" % param_sup if err_msg: raise ValueError(err_msg) def copy(self): return copy.copy(self) def deepcopy(self): return copy.deepcopy(self) @property def supported_qparams(self): """ Dictionary with the supported parameters that can be passed to the queue manager (obtained by parsing QTEMPLATE). 
""" try: return self._supported_qparams except AttributeError: import re self._supported_qparams = re.findall("\$\$\{(\w+)\}", self.QTEMPLATE) return self._supported_qparams @property def has_mpirun(self): """True if we are using a mpirunner""" return bool(self.mpi_runner) @property def has_omp(self): """True if we are using OpenMP threads""" return hasattr(self, "omp_env") and bool(getattr(self, "omp_env")) @property def tot_ncpus(self): """Total number of CPUs employed""" return self.mpi_ncpus * self.omp_ncpus @property def omp_ncpus(self): """Number of OpenMP threads.""" if self.has_omp: return self.omp_env["OMP_NUM_THREADS"] else: return 1 @abc.abstractmethod def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" @abc.abstractproperty def mpi_ncpus(self): """Number of CPUs used for MPI.""" @abc.abstractmethod def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" #@abc.abstractproperty #def queue_walltime(self): # """Returns the walltime in seconds.""" #@abc.abstractmethod #def set_queue_walltime(self): # """Set the walltime in seconds.""" #@abc.abstractproperty #def mem_per_cpu(self): # """The memory per CPU in Megabytes.""" @abc.abstractmethod def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" #@property #def tot_mem(self): # """Total memory required by the job n Megabytes.""" # return self.mem_per_cpu * self.mpi_ncpus @abc.abstractmethod def cancel(self, job_id): """ Cancel the job. Args: job_id: (in) Job identifier. Returns: Exit status. """ def add_verbatim(self, lines): """ Add a list of lines or just a string to the header. 
No programmatic interface to change these options is provided """ if is_string(lines): lines = [lines] self._verbatim.extend(lines) def _make_qheader(self, job_name, qout_path, qerr_path): """Return a string with the options that are passed to the resource manager.""" qtemplate = QScriptTemplate(self.QTEMPLATE) # set substitution dict for replacements into the template and clean null values subs_dict = {k: v for k, v in self.qparams.items() if v is not None} # Set job_name and the names for the stderr and stdout of the # queue manager (note the use of the extensions .qout and .qerr # so that we can easily locate this file. subs_dict['job_name'] = job_name.replace('/', '_') subs_dict['_qout_path'] = qout_path subs_dict['_qerr_path'] = qerr_path # might contain unused parameters as leftover $$. unclean_template = qtemplate.safe_substitute(subs_dict) # Remove lines with leftover $$. clean_template = [] for line in unclean_template.split('\n'): if '$$' not in line: clean_template.append(line) # Add verbatim lines if self._verbatim: clean_template.extend(self._verbatim) return '\n'.join(clean_template) def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path, stdin=None, stdout=None, stderr=None): """ Returns a (multi-line) String representing the queue script, e.g. PBS script. Uses the template_file along with internal parameters to create the script. Args: job_name: Name of the job. launch_dir: (str) The directory the job will be launched in. qout_path Path of the Queue manager output file. qerr_path: Path of the Queue manager error file. """ # PBS does not accept job_names longer than 15 chars. if len(job_name) > 14 and isinstance(self, PbsAdapter): job_name = job_name[:14] # Construct the header for the Queue Manager. qheader = self._make_qheader(job_name, qout_path, qerr_path) # Add the bash section. 
se = ScriptEditor() if self.setup: se.add_comment("Setup section") se.add_lines(self.setup) se.add_emptyline() if self.modules: se.add_comment("Load Modules") se.add_line("module purge") se.load_modules(self.modules) se.add_emptyline() if self.has_omp: se.add_comment("OpenMp Environment") se.declare_vars(self.omp_env) se.add_emptyline() if self.shell_env: se.add_comment("Shell Environment") se.declare_vars(self.shell_env) se.add_emptyline() # Cd to launch_dir se.add_line("cd " + os.path.abspath(launch_dir)) if self.pre_run: se.add_comment("Commands before execution") se.add_lines(self.pre_run) se.add_emptyline() # Construct the string to run the executable with MPI and mpi_ncpus. mpi_ncpus = self.mpi_ncpus line = self.mpi_runner.string_to_run(executable, mpi_ncpus, stdin=stdin, stdout=stdout, stderr=stderr) se.add_line(line) if self.post_run: se.add_emptyline() se.add_comment("Commands after execution") se.add_lines(self.post_run) shell_text = se.get_script_str() return qheader + shell_text + "\n" @abc.abstractmethod def submit_to_queue(self, script_file): """ Submits the job to the queue, probably using subprocess or shutil Args: script_file: (str) name of the script file to use (String) Returns: process, queue_id """ @abc.abstractmethod def get_njobs_in_queue(self, username=None): """ returns the number of jobs in the queue, probably using subprocess or shutil to call a command like 'qstat'. returns None when the number of jobs cannot be determined. Args: username: (str) the username of the jobs to count (default is to autodetect) """ #some method to fix problems @abc.abstractmethod def exclude_nodes(self, nodes): """ Method to exclude nodes in the calculation """ @abc.abstractmethod def increase_mem(self, factor): """ Method to increase the amount of memory asked for, by factor. """ @abc.abstractmethod def increase_time(self, factor): """ Method to increase the available wall time asked for, by factor. 
""" @abc.abstractmethod def increase_cpus(self, factor): """ Method to increase the number of cpus asked for. """ # @abc.abstractmethod def increase_resources(self): """ Method to generally increase resources. """ return False #################### # Concrete classes # #################### class ShellAdapter(AbstractQueueAdapter): QTYPE = "shell" QTEMPLATE = """\ #!/bin/bash export MPI_NCPUS=$${MPI_NCPUS} """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("MPI_NCPUS", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["MPI_NCPUS"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus def set_mem_per_cpu(self, mem_mb): """mem_per_cpu is not available in ShellAdapter.""" def cancel(self, job_id): return os.system("kill -9 %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: process = Popen(("/bin/bash", script_file), stderr=PIPE) queue_id = process.pid return process, queue_id except: # random error raise self.Error("Random Error ...!") def get_njobs_in_queue(self, username=None): return None def exclude_nodes(self, nodes): return False def increase_mem(self, factor): return False def increase_time(self, factor): return False def increase_cpus(self, factor): return False class SlurmAdapter(AbstractQueueAdapter): QTYPE = "slurm" QTEMPLATE = """\ #!/bin/bash #SBATCH --ntasks=$${ntasks} #SBATCH --ntasks-per-node=$${ntasks_per_node} #SBATCH --cpus-per-task=$${cpus_per_task} #SBATCH --time=$${time} #SBATCH --partition=$${partition} #SBATCH --account=$${account} #SBATCH --job-name=$${job_name} #SBATCH --nodes=$${nodes} #SBATCH --exclude=$${exclude_nodes} #SBATCH --mem=$${mem} #SBATCH --mem-per-cpu=$${mem_per_cpu} #SBATCH --mail-user=$${mail_user} #SBATCH 
--mail-type=$${mail_type} #SBATCH --constraint=$${constraint} #SBATCH --gres=$${gres} #SBATCH --requeue=$${requeue} #SBATCH --nodelist=$${nodelist} #SBATCH --propagate=$${propagate} #SBATCH --output=$${_qout_path} #SBATCH --error=$${_qerr_path} """ @property def limits(self): """ the limits for certain parameters set on the cluster. currently hard coded, should be read at init the increase functions will not increase beyond thise limits """ return {'max_total_tasks': 544, 'max_cpus_per_node': 16, 'mem': 6400000, 'mem_per_cpu': 64000, 'time': 2880} @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ntasks", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ntasks"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus warnings.warn("set_omp_ncpus not availabe for %s" % self.__class__.__name__) def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["mem_per_cpu"] = int(mem_mb) # Remove mem if it's defined. self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("scancel %d" % job_id) def submit_to_queue(self, script_file, submit_err_file="sbatch.err"): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) submit_err_file = os.path.join(os.path.dirname(script_file), submit_err_file) # submit the job try: cmd = ['sbatch', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) # write the err output to file, a error parser may read it and a fixer may know what to do ... with open(submit_err_file, mode='w') as f: f.write('sbatch submit process stderr:') f.write(str(process.stderr.read())) f.write('qparams:') f.write(str(self.qparams)) process.wait() # grab the returncode. 
SLURM returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split()[3]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code queue_id = None logger.warning('Could not parse job id following slurm...') finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... err_msg = ("Error in job submission with SLURM file {f} and cmd {c}\n".format(f=script_file, c=cmd) + "The error response reads: {c}".format(c=process.stderr.read())) raise self.Error(err_msg) except Exception as details: msg = 'Error while submitting job:\n' + str(details) logger.critical(msg) with open(submit_err_file, mode='a') as f: f.write(msg) try: print('sometimes we land here, no idea what is happening ... Michiel') print(details) print(cmd) print(process.returncode) except: pass # random error, e.g. no qsub on machine! 
raise self.Error('Running sbatch caused an error...') def exclude_nodes(self, nodes): try: if 'exclude_nodes' not in self.qparams.keys(): self.qparams.update({'exclude_nodes': 'node'+nodes[0]}) print('excluded node %s' % nodes[0]) for node in nodes[1:]: self.qparams['exclude_nodes'] += ',node'+node print('excluded node %s' % node) print(self.qparams) return True except (KeyError, IndexError): return False def increase_cpus(self, factor=1.5): print('increasing cpus') try: if self.qparams['ntasks'] > 1: # mpi parallel n = int(self.qparams['ntasks'] * factor) if n < self.limits['max_total_tasks']: self.qparams['ntasks'] = n print('increased ntasks to %s' % n) return True else: raise QueueAdapterError elif self.qparams['ntasks'] == 1 and self.qparams['cpus_per_task'] > 1: # open mp parallel n = int(self.qparams['cpus_per_task'] * factor) if n < self.limits['max_cpus_per_node']: self.qparams['cpus_per_task'] = n return True else: raise QueueAdapterError else: raise QueueAdapterError except (KeyError, QueueAdapterError): return False def increase_mem(self, factor=1.5): print('increasing memory') try: if 'mem' in self.qparams.keys(): n = int(self.qparams['mem'] * factor) if n < self.limits['mem']: self.qparams['mem'] = n print('increased mem to %s' % n) return True else: raise QueueAdapterError elif 'mem_per_cpu' in self.qparams.keys(): n = int(self.qparams['mem_per_cpu'] * factor) if n < self.limits['mem_per_cpu']: self.qparams['mem'] = n print('increased mem_per_cpu to %s' % n) return True else: raise QueueAdapterError else: raise QueueAdapterError except (KeyError, IndexError, QueueAdapterError): return False def increase_time(self, factor=1.5): print('increasing time') days, hours, minutes = 0, 0, 0 try: # a slurm time parser ;-) forgetting about seconds # feel free to pull this out and mak time in minutes always if '-' not in self.qparams['time']: # "minutes", # "minutes:seconds", # "hours:minutes:seconds", if ':' not in self.qparams['time']: minutes = 
int(float(self.qparams['time'])) elif self.qparams['time'].count(':') == 1: minutes = int(float(self.qparams['time'].split(':')[0])) else: minutes = int(float(self.qparams['time'].split(':')[1])) hours = int(float(self.qparams['time'].split(':')[0])) else: # "days-hours", # "days-hours:minutes", # "days-hours:minutes:seconds". days = int(float(self.qparams['time'].split('-')[0])) hours = int(float(self.qparams['time'].split('-')[1].split(':')[0])) try: minutes = int(float(self.qparams['time'].split('-')[1].split(':')[1])) except IndexError: pass time = (days * 24 + hours) * 60 + minutes time *= factor if time < self.limits['time']: self.qparams['time'] = time print('increased time to %s' % time) return True else: raise QueueAdapterError except (KeyError, QueueAdapterError): return False def get_njobs_in_queue(self, username=None): if username is None: username = getpass.getuser() cmd = ['squeue', '-o "%u"', '-u', username] process = Popen(cmd, shell=False, stdout=PIPE) process.wait() # parse the result if process.returncode == 0: # lines should have this form # username # count lines that include the username in it outs = process.stdout.readlines() njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to squeue server? 
err_msg = ('Error trying to get the number of jobs in the queue using squeue service' + 'The error response reads: {}'.format(process.stderr.read())) logger.critical(err_msg) return None class PbsAdapter(AbstractQueueAdapter): QTYPE = "pbs" QTEMPLATE = """\ #!/bin/bash #PBS -A $${account} #PBS -N $${job_name} #PBS -l walltime=$${walltime} #PBS -q $${queue} #PBS -l model=$${model} #PBS -l place=$${place} #PBS -W group_list=$${group_list} #PBS -l pvmem=$${pvmem}mb #PBS -l nodes=$${nodes}:ppn=$${ppn} # OLD SYNTAX #PBS -o $${_qout_path} #PBS -e $${_qerr_path} """ @property def limits(self): """ the limits for certain parameters set on the cluster. currently hard coded, should be read at init the increase functions will not increase beyond thise limits """ return {'max_total_tasks': 3888, 'time': 48, 'max_select': 120} @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("select", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["select"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus self.qparams["ompthreads"] = omp_ncpus def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["pvmem"] = mem_mb self.qparams["vmem"] = mem_mb def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the return code. 
PBS returns 0 if the job was successful if process.returncode == 0: try: # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id queue_id = int(process.stdout.read().split('.')[0]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) raise self.Error(msg) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-a', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should have this form # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09' # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? 
print(' ** ') print(process[1].split('\n')) err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) print(' ** ') logger.critical(err_msg) return None # no need to raise an error, if False is returned the fixer may try something else, we don't need to kill the # scheduler just yet def do(self): return 'this is not FORTRAN' def exclude_nodes(self, nodes): logger.warning('exluding nodes, not implemented yet pbs') return False def increase_mem(self, factor): logger.warning('increasing mem, not implemented yet pbs') return False def increase_time(self, factor=1.5): days, hours, minutes = 0, 0, 0 try: # a pbe time parser [HH:MM]:SS # feel free to pull this out and mak time in minutes always n = str(self.qparams['time']).count(':') if n == 0: hours = int(float(self.qparams['time'])) elif n > 1: hours = int(float(self.qparams['time'].split(':')[0])) minutes = int(float(self.qparams['time'].split(':')[1])) time = hours * 60 + minutes time *= factor if time < self.limits['time']: self.qparams['time'] = str(int(time / 60)) + ':' + str(int(time - 60 * int(time / 60))) + ':00' print('increased time to %s minutes' % time) return True else: raise QueueAdapterError except (KeyError, QueueAdapterError): return False def increase_cpus(self, factor): base_increase = 12 new_cpus = self.qparams['select'] + factor * base_increase if new_cpus < self.limits['max_select']: self.qparams['select'] = new_cpus return True else: logger.warning('increasing cpus reached the limit') return False def increase_resources(self): """ Method to generally increase resources. 
On typical large machines we only increas cpu's since we use all mem per cpu per core """ if self.increase_cpus(1): return True else: return False class PbsOldAdapter(PbsAdapter): QTYPE = "pbsold" QTEMPLATE = """\ #!/bin/bash #PBS -A $${account} #PBS -N $${job_name} #PBS -l walltime=$${walltime} #PBS -q $${queue} #PBS -l model=$${model} #PBS -l place=$${place} #PBS -W group_list=$${group_list} ####PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads} # New syntax ####PBS -l pvmem=$${pvmem}mb #PBS -l pmem=$${pmem}mb ####PBS -l mppwidth=$${mppwidth} #PBS -l nodes=$${nodes}:ppn=$${ppn} # OLD SYNTAX #PBS -M $${mail_user} #PBS -m $${mail_type} # Submission environment #PBS -V #PBS -o $${_qout_path} #PBS -e $${_qerr_path} """ def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" self.qparams["pmem"] = mem_mb self.qparams["mem"] = mem_mb class SGEAdapter(AbstractQueueAdapter): """ Adapter for Sun Grid Engine (SGE) task submission software. """ QTYPE = "sge" QTEMPLATE = """\ #!/bin/bash #$ -A $${account} #$ -N $${job_name} #$ -l h rt=$${walltime} #$ -pe $${queue} $${ncpus} #$ -cwd #$ -j y #$ -m n #$ -e $${_qerr_path} #$ -o $${_qout_path} #$ -S /bin/bash """ @property def mpi_ncpus(self): """Number of CPUs used for MPI.""" return self.qparams.get("ncpus", 1) def set_mpi_ncpus(self, mpi_ncpus): """Set the number of CPUs used for MPI.""" self.qparams["ncpus"] = mpi_ncpus def set_omp_ncpus(self, omp_ncpus): """Set the number of OpenMP threads.""" self.omp_env["OMP_NUM_THREADS"] = omp_ncpus warnings.warn("set_omp_ncpus not availabe for %s" % self.__class__.__name__) def set_mem_per_cpu(self, mem_mb): """Set the memory per CPU in Megabytes""" raise NotImplementedError("") #self.qparams["mem_per_cpu"] = mem_mb ## Remove mem if it's defined. 
#self.qparams.pop("mem", None) def cancel(self, job_id): return os.system("qdel %d" % job_id) def submit_to_queue(self, script_file): if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) # submit the job try: cmd = ['qsub', script_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE) process.wait() # grab the returncode. SGE returns 0 if the job was successful if process.returncode == 0: try: # output should of the form # Your job 1659048 ("NAME_OF_JOB") has been submitted queue_id = int(process.stdout.read().split(' ')[2]) logger.info('Job submission was successful and queue_id is {}'.format(queue_id)) except: # probably error parsing job code logger.warning("Could not parse job id following qsub...") queue_id = None finally: return process, queue_id else: # some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc... msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) + 'The error response reads: {}'.format(process.stderr.read())) raise self.Error(msg) except: # random error, e.g. no qsub on machine! raise self.Error("Running qsub caused an error...") def get_njobs_in_queue(self, username=None): # Initialize username if username is None: username = getpass.getuser() # run qstat qstat = Command(['qstat', '-u', username]) process = qstat.run(timeout=5) # parse the result if process[0] == 0: # lines should contain username # count lines that include the username in it # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'. outs = process[1].split('\n') njobs = len([line.split() for line in outs if username in line]) logger.info('The number of jobs currently in the queue is: {}'.format(njobs)) return njobs # there's a problem talking to qstat server? 
err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' + 'The error response reads: {}'.format(process[2])) logger.critical(err_msg) return None def exclude_nodes(self, nodes): """ Method to exclude nodes in the calculation """ raise NotImplementedError("exclude_nodes") def increase_mem(self, factor): """ Method to increase the amount of memory asked for, by factor. """ raise NotImplementedError("increase_mem") def increase_time(self, factor): """ Method to increase the available wall time asked for, by factor. """ raise NotImplementedError("increase_time") def increase_cpus(self, factor): raise NotImplementedError("increase_cpus") class QScriptTemplate(string.Template): delimiter = '$$'
#!/usr/bin/python # -*- coding: utf-8 -*- # Cached file-system utility library # (C) 2016 VRT Systems # # vim: set ts=4 sts=4 et tw=78 sw=4 si: import time import os import errno import collections import stat import weakref from .intnode import _Node class Node(collections.Mapping): ''' A file-system node object. This represents a file or directory within the filesystem. It has a weak reference to the parent node and holds the metadata for that node. ''' def __init__(self, cache, abs_path): self._cache = weakref.ref(cache) self._node = _Node.get_node(abs_path) self._atime = time.time() def _update_atime(self): self._atime = time.time() @property def atime(self): return self._atime @property def atime_since(self): return time.time() - self.atime # Python conveniences def __repr__(self): # pragma: no cover # Not covered, because it's just for convenience return '%s(%r, %r)' % ( self.__class__.__name__, self._cache(), self.abs_path) def __str__(self): # pragma: no cover # Not covered, because it's just for convenience if self.is_file: file_type = 'file' elif self.is_dir: file_type = 'dir' elif self.is_link: file_type = 'link' else: file_type = 'other' return '%s{%r %s}' % ( self.__class__.__name__, self.abs_path, file_type) def __bool__(self): # pragma: no cover ''' Return True if the node exists. ''' # Not covered, because it's just for convenience try: self.stat return True except OSError: return False # Node properties @property def abs_path(self): ''' Return the node's full absolute path. ''' return self._node.abs_path @property def dir_name(self): ''' Return the full path of the node's parent directory. ''' return self._node.dir_name @property def base_name(self): ''' Return the full path of the node's parent. ''' return self._node.base_name def join(self, *elements): ''' Return a path below this node with *elements added. ''' return os.path.join(self.abs_path, *elements) def join_node(self, *elements): ''' Return the node referenced by joining the *elements. 
''' return self._cache()[self.join(*elements)] @property def parent(self): ''' Return the parent node. ''' return self._cache()[self.dir_name] @property def stat(self): ''' Return the result of os.stat() on this file. ''' self._update_atime() return self._node.get_stat(self._cache()._required_time) @property def file_type(self): ''' Returns the file type for the file. ''' return stat.S_IFMT(self.stat.st_mode) @property def is_socket(self): # pragma: no cover ''' Return true if the file is a socket. ''' # Not covered by tests: Not all systems implement sockets in the # filesystem and there isn't always permission to create them. # Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFSOCK @property def is_link(self): ''' Return true if the file is a symbolic link. ''' return self.file_type == stat.S_IFLNK @property def is_file(self): ''' Return true if the file is a regular file. ''' return self.file_type == stat.S_IFREG @property def is_dir(self): ''' Return true if the file is a directory. ''' return self.file_type == stat.S_IFDIR @property def is_block(self): # pragma: no cover ''' Return true if the file is a block device. ''' # Not covered by tests: Not all systems implement character devices in # the filesystem and there isn't always permission to create them. # Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFBLK @property def is_char(self): # pragma: no cover ''' Return true if the file is a character device. ''' # Not covered by tests: Not all systems implement character devices in # the filesystem and there isn't always permission to create them. # Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFCHR @property def is_fifo(self): # pragma: no cover ''' Return true if the file is a FIFO. ''' # Not covered by tests: Not all systems implement FIFOs in # the filesystem and there isn't always permission to create them. 
# Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFIFO # Handling of links. @property def target(self): ''' Returns the name of the file the symlink points to. ''' return self._node.get_target(self._cache()._required_time) @property def abs_target(self): ''' Returns the absolute path for the target. ''' target = self.target if not os.path.isabs(target): target = self.parent.join(target) return os.path.abspath(target) @property def abs_final_target(self): ''' Returns the absolute path for the target, following all symlinks. ''' return os.path.realpath(self.abs_path) @property def target_node(self): ''' Return the filesystem node pointed to by this symlink. ''' return self._cache()[self.abs_target] @property def final_target_node(self): ''' Return the filesystem node pointed to by this symlink. ''' return self._cache()[self.abs_final_target] # Mapping interface for directories. def __getitem__(self, key): ''' Return the child filesystem node named 'key'. ''' self._update_atime() abs_path = os.path.join(self.abs_path, key) return self._cache()[abs_path] def __iter__(self): ''' Return an iterator for all the children in this directory. ''' self._update_atime() return iter(self._node.get_children(\ self._cache()._required_time).copy()) def __len__(self): ''' Return the number of child elements in the directory. ''' self._update_atime() return len(self._node.get_children(\ self._cache()._required_time)) # Searching for child nodes. 
def _find(self, predicate, depth_predicate, depth, depth_first): if self.is_link: for found in self.final_target_node._find( predicate, depth_predicate, depth, depth_first): yield found return child_depth = depth+1 show_self = depth_predicate(depth) show_children = depth_predicate(child_depth) recurse = depth_predicate(depth, True) if not self.is_dir: if show_self and predicate(self): yield self return # Apply predicate to self if not depth first if (not depth_first) and show_self and predicate(self): yield self for child in self.values(): # Apply predicate to child if not a directory if show_children and (not child.is_dir) and predicate(child): yield child # Recurse into child if a directory elif recurse and child.is_dir: for found in child._find(predicate, depth_predicate, child_depth, depth_first): yield found # Apply predicate to self if depth first if depth_first and show_self and predicate(self): yield self def find(self, predicate=None, depth=None, min_depth=None, max_depth=None, depth_first=False): ''' Attempt to find nodes that match the given predicate. The depth parameters control the minimum and maximum path depth (with depth itself overriding both). Each node is passed to the function called predicate which returns True or False. If it returns True, find yields that node. 
''' if depth is not None: depth_predicate = lambda d, r=False : \ r or (d == depth) elif (min_depth is None) and (max_depth is None): depth_predicate = lambda d, r=False : True elif (min_depth is not None) and (max_depth is None): depth_predicate = lambda d, r=False : \ r or (d >= min_depth) elif (min_depth is None) and (max_depth is not None): depth_predicate = lambda d, r=False : \ r or (d <= max_depth) elif (min_depth is not None) and (max_depth is not None): depth_predicate = lambda d, r=False : \ r or ((d >= min_depth) and (d <= max_depth)) if predicate is None: predicate = lambda n : True for found in self._find(predicate, depth_predicate, 0, depth_first): yield found node: Re-work `find` to give more predictable search order. #!/usr/bin/python # -*- coding: utf-8 -*- # Cached file-system utility library # (C) 2016 VRT Systems # # vim: set ts=4 sts=4 et tw=78 sw=4 si: import time import os import errno import collections import stat import weakref from .intnode import _Node class Node(collections.Mapping): ''' A file-system node object. This represents a file or directory within the filesystem. It has a weak reference to the parent node and holds the metadata for that node. 
''' def __init__(self, cache, abs_path): self._cache = weakref.ref(cache) self._node = _Node.get_node(abs_path) self._atime = time.time() def _update_atime(self): self._atime = time.time() @property def atime(self): return self._atime @property def atime_since(self): return time.time() - self.atime # Python conveniences def __repr__(self): # pragma: no cover # Not covered, because it's just for convenience return '%s(%r, %r)' % ( self.__class__.__name__, self._cache(), self.abs_path) def __str__(self): # pragma: no cover # Not covered, because it's just for convenience if self.is_file: file_type = 'file' elif self.is_dir: file_type = 'dir' elif self.is_link: file_type = 'link' else: file_type = 'other' return '%s{%r %s}' % ( self.__class__.__name__, self.abs_path, file_type) def __bool__(self): # pragma: no cover ''' Return True if the node exists. ''' # Not covered, because it's just for convenience try: self.stat return True except OSError: return False # Node properties @property def abs_path(self): ''' Return the node's full absolute path. ''' return self._node.abs_path @property def dir_name(self): ''' Return the full path of the node's parent directory. ''' return self._node.dir_name @property def base_name(self): ''' Return the full path of the node's parent. ''' return self._node.base_name def join(self, *elements): ''' Return a path below this node with *elements added. ''' return os.path.join(self.abs_path, *elements) def join_node(self, *elements): ''' Return the node referenced by joining the *elements. ''' return self._cache()[self.join(*elements)] @property def parent(self): ''' Return the parent node. ''' return self._cache()[self.dir_name] @property def stat(self): ''' Return the result of os.stat() on this file. ''' self._update_atime() return self._node.get_stat(self._cache()._required_time) @property def file_type(self): ''' Returns the file type for the file. 
''' return stat.S_IFMT(self.stat.st_mode) @property def is_socket(self): # pragma: no cover ''' Return true if the file is a socket. ''' # Not covered by tests: Not all systems implement sockets in the # filesystem and there isn't always permission to create them. # Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFSOCK @property def is_link(self): ''' Return true if the file is a symbolic link. ''' return self.file_type == stat.S_IFLNK @property def is_file(self): ''' Return true if the file is a regular file. ''' return self.file_type == stat.S_IFREG @property def is_dir(self): ''' Return true if the file is a directory. ''' return self.file_type == stat.S_IFDIR @property def is_block(self): # pragma: no cover ''' Return true if the file is a block device. ''' # Not covered by tests: Not all systems implement character devices in # the filesystem and there isn't always permission to create them. # Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFBLK @property def is_char(self): # pragma: no cover ''' Return true if the file is a character device. ''' # Not covered by tests: Not all systems implement character devices in # the filesystem and there isn't always permission to create them. # Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFCHR @property def is_fifo(self): # pragma: no cover ''' Return true if the file is a FIFO. ''' # Not covered by tests: Not all systems implement FIFOs in # the filesystem and there isn't always permission to create them. # Implementation is "simple enough" that bugs are unlikely. return self.file_type == stat.S_IFIFO # Handling of links. @property def target(self): ''' Returns the name of the file the symlink points to. ''' return self._node.get_target(self._cache()._required_time) @property def abs_target(self): ''' Returns the absolute path for the target. 
''' target = self.target if not os.path.isabs(target): target = self.parent.join(target) return os.path.abspath(target) @property def abs_final_target(self): ''' Returns the absolute path for the target, following all symlinks. ''' return os.path.realpath(self.abs_path) @property def target_node(self): ''' Return the filesystem node pointed to by this symlink. ''' return self._cache()[self.abs_target] @property def final_target_node(self): ''' Return the filesystem node pointed to by this symlink. ''' return self._cache()[self.abs_final_target] # Mapping interface for directories. def __getitem__(self, key): ''' Return the child filesystem node named 'key'. ''' self._update_atime() abs_path = os.path.join(self.abs_path, key) return self._cache()[abs_path] def __iter__(self): ''' Return an iterator for all the children in this directory. ''' self._update_atime() return iter(self._node.get_children(\ self._cache()._required_time).copy()) def __len__(self): ''' Return the number of child elements in the directory. ''' self._update_atime() return len(self._node.get_children(\ self._cache()._required_time)) # Searching for child nodes. 
    def _find(self, predicate, depth_predicate, depth, depth_first):
        # Symlinks are searched through their final target, at the same
        # depth; the link node itself is never yielded.
        if self.is_link:
            for found in self.final_target_node._find(
                    predicate, depth_predicate, depth, depth_first):
                yield found
            return

        child_depth = depth+1
        # Only the search root (depth 0) tests itself; every other node is
        # emitted by its parent via _children below, which keeps the yield
        # order predictable.
        show_self = (depth == 0) and depth_predicate(depth)
        show_children = depth_predicate(child_depth)
        recurse = depth_predicate(depth, True)

        # Non-directories have no children: yield self (if eligible) and stop.
        if not self.is_dir:
            if show_self and predicate(self):
                yield self
            return

        def _recurse():
            # Descend into child directories only.
            if recurse:
                for child in self.values():
                    if not child.is_dir:
                        continue
                    for found in child._find(predicate, depth_predicate,
                            child_depth, depth_first):
                        yield found

        def _children():
            # Yield every matching direct child (files and directories).
            if show_children:
                for child in self.values():
                    if predicate(child):
                        yield child

        def _self():
            if show_self and predicate(self):
                yield self

        # Generators: self first, then children, then recursion; reversed
        # for a depth-first traversal.
        generators = [_self(), _children(), _recurse()]
        if depth_first:
            generators.reverse()

        for g in generators:
            for found in g:
                yield found

    def find(self, predicate=None, depth=None,
            min_depth=None, max_depth=None, depth_first=False):
        '''
        Attempt to find nodes that match the given predicate.  The
        depth parameters control the minimum and maximum path depth (with
        depth itself overriding both).

        Each node is passed to the function called predicate which returns
        True or False.  If it returns True, find yields that node.
        '''
        # depth_predicate(d) -> may this depth be yielded?
        # depth_predicate(d, True) -> may the search recurse past depth d?
        if depth is not None:
            # exact depth overrides min/max
            depth_predicate = lambda d, r=False : \
                    r or (d == depth)
        elif (min_depth is None) and (max_depth is None):
            depth_predicate = lambda d, r=False : True
        elif (min_depth is not None) and (max_depth is None):
            depth_predicate = lambda d, r=False : \
                    r or (d >= min_depth)
        elif (min_depth is None) and (max_depth is not None):
            depth_predicate = lambda d, r=False : \
                    r or (d <= max_depth)
        elif (min_depth is not None) and (max_depth is not None):
            depth_predicate = lambda d, r=False : \
                    r or ((d >= min_depth) and (d <= max_depth))

        # No predicate means "match everything".
        if predicate is None:
            predicate = lambda n : True

        for found in self._find(predicate, depth_predicate, 0, depth_first):
            yield found
# -*- coding: utf-8 -*- from django.shortcuts import render from django.http import HttpResponse, Http404 from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User, Group from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status from reportlab.pdfgen import canvas from reportlab.lib import colors from reportlab.lib.pagesizes import A4 from reportlab.lib.units import cm from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import Table, Paragraph from io import BytesIO from .settings import BILLJOBS_DEBUG_PDF, BILLJOBS_BILL_LOGO_PATH, \ BILLJOBS_BILL_LOGO_WIDTH, BILLJOBS_BILL_LOGO_HEIGHT, \ BILLJOBS_BILL_PAYMENT_INFO from .models import Bill from billjobs.serializers import UserSerializer, GroupSerializer from .permissions import CustomUserAPIPermission, \ CustomUserDetailAPIPermission, CustomGroupAPIPermission, \ CustomGroupDetailAPIPermission from textwrap import wrap class GroupAPI(APIView): """ API endpoint to list or create groups """ permission_classes = (CustomGroupAPIPermission,) def get(self, request, format=None): """ List groups """ if request.user.is_staff is True: groups = Group.objects.all() else: groups = Group.objects.filter(user=request.user) serializer = GroupSerializer(groups, context={'request': request}, many=True) return Response(serializer.data, status=status.HTTP_200_OK) def post(self, request, format=None): """ Create a group """ serializer = GroupSerializer(data=request.data, context={'request': request}) if serializer.is_valid(): serializer.save() return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class GroupDetailAPI(APIView): """ API endpoint that allow admin and user to retrieve, update and delete a group """ permission_classes = (CustomGroupDetailAPIPermission,) def get_object(self, pk): try: group = Group.objects.get(pk=pk) 
            self.check_object_permissions(self.request, group)
            return group
        except Group.DoesNotExist:
            # map a missing group to a 404 response
            raise Http404

    def get(self, request, pk, format=None):
        # Retrieve a single group
        group = self.get_object(pk)
        serializer = GroupSerializer(group, context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk, format=None):
        """ Update a group instance """
        group = self.get_object(pk)
        serializer = GroupSerializer(group, data=request.data,
                context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """ Delete a group instance """
        group = self.get_object(pk)
        group.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)


class UserAPI(APIView):
    """ API endpoint that allows admin to list or create users """
    permission_classes = (CustomUserAPIPermission,)

    def get(self, request, format=None):
        """
        List users for admin
        Retrieve user for authenticated user
        """
        if request.user.is_staff is True:
            # staff get the full user list
            users = User.objects.all()
            serializer = UserSerializer(users, context={'request': request},
                    many=True)
        else:
            # non-staff callers only ever see their own record
            users = User.objects.get(pk=request.user.id)
            serializer = UserSerializer(users, context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        # Create a new user from the request payload
        serializer = UserSerializer(data=request.data,
                context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class UserDetailAPI(APIView):
    """ API endpoint that allows admin to retrieve, update, delete a user """
    permission_classes = (CustomUserDetailAPIPermission,)

    def get_object(self, pk):
        # Fetch the user and run the object-level permission check;
        # a missing user becomes a 404.
        try:
            user = User.objects.get(pk=pk)
            self.check_object_permissions(self.request, user)
            return user
        except User.DoesNotExist:
            raise Http404
def get(self, request, pk, format=None): user = self.get_object(pk) serializer = UserSerializer(user, context={'request': request}) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk, format=None): user = self.get_object(pk) serializer = UserSerializer(user, data=request.data, context={'request': request}, partial=True) if serializer.is_valid(): serializer.save() return Response(serializer.data, status=status.HTTP_200_OK) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) def delete(self, request, pk, format=None): user = self.get_object(pk) user.delete() return Response(status=status.HTTP_204_NO_CONTENT) @login_required def generate_pdf(request, bill_id): bill = Bill.objects.get(id=bill_id) response = HttpResponse(content_type='application/pdf') response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % bill.number # Create a buffer buffer = BytesIO() pdf = canvas.Canvas(buffer, pagesize=A4) # define new 0,0 bottom left with cm as margin pdf.translate(cm,cm) # define document width and height with cm as margin width, height = A4 width = width - 2*cm height = height - 2*cm # if debug draw lines for document limit if BILLJOBS_DEBUG_PDF is True: pdf.setStrokeColorRGB(1,0,0) pdf.line(0,0,width,0) pdf.line(0,0,0,height) pdf.line(0,height,width,height) pdf.line(width,height,width,0) # Put logo on top of pdf original image size is 570px/250px pdf.drawImage(BILLJOBS_BILL_LOGO_PATH, 0, height-BILLJOBS_BILL_LOGO_HEIGHT, width=BILLJOBS_BILL_LOGO_WIDTH, height=BILLJOBS_BILL_LOGO_HEIGHT) # billing information lh = 15 #define a line height pdf.setFillColorRGB(0.3,0.3,0.3) pdf.setFont("Helvetica-Bold", 14) pdf.drawRightString(width, height-lh, 'Facture'); pdf.setFont("Helvetica-Bold", 10) pdf.drawRightString(width, height-2*lh, u'Numéro : %s' % bill.number) pdf.setFont("Helvetica", 10) pdf.drawRightString(width, height-3*lh, u'Date facturation : %s' % bill.billing_date.strftime('%d/%m/%Y')) # define new height nh = 
height - 90 # seller pdf.setFillColorRGB(0.95,0.95,0.95) pdf.setStrokeColorRGB(1,1,1) # rect(x,y,width,height) pdf.rect(0, nh-8*lh, width/2-40, 6.4*lh, fill=1) # reset fill for text color pdf.setFillColorRGB(0.3,0.3,0.3) pdf.drawString(10, nh-lh, 'Émetteur') issuer = Paragraph(bill.issuer_address, getSampleStyleSheet()['Normal']) issuer.wrapOn(pdf, width*0.25, 6*lh) issuer.drawOn(pdf, 20, nh-6*lh) # customer pdf.drawString(width/2, nh-lh, 'Adressé à') customer = pdf.beginText() customer.setTextOrigin(width/2+20, nh-3*lh) # create text with \n and remove \r text = '%s %s\n%s' % (bill.user.first_name, bill.user.last_name, bill.billing_address.replace('\r','')) # get each line for line in text.split('\n'): customer.textOut(line) customer.moveCursor(0,lh) pdf.drawText(customer) pdf.setStrokeColorRGB(0,0,0) # rect(x,y,width,height) pdf.rect(width/2, nh-8*lh, width/2, 6.4*lh, fill=0) # define new height nh = nh - 10*lh data = [['Désignation', 'Prix unit. HT', 'Quantité', 'Total HT']] for line in bill.billline_set.all(): description = '%s - %s\n%s' % (line.service.reference, line.service.name, '\n'.join(wrap(line.service.description,62))) if line.note : description = '%s\n%s' % (description, '\n'.join(wrap(line.note,62))) line = (description, line.service.price, line.quantity, line.total) data.append(line) data.append(('TVA non applicable art-293B du CGI', '', 'Total HT', '%s €' % bill.amount)) data.append(('', '', 'TVA 0%', '0')) data.append(('', '', 'Total TTC', '%s €' % bill.amount)) # widths in percent of pdf width colWidths = (width*0.55, width*0.15, width*0.15, width*0.15) style = [('GRID', (0,0), (-1,0),1, colors.black), ('GRID', (-2,-3), (-1,-1), 1, colors.black), ('BOX', (0,1), (0,-4), 1, colors.black), ('BOX', (1,1), (1,-4), 1, colors.black), ('BOX', (2,1), (2,-4), 1, colors.black), ('BOX', (-1,1), (-1,-4), 1, colors.black), ('ALIGN',(0,0),(0,-1),'LEFT'), ('ALIGN',(1,0),(-1,-1),'CENTER'), ('ALIGN',(-1,0),(-1,-1),'RIGHT'), ('FONTNAME', (0,-3), (0,-3), 
'Helvetica-Bold'), ] table = Table(data, colWidths=colWidths, style=style) # create table and get width and height t_width, t_height = table.wrap(0,0) table.drawOn(pdf, 0, nh-t_height) p = Paragraph(BILLJOBS_BILL_PAYMENT_INFO, getSampleStyleSheet()['Normal']) p.wrapOn(pdf, width*0.6, 100) p.drawOn(pdf, 0, 3*lh) pdf.line(0, 2*lh, width, 2*lh) pdf.setFontSize(8) pdf.drawCentredString(width/2.0, lh, 'Association Loi 1901') pdf.showPage() pdf.save() # get pdf from buffer and return it to response genpdf = buffer.getvalue() buffer.close() response.write(genpdf) return response remove rest framework import and views # -*- coding: utf-8 -*- from django.shortcuts import render from django.http import HttpResponse, Http404 from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User, Group from reportlab.pdfgen import canvas from reportlab.lib import colors from reportlab.lib.pagesizes import A4 from reportlab.lib.units import cm from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import Table, Paragraph from io import BytesIO from .settings import BILLJOBS_DEBUG_PDF, BILLJOBS_BILL_LOGO_PATH, \ BILLJOBS_BILL_LOGO_WIDTH, BILLJOBS_BILL_LOGO_HEIGHT, \ BILLJOBS_BILL_PAYMENT_INFO from .models import Bill from textwrap import wrap @login_required def generate_pdf(request, bill_id): bill = Bill.objects.get(id=bill_id) response = HttpResponse(content_type='application/pdf') response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % bill.number # Create a buffer buffer = BytesIO() pdf = canvas.Canvas(buffer, pagesize=A4) # define new 0,0 bottom left with cm as margin pdf.translate(cm,cm) # define document width and height with cm as margin width, height = A4 width = width - 2*cm height = height - 2*cm # if debug draw lines for document limit if BILLJOBS_DEBUG_PDF is True: pdf.setStrokeColorRGB(1,0,0) pdf.line(0,0,width,0) pdf.line(0,0,0,height) pdf.line(0,height,width,height) pdf.line(width,height,width,0) # 
Put logo on top of pdf original image size is 570px/250px pdf.drawImage(BILLJOBS_BILL_LOGO_PATH, 0, height-BILLJOBS_BILL_LOGO_HEIGHT, width=BILLJOBS_BILL_LOGO_WIDTH, height=BILLJOBS_BILL_LOGO_HEIGHT) # billing information lh = 15 #define a line height pdf.setFillColorRGB(0.3,0.3,0.3) pdf.setFont("Helvetica-Bold", 14) pdf.drawRightString(width, height-lh, 'Facture'); pdf.setFont("Helvetica-Bold", 10) pdf.drawRightString(width, height-2*lh, u'Numéro : %s' % bill.number) pdf.setFont("Helvetica", 10) pdf.drawRightString(width, height-3*lh, u'Date facturation : %s' % bill.billing_date.strftime('%d/%m/%Y')) # define new height nh = height - 90 # seller pdf.setFillColorRGB(0.95,0.95,0.95) pdf.setStrokeColorRGB(1,1,1) # rect(x,y,width,height) pdf.rect(0, nh-8*lh, width/2-40, 6.4*lh, fill=1) # reset fill for text color pdf.setFillColorRGB(0.3,0.3,0.3) pdf.drawString(10, nh-lh, 'Émetteur') issuer = Paragraph(bill.issuer_address, getSampleStyleSheet()['Normal']) issuer.wrapOn(pdf, width*0.25, 6*lh) issuer.drawOn(pdf, 20, nh-6*lh) # customer pdf.drawString(width/2, nh-lh, 'Adressé à') customer = pdf.beginText() customer.setTextOrigin(width/2+20, nh-3*lh) # create text with \n and remove \r text = '%s %s\n%s' % (bill.user.first_name, bill.user.last_name, bill.billing_address.replace('\r','')) # get each line for line in text.split('\n'): customer.textOut(line) customer.moveCursor(0,lh) pdf.drawText(customer) pdf.setStrokeColorRGB(0,0,0) # rect(x,y,width,height) pdf.rect(width/2, nh-8*lh, width/2, 6.4*lh, fill=0) # define new height nh = nh - 10*lh data = [['Désignation', 'Prix unit. 
HT', 'Quantité', 'Total HT']] for line in bill.billline_set.all(): description = '%s - %s\n%s' % (line.service.reference, line.service.name, '\n'.join(wrap(line.service.description,62))) if line.note : description = '%s\n%s' % (description, '\n'.join(wrap(line.note,62))) line = (description, line.service.price, line.quantity, line.total) data.append(line) data.append(('TVA non applicable art-293B du CGI', '', 'Total HT', '%s €' % bill.amount)) data.append(('', '', 'TVA 0%', '0')) data.append(('', '', 'Total TTC', '%s €' % bill.amount)) # widths in percent of pdf width colWidths = (width*0.55, width*0.15, width*0.15, width*0.15) style = [('GRID', (0,0), (-1,0),1, colors.black), ('GRID', (-2,-3), (-1,-1), 1, colors.black), ('BOX', (0,1), (0,-4), 1, colors.black), ('BOX', (1,1), (1,-4), 1, colors.black), ('BOX', (2,1), (2,-4), 1, colors.black), ('BOX', (-1,1), (-1,-4), 1, colors.black), ('ALIGN',(0,0),(0,-1),'LEFT'), ('ALIGN',(1,0),(-1,-1),'CENTER'), ('ALIGN',(-1,0),(-1,-1),'RIGHT'), ('FONTNAME', (0,-3), (0,-3), 'Helvetica-Bold'), ] table = Table(data, colWidths=colWidths, style=style) # create table and get width and height t_width, t_height = table.wrap(0,0) table.drawOn(pdf, 0, nh-t_height) p = Paragraph(BILLJOBS_BILL_PAYMENT_INFO, getSampleStyleSheet()['Normal']) p.wrapOn(pdf, width*0.6, 100) p.drawOn(pdf, 0, 3*lh) pdf.line(0, 2*lh, width, 2*lh) pdf.setFontSize(8) pdf.drawCentredString(width/2.0, lh, 'Association Loi 1901') pdf.showPage() pdf.save() # get pdf from buffer and return it to response genpdf = buffer.getvalue() buffer.close() response.write(genpdf) return response